Compare commits: v0.15.0rc0...v0.16.0rc0 — 132 commits

Commit SHAs:
133765760b, bfb9bdaf3f, 2284461d02, 8e2a469b3b, 23591e631e, 0493d897c4, 8c8ebeb941, 831453fcef, 5a66c9cc76, 5e73e4900c, c6e7404cc5, 17b17c0684, 8bb6271c77, 8b3f0a99dd, 8311f083bd, 40c35038d2, a5aa4d5c0f, 615e8033e5, d09135fbd0, 8688c3d460, 5400014d55, 3a92c6f3b5, e01ff5c070, fb946a7f89, a650ad1588, d697581a7c, 5eeba80c74, 08b1195e62, 3bba2edb0f, 53fc166402, 31b25f6516, abb34ac43a, 2515bbd027, c487a8eef4, 9e138cb01d, f9d03599ef, 39037d258e, 51550179fc, 07ea184f00, a663b218ae, 1bd47d6e5a, 141cd43967, 6bf3b46d78, 77c4f45c6c, ca1969186d, ab597c869a, 4197168ea5, 59bcc5b6f2, 3e440786af, 8bdd3979d8, c4e744dbd4, 8ebf372e9d, f210f0b7b1, 392c5af4fe, af9b69f977, 8e5e40daf4, 2e8de86777, 247d1a32ea, ecb4f82209, 5914090765, f1acbd68c5, 9581185d51, 2dd359f953, 22ad649501, 36d450e3b8, a2b877df6c, 35fb0b8613, 2eb673a088, a97b5e206d, 911b51b69f, 604e3b87e8, 706f123b23, fb7abfc1d0, 5d3d6e44e8, 46ec6d71c7, e82fa448c4, d9aa39a3bb, 3a6d5cbefd, f5d7049cc1, 3c3c547ce0, 1cbccb6dba, bd92089d33, a6760f1525, 66e601ef79, 0cd259b2d8, 83fb2d09e8, f3a5ee705f, 7cbbca9aaa, 5ec44056f7, 492a7983dd, a608b4c6c2, 1f3a2c2944, 7227d06156, 14385c80fc, 76139d0801, da8d0c441a, 58996f3589, b539f988e1, 6c00645712, b781eeaa15, e0b005d9cf, 3b8f0fe59e, c831911be2, 157caf511b, 0b53bec60b, c568581ff3, 2d7053438a, 5a93b9162b, 6d86fde09c, 510ed1e8d3, 8caffd92df, 58a05b0ca1, 6ee7f18f33, 8f987883cb, ebe0ba91db, 43a013c3a2, c25dbee40d, 19ab0f7ce5, 67fe677c53, d56afd45fd, a2393ed496, be6931ee27, 9ef3b718d9, bb17e8f11c, dcd80206b7, f4a0921c9c, 208c56256f, 9ac818a551, 6ca2c91b96, e33192b269, 61274bdef5, b40db4dfec
@@ -1,7 +1,8 @@
 name: vllm_ci
 job_dirs:
-  - ".buildkite/test_areas"
   - ".buildkite/image_build"
+  - ".buildkite/test_areas"
+  - ".buildkite/hardware_tests"
 run_all_patterns:
   - "docker/Dockerfile"
   - "CMakeLists.txt"
.buildkite/hardware_tests/amd.yaml (new file, 29 lines)
@@ -0,0 +1,29 @@
+group: Hardware
+steps:
+- label: "AMD: :docker: build image"
+  depends_on: []
+  device: amd_cpu
+  no_plugin: true
+  commands:
+    - >
+      docker build
+      --build-arg max_jobs=16
+      --build-arg REMOTE_VLLM=1
+      --build-arg ARG_PYTORCH_ROCM_ARCH='gfx90a;gfx942'
+      --build-arg VLLM_BRANCH=$BUILDKITE_COMMIT
+      --tag "rocm/vllm-ci:${BUILDKITE_COMMIT}"
+      -f docker/Dockerfile.rocm
+      --target test
+      --no-cache
+      --progress plain .
+    - docker push "rocm/vllm-ci:${BUILDKITE_COMMIT}"
+  env:
+    DOCKER_BUILDKIT: "1"
+  retry:
+    automatic:
+      - exit_status: -1  # Agent was lost
+        limit: 1
+      - exit_status: -10 # Agent was lost
+        limit: 1
+      - exit_status: 1   # Machine occasionally fails
+        limit: 1
.buildkite/hardware_tests/arm.yaml (new file, 8 lines)
@@ -0,0 +1,8 @@
+group: Hardware
+steps:
+- label: "Arm CPU Test"
+  soft_fail: true
+  device: arm_cpu
+  no_plugin: true
+  commands:
+    - bash .buildkite/scripts/hardware_ci/run-cpu-test-arm.sh
.buildkite/hardware_tests/ascend_npu.yaml (new file, 10 lines)
@@ -0,0 +1,10 @@
+group: Hardware
+depends_on: ~
+steps:
+- label: "Ascend NPU Test"
+  soft_fail: true
+  timeout_in_minutes: 20
+  no_plugin: true
+  device: ascend_npu
+  commands:
+    - bash .buildkite/scripts/hardware_ci/run-npu-test.sh
.buildkite/hardware_tests/gh200.yaml (new file, 10 lines)
@@ -0,0 +1,10 @@
+group: Hardware
+steps:
+- label: "GH200 Test"
+  soft_fail: true
+  device: gh200
+  no_plugin: true
+  optional: true
+  commands:
+    - nvidia-smi
+    - bash .buildkite/scripts/hardware_ci/run-gh200-test.sh
.buildkite/hardware_tests/intel.yaml (new file, 24 lines)
@@ -0,0 +1,24 @@
+group: Hardware
+depends_on: ~
+steps:
+- label: "Intel CPU Test"
+  soft_fail: true
+  device: intel_cpu
+  no_plugin: true
+  commands:
+    - bash .buildkite/scripts/hardware_ci/run-cpu-test.sh
+
+- label: "Intel HPU Test"
+  soft_fail: true
+  device: intel_hpu
+  no_plugin: true
+  commands:
+    - bash .buildkite/scripts/hardware_ci/run-hpu-test.sh
+
+- label: "Intel GPU Test"
+  depends_on: []
+  soft_fail: true
+  device: intel_gpu
+  no_plugin: true
+  commands:
+    - bash .buildkite/scripts/hardware_ci/run-xpu-test.sh
.buildkite/image_build/image_build.sh
@@ -1,56 +1,256 @@
 #!/bin/bash
-set -e
+set -euo pipefail

-if [[ $# -lt 8 ]]; then
+# replace invalid characters in Docker image tags and truncate to 128 chars
+clean_docker_tag() {
+    local input="$1"
+    echo "$input" | sed 's/[^a-zA-Z0-9._-]/_/g' | cut -c1-128
+}
+
+print_usage_and_exit() {
     echo "Usage: $0 <registry> <repo> <commit> <branch> <vllm_use_precompiled> <vllm_merge_base_commit> <cache_from> <cache_to>"
     exit 1
-fi
+}
+
+print_instance_info() {
+    echo ""
+    echo "=== Debug: Instance Information ==="
+    # Get IMDSv2 token
+    if TOKEN=$(curl -s -X PUT "http://169.254.169.254/latest/api/token" \
+        -H "X-aws-ec2-metadata-token-ttl-seconds: 21600" 2>/dev/null); then
+        AMI_ID=$(curl -s -H "X-aws-ec2-metadata-token: $TOKEN" \
+            http://169.254.169.254/latest/meta-data/ami-id 2>/dev/null || echo "unknown")
+        INSTANCE_TYPE=$(curl -s -H "X-aws-ec2-metadata-token: $TOKEN" \
+            http://169.254.169.254/latest/meta-data/instance-type 2>/dev/null || echo "unknown")
+        INSTANCE_ID=$(curl -s -H "X-aws-ec2-metadata-token: $TOKEN" \
+            http://169.254.169.254/latest/meta-data/instance-id 2>/dev/null || echo "unknown")
+        AZ=$(curl -s -H "X-aws-ec2-metadata-token: $TOKEN" \
+            http://169.254.169.254/latest/meta-data/placement/availability-zone 2>/dev/null || echo "unknown")
+        echo "AMI ID: ${AMI_ID}"
+        echo "Instance Type: ${INSTANCE_TYPE}"
+        echo "Instance ID: ${INSTANCE_ID}"
+        echo "AZ: ${AZ}"
+    else
+        echo "Not running on EC2 or IMDS not available"
+    fi
+    # Check for warm cache AMI (marker file baked into custom AMI)
+    if [[ -f /etc/vllm-ami-info ]]; then
+        echo "Cache: warm (custom vLLM AMI)"
+        cat /etc/vllm-ami-info
+    else
+        echo "Cache: cold (standard AMI)"
+    fi
+    echo "==================================="
+    echo ""
+}
+
+setup_buildx_builder() {
+    echo "--- :buildkite: Setting up buildx builder"
+    if [[ -S "${BUILDKIT_SOCKET}" ]]; then
+        # Custom AMI with standalone buildkitd - use remote driver for warm cache
+        echo "✅ Found local buildkitd socket at ${BUILDKIT_SOCKET}"
+        echo "Using remote driver to connect to buildkitd (warm cache available)"
+        if docker buildx inspect baked-vllm-builder >/dev/null 2>&1; then
+            echo "Using existing baked-vllm-builder"
+            docker buildx use baked-vllm-builder
+        else
+            echo "Creating baked-vllm-builder with remote driver"
+            docker buildx create \
+                --name baked-vllm-builder \
+                --driver remote \
+                --use \
+                "unix://${BUILDKIT_SOCKET}"
+        fi
+        docker buildx inspect --bootstrap
+    elif docker buildx inspect "${BUILDER_NAME}" >/dev/null 2>&1; then
+        # Existing builder available
+        echo "Using existing builder: ${BUILDER_NAME}"
+        docker buildx use "${BUILDER_NAME}"
+        docker buildx inspect --bootstrap
+    else
+        # No local buildkitd, no existing builder - create new docker-container builder
+        echo "No local buildkitd found, using docker-container driver"
+        docker buildx create --name "${BUILDER_NAME}" --driver docker-container --use
+        docker buildx inspect --bootstrap
+    fi
+
+    # builder info
+    echo "Active builder:"
+    docker buildx ls | grep -E '^\*|^NAME' || docker buildx ls
+}
+
+check_and_skip_if_image_exists() {
+    if [[ -n "${IMAGE_TAG:-}" ]]; then
+        echo "--- :mag: Checking if image exists"
+        if docker manifest inspect "${IMAGE_TAG}" >/dev/null 2>&1; then
+            echo "Image already exists: ${IMAGE_TAG}"
+            echo "Skipping build"
+            exit 0
+        fi
+        echo "Image not found, proceeding with build"
+    fi
+}
+
+ecr_login() {
+    aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin "$REGISTRY"
+    aws ecr get-login-password --region us-east-1 | docker login --username AWS --password-stdin 936637512419.dkr.ecr.us-east-1.amazonaws.com
+}
+
+prepare_cache_tags() {
+    # resolve and set: CACHE_TO, CACHE_FROM, CACHE_FROM_BASE_BRANCH, CACHE_FROM_MAIN
+    TEST_CACHE_ECR="936637512419.dkr.ecr.us-east-1.amazonaws.com/vllm-ci-test-cache"
+    MAIN_CACHE_ECR="936637512419.dkr.ecr.us-east-1.amazonaws.com/vllm-ci-postmerge-cache"
+
+    if [[ "$BUILDKITE_PULL_REQUEST" == "false" ]]; then
+        if [[ "$BUILDKITE_BRANCH" == "main" ]]; then
+            cache="${MAIN_CACHE_ECR}:latest"
+        else
+            clean_branch=$(clean_docker_tag "$BUILDKITE_BRANCH")
+            cache="${TEST_CACHE_ECR}:${clean_branch}"
+        fi
+        CACHE_TO="$cache"
+        CACHE_FROM="$cache"
+        CACHE_FROM_BASE_BRANCH="$cache"
+    else
+        CACHE_TO="${TEST_CACHE_ECR}:pr-${BUILDKITE_PULL_REQUEST}"
+        CACHE_FROM="${TEST_CACHE_ECR}:pr-${BUILDKITE_PULL_REQUEST}"
+        if [[ "$BUILDKITE_PULL_REQUEST_BASE_BRANCH" == "main" ]]; then
+            CACHE_FROM_BASE_BRANCH="${MAIN_CACHE_ECR}:latest"
+        else
+            clean_base=$(clean_docker_tag "$BUILDKITE_PULL_REQUEST_BASE_BRANCH")
+            CACHE_FROM_BASE_BRANCH="${TEST_CACHE_ECR}:${clean_base}"
+        fi
+    fi
+
+    CACHE_FROM_MAIN="${MAIN_CACHE_ECR}:latest"
+    export CACHE_TO CACHE_FROM CACHE_FROM_BASE_BRANCH CACHE_FROM_MAIN
+}
+
+resolve_parent_commit() {
+    if [[ -z "${PARENT_COMMIT:-}" ]]; then
+        PARENT_COMMIT=$(git rev-parse HEAD~1 2>/dev/null || echo "")
+        if [[ -n "${PARENT_COMMIT}" ]]; then
+            echo "Computed parent commit for cache fallback: ${PARENT_COMMIT}"
+            export PARENT_COMMIT
+        else
+            echo "Could not determine parent commit (may be first commit in repo)"
+        fi
+    else
+        echo "Using provided PARENT_COMMIT: ${PARENT_COMMIT}"
+    fi
+}
+
+print_bake_config() {
+    echo "--- :page_facing_up: Resolved bake configuration"
+    BAKE_CONFIG_FILE="bake-config-build-${BUILDKITE_BUILD_NUMBER:-local}.json"
+    docker buildx bake -f "${VLLM_BAKE_FILE_PATH}" -f "${CI_HCL_PATH}" --print "${TARGET}" | tee "${BAKE_CONFIG_FILE}" || true
+    echo "Saved bake config to ${BAKE_CONFIG_FILE}"
+    echo "--- :arrow_down: Uploading bake config to Buildkite"
+    buildkite-agent artifact upload "${BAKE_CONFIG_FILE}"
+}
+
+#################################
+#          Main Script          #
+#################################
+print_instance_info
+
+if [[ $# -lt 7 ]]; then
+    print_usage_and_exit
+fi
+
+# input args
 REGISTRY=$1
 REPO=$2
 BUILDKITE_COMMIT=$3
 BRANCH=$4
 VLLM_USE_PRECOMPILED=$5
 VLLM_MERGE_BASE_COMMIT=$6
-CACHE_FROM=$7
-CACHE_TO=$8
+IMAGE_TAG=$7
+IMAGE_TAG_LATEST=${8:-} # only used for main branch, optional

-# authenticate with AWS ECR
-aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin $REGISTRY
-aws ecr get-login-password --region us-east-1 | docker login --username AWS --password-stdin 936637512419.dkr.ecr.us-east-1.amazonaws.com
+# build config
+TARGET="test-ci"
+VLLM_BAKE_FILE_PATH="${VLLM_BAKE_FILE_PATH:-docker/docker-bake.hcl}"
+BUILDER_NAME="${BUILDER_NAME:-vllm-builder}"
+CI_HCL_URL="${CI_HCL_URL:-https://raw.githubusercontent.com/vllm-project/ci-infra/main/docker/ci.hcl}"
+CI_HCL_PATH="/tmp/ci.hcl"
+BUILDKIT_SOCKET="/run/buildkit/buildkitd.sock"

-# docker buildx
-docker buildx create --name vllm-builder --driver docker-container --use
-docker buildx inspect --bootstrap
-docker buildx ls
+prepare_cache_tags
+ecr_login

-# skip build if image already exists
-if [[ -z $(docker manifest inspect $REGISTRY/$REPO:$BUILDKITE_COMMIT) ]]; then
-    echo "Image not found, proceeding with build..."
-else
-    echo "Image found"
-    exit 0
-fi
+# Environment info (for docs and human readers)
+# VLLM_CI_BRANCH - ci-infra branch to use (default: main)
+# VLLM_BAKE_FILE_PATH - Path to vLLM's bake file (default: docker/docker-bake.hcl)
+# BUILDER_NAME - Name for buildx builder (default: vllm-builder)
+#
+# Build configuration (exported as environment variables for bake):
+export BUILDKITE_COMMIT
+export PARENT_COMMIT
+export IMAGE_TAG
+export IMAGE_TAG_LATEST
+export CACHE_FROM
+export CACHE_FROM_BASE_BRANCH
+export CACHE_FROM_MAIN
+export CACHE_TO
+export VLLM_USE_PRECOMPILED
+export VLLM_MERGE_BASE_COMMIT
+
+# print args
+echo "--- :mag: Arguments"
+echo "REGISTRY: ${REGISTRY}"
+echo "REPO: ${REPO}"
+echo "BUILDKITE_COMMIT: ${BUILDKITE_COMMIT}"
+echo "BRANCH: ${BRANCH}"
+echo "VLLM_USE_PRECOMPILED: ${VLLM_USE_PRECOMPILED}"
+echo "VLLM_MERGE_BASE_COMMIT: ${VLLM_MERGE_BASE_COMMIT}"
+echo "IMAGE_TAG: ${IMAGE_TAG}"
+echo "IMAGE_TAG_LATEST: ${IMAGE_TAG_LATEST}"
+
+# print build configuration
+echo "--- :mag: Build configuration"
+echo "TARGET: ${TARGET}"
+echo "vLLM bake file: ${VLLM_BAKE_FILE_PATH}"
+echo "BUILDER_NAME: ${BUILDER_NAME}"
+echo "CI_HCL_URL: ${CI_HCL_URL}"
+echo "BUILDKIT_SOCKET: ${BUILDKIT_SOCKET}"
+
+echo "--- :mag: Cache tags"
+echo "CACHE_TO: ${CACHE_TO}"
+echo "CACHE_FROM: ${CACHE_FROM}"
+echo "CACHE_FROM_BASE_BRANCH: ${CACHE_FROM_BASE_BRANCH}"
+echo "CACHE_FROM_MAIN: ${CACHE_FROM_MAIN}"
+
+check_and_skip_if_image_exists
+
+echo "--- :docker: Setting up Docker buildx bake"
+echo "Target: ${TARGET}"
+echo "vLLM bake file: ${VLLM_BAKE_FILE_PATH}"
+echo "CI HCL path: ${CI_HCL_PATH}"
+
+if [[ ! -f "${VLLM_BAKE_FILE_PATH}" ]]; then
+    echo "Error: vLLM bake file not found at ${VLLM_BAKE_FILE_PATH}"
+    echo "Make sure you're running from the vLLM repository root"
+    exit 1
+fi

-if [[ "${VLLM_USE_PRECOMPILED:-0}" == "1" ]]; then
-    merge_base_commit_build_args="--build-arg VLLM_MERGE_BASE_COMMIT=${VLLM_MERGE_BASE_COMMIT}"
-else
-    merge_base_commit_build_args=""
-fi
+echo "--- :arrow_down: Downloading ci.hcl"
+curl -sSfL -o "${CI_HCL_PATH}" "${CI_HCL_URL}"
+echo "Downloaded to ${CI_HCL_PATH}"
+if [[ ! -f "${CI_HCL_PATH}" ]]; then
+    echo "Error: ci.hcl not found at ${CI_HCL_PATH}"
+    exit 1
+fi

-# build
-docker buildx build --file docker/Dockerfile \
-    --build-arg max_jobs=16 \
-    --build-arg buildkite_commit=$BUILDKITE_COMMIT \
-    --build-arg USE_SCCACHE=1 \
-    --build-arg TORCH_CUDA_ARCH_LIST="8.0 8.9 9.0 10.0" \
-    --build-arg FI_TORCH_CUDA_ARCH_LIST="8.0 8.9 9.0a 10.0a" \
-    --build-arg VLLM_USE_PRECOMPILED="${VLLM_USE_PRECOMPILED:-0}" \
-    ${merge_base_commit_build_args} \
-    --cache-from type=registry,ref=${CACHE_FROM},mode=max \
-    --cache-to type=registry,ref=${CACHE_TO},mode=max \
-    --tag ${REGISTRY}/${REPO}:${BUILDKITE_COMMIT} \
-    $( [[ "${BRANCH}" == "main" ]] && echo "--tag ${REGISTRY}/${REPO}:latest" ) \
-    --push \
-    --target test \
-    --progress plain .
+setup_buildx_builder
+
+resolve_parent_commit
+export PARENT_COMMIT
+
+print_bake_config
+
+echo "--- :docker: Building ${TARGET}"
+docker --debug buildx bake -f "${VLLM_BAKE_FILE_PATH}" -f "${CI_HCL_PATH}" --progress plain "${TARGET}"
+
+echo "--- :white_check_mark: Build complete"
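Two details in the rewritten image_build.sh are worth calling out. First, cache tags are now derived inside the script from branch and PR metadata rather than passed in as arguments, and `clean_docker_tag` keeps those derived tags valid: Docker tags cannot contain characters like `/` or `+`, which branch names often do. A minimal standalone sketch of the helper's behavior (the function body is copied from the hunk above; the sample branch names are made up):

```bash
#!/bin/bash
# Demo of the clean_docker_tag helper: any character outside
# [a-zA-Z0-9._-] becomes "_", and the result is capped at 128 chars.
clean_docker_tag() {
    local input="$1"
    echo "$input" | sed 's/[^a-zA-Z0-9._-]/_/g' | cut -c1-128
}

clean_docker_tag "feature/fix+cuda-13"   # prints: feature_fix_cuda-13
clean_docker_tag "release/v0.16"         # prints: release_v0.16
```

Second, because the script now resolves CACHE_FROM/CACHE_TO itself, the pipeline only needs to pass image tags, which is exactly what the invocation change in the next hunk does.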
@@ -4,7 +4,8 @@ steps:
     key: image-build
     depends_on: []
    commands:
-      - .buildkite/image_build/image_build.sh $REGISTRY $REPO $BUILDKITE_COMMIT $BRANCH $VLLM_USE_PRECOMPILED $VLLM_MERGE_BASE_COMMIT $CACHE_FROM $CACHE_TO
+      - if [[ "$BUILDKITE_BRANCH" != "main" ]]; then .buildkite/image_build/image_build.sh $REGISTRY $REPO $BUILDKITE_COMMIT $BRANCH $VLLM_USE_PRECOMPILED $VLLM_MERGE_BASE_COMMIT $IMAGE_TAG; fi
+      - if [[ "$BUILDKITE_BRANCH" == "main" ]]; then .buildkite/image_build/image_build.sh $REGISTRY $REPO $BUILDKITE_COMMIT $BRANCH $VLLM_USE_PRECOMPILED $VLLM_MERGE_BASE_COMMIT $IMAGE_TAG $IMAGE_TAG_LATEST; fi
     retry:
       automatic:
         - exit_status: -1 # Agent was lost
@@ -393,7 +393,7 @@ if __name__ == "__main__":
|
|||||||
with open(results_folder / md_file, "w") as f:
|
with open(results_folder / md_file, "w") as f:
|
||||||
results = read_markdown(
|
results = read_markdown(
|
||||||
"../.buildkite/performance-benchmarks/"
|
"../.buildkite/performance-benchmarks/"
|
||||||
+ "performance-benchmarks-descriptions.md"
|
"performance-benchmarks-descriptions.md"
|
||||||
)
|
)
|
||||||
results = results.format(
|
results = results.format(
|
||||||
latency_tests_markdown_table=latency_md_table,
|
latency_tests_markdown_table=latency_md_table,
|
||||||
|
|||||||
@@ -181,19 +181,20 @@ upload_to_buildkite() {
     $BUILDKITE_AGENT_COMMAND artifact upload "$RESULTS_FOLDER/*"
 }

-run_latency_tests() {
-    # run latency tests using `vllm bench latency` command
-    # $1: a json file specifying latency test cases
+run_benchmark_tests() {
+    # run benchmark tests using `vllm bench <test_type>` command
+    # $1: test type (latency or throughput)
+    # $2: a json file specifying test cases

-    local latency_test_file
-    latency_test_file=$1
+    local test_type=$1
+    local test_file=$2

-    # Iterate over latency tests
-    jq -c '.[]' "$latency_test_file" | while read -r params; do
+    # Iterate over tests
+    jq -c '.[]' "$test_file" | while read -r params; do
         # get the test name, and append the GPU type back to it.
         test_name=$(echo "$params" | jq -r '.test_name')
-        if [[ ! "$test_name" =~ ^latency_ ]]; then
-            echo "In latency-test.json, test_name must start with \"latency_\"."
+        if [[ ! "$test_name" =~ ^${test_type}_ ]]; then
+            echo "In ${test_type}-test.json, test_name must start with \"${test_type}_\"."
             exit 1
         fi
@@ -204,15 +205,15 @@ run_latency_tests() {
         fi

         # get arguments
-        latency_params=$(echo "$params" | jq -r '.parameters')
-        latency_args=$(json2args "$latency_params")
-        latency_environment_variables=$(echo "$params" | jq -r '.environment_variables')
-        latency_envs=$(json2envs "$latency_environment_variables")
+        bench_params=$(echo "$params" | jq -r '.parameters')
+        bench_args=$(json2args "$bench_params")
+        bench_environment_variables=$(echo "$params" | jq -r '.environment_variables')
+        bench_envs=$(json2envs "$bench_environment_variables")

         # check if there is enough GPU to run the test
-        tp=$(echo "$latency_params" | jq -r '.tensor_parallel_size')
+        tp=$(echo "$bench_params" | jq -r '.tensor_parallel_size')
         if [[ "$ON_CPU" == "1" ]]; then
-            pp=$(echo "$latency_params" | jq -r '.pipeline_parallel_size // 1')
+            pp=$(echo "$bench_params" | jq -r '.pipeline_parallel_size // 1')
             world_size=$(($tp*$pp))
             if [[ $numa_count -lt $world_size && -z "${REMOTE_HOST}" ]]; then
                 echo "Required world-size $world_size but only $numa_count NUMA nodes found. Skip testcase $test_name."
@@ -225,97 +226,42 @@ run_latency_tests() {
             fi
         fi

-        latency_command=" $latency_envs vllm bench latency \
+        bench_command=" $bench_envs vllm bench $test_type \
             --output-json $RESULTS_FOLDER/${test_name}.json \
-            $latency_args"
+            $bench_args"

         echo "Running test case $test_name"
-        echo "Latency command: $latency_command"
+        echo "${test_type^} command: $bench_command"

-        # recoding benchmarking command ang GPU command
+        # recording benchmarking command and GPU command
         jq_output=$(jq -n \
-            --arg latency "$latency_command" \
+            --arg command "$bench_command" \
             --arg gpu "$gpu_type" \
+            --arg test_type "$test_type" \
             '{
-                latency_command: $latency,
+                ($test_type + "_command"): $command,
                 gpu_type: $gpu
             }')
         echo "$jq_output" >"$RESULTS_FOLDER/$test_name.commands"

         # run the benchmark
-        eval "$latency_command"
+        eval "$bench_command"

         kill_gpu_processes

     done
 }

+run_latency_tests() {
+    run_benchmark_tests "latency" "$1"
+}
+
+run_startup_tests() {
+    run_benchmark_tests "startup" "$1"
+}
+
 run_throughput_tests() {
-    # run throughput tests using `vllm bench throughput`
-    # $1: a json file specifying throughput test cases
-
-    local throughput_test_file
-    throughput_test_file=$1
-
-    # Iterate over throughput tests
-    jq -c '.[]' "$throughput_test_file" | while read -r params; do
-        # get the test name, and append the GPU type back to it.
-        test_name=$(echo "$params" | jq -r '.test_name')
-        if [[ ! "$test_name" =~ ^throughput_ ]]; then
-            echo "In throughput-test.json, test_name must start with \"throughput_\"."
-            exit 1
-        fi
-
-        # if TEST_SELECTOR is set, only run the test cases that match the selector
-        if [[ -n "$TEST_SELECTOR" ]] && [[ ! "$test_name" =~ $TEST_SELECTOR ]]; then
-            echo "Skip test case $test_name."
-            continue
-        fi
-
-        # get arguments
-        throughput_params=$(echo "$params" | jq -r '.parameters')
-        throughput_args=$(json2args "$throughput_params")
-        throughput_environment_variables=$(echo "$params" | jq -r '.environment_variables')
-        throughput_envs=$(json2envs "$throughput_environment_variables")
-
-        # check if there is enough GPU to run the test
-        tp=$(echo "$throughput_params" | jq -r '.tensor_parallel_size')
-        if [[ "$ON_CPU" == "1" ]]; then
-            pp=$(echo "$throughput_params" | jq -r '.pipeline_parallel_size // 1')
-            world_size=$(($tp*$pp))
-            if [[ $numa_count -lt $world_size && -z "${REMOTE_HOST}" ]]; then
-                echo "Required world-size $world_size but only $numa_count NUMA nodes found. Skip testcase $test_name."
-                continue
-            fi
-        else
-            if [[ $gpu_count -lt $tp ]]; then
-                echo "Required tensor-parallel-size $tp but only $gpu_count GPU found. Skip testcase $test_name."
-                continue
-            fi
-        fi
-
-        throughput_command=" $throughput_envs vllm bench throughput \
-            --output-json $RESULTS_FOLDER/${test_name}.json \
-            $throughput_args"
-
-        echo "Running test case $test_name"
-        echo "Throughput command: $throughput_command"
-        # recoding benchmarking command ang GPU command
-        jq_output=$(jq -n \
-            --arg command "$throughput_command" \
-            --arg gpu "$gpu_type" \
-            '{
-                throughput_command: $command,
-                gpu_type: $gpu
-            }')
-        echo "$jq_output" >"$RESULTS_FOLDER/$test_name.commands"
-
-        # run the benchmark
-        eval "$throughput_command"
-
-        kill_gpu_processes
-
-    done
+    run_benchmark_tests "throughput" "$1"
 }

 run_serving_tests() {
@@ -534,6 +480,7 @@ main() {
     # benchmarking
     run_serving_tests $QUICK_BENCHMARK_ROOT/tests/"${SERVING_JSON:-serving-tests$ARCH.json}"
     run_latency_tests $QUICK_BENCHMARK_ROOT/tests/"${LATENCY_JSON:-latency-tests$ARCH.json}"
+    run_startup_tests $QUICK_BENCHMARK_ROOT/tests/"${STARTUP_JSON:-startup-tests$ARCH.json}"
     run_throughput_tests $QUICK_BENCHMARK_ROOT/tests/"${THROUGHPUT_JSON:-throughput-tests$ARCH.json}"

     # postprocess benchmarking results
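The subtle piece of this consolidation is the jq computed key `($test_type + "_command")`, which preserves the per-type key names (`latency_command`, `throughput_command`, `startup_command`) in the emitted `.commands` files even though a single function now produces all of them. A minimal sketch with made-up values:

```bash
#!/bin/bash
# jq evaluates the parenthesized key expression at runtime, so one
# code path emits latency_command, throughput_command, etc.
test_type="latency"
bench_command="vllm bench latency --output-json out.json"
jq -n \
    --arg command "$bench_command" \
    --arg gpu "H100" \
    --arg test_type "$test_type" \
    '{($test_type + "_command"): $command, gpu_type: $gpu}'
# prints: {"latency_command":"vllm bench latency --output-json out.json","gpu_type":"H100"}
```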
@@ -176,23 +176,6 @@ steps:
     env:
       DOCKER_BUILDKIT: "1"

-  - block: "Build release image for x86_64 ROCm"
-    key: block-rocm-release-image-build
-    depends_on: ~
-
-  - label: "Build release image - x86_64 - ROCm"
-    depends_on: block-rocm-release-image-build
-    id: build-release-image-rocm
-    agents:
-      queue: cpu_queue_postmerge
-    commands:
-      - "aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin public.ecr.aws/q9t5s3a7"
-      # Build base image first
-      - "DOCKER_BUILDKIT=1 docker build --build-arg max_jobs=16 --tag rocm/vllm-dev:base-$BUILDKITE_COMMIT --target final --progress plain -f docker/Dockerfile.rocm_base ."
-      # Build vLLM ROCm image using the base
-      - "DOCKER_BUILDKIT=1 docker build --build-arg max_jobs=16 --build-arg BASE_IMAGE=rocm/vllm-dev:base-$BUILDKITE_COMMIT --tag public.ecr.aws/q9t5s3a7/vllm-release-repo:$BUILDKITE_COMMIT-rocm --target vllm-openai --progress plain -f docker/Dockerfile.rocm ."
-      - "docker push public.ecr.aws/q9t5s3a7/vllm-release-repo:$BUILDKITE_COMMIT-rocm"
-
   - group: "Publish release images"
     key: "publish-release-images"
     steps:
@@ -274,14 +257,14 @@ steps:
       - input-release-version
       - build-wheels

-  - label: "Upload release wheels to PyPI and GitHub"
+  - label: "Upload release wheels to PyPI"
     depends_on:
       - block-upload-release-wheels
     id: upload-release-wheels
     agents:
       queue: small_cpu_queue_postmerge
     commands:
-      - "bash .buildkite/scripts/upload-release-wheels.sh"
+      - "bash .buildkite/scripts/upload-release-wheels-pypi.sh"

 # =============================================================================
 # ROCm Release Pipeline (x86_64 only)
@@ -476,7 +459,7 @@ steps:
       S3_BUCKET: "vllm-wheels"

   # ROCm Job 2: Build vLLM ROCm Wheel
-  - label: ":python: Build vLLM ROCm Wheel"
+  - label: ":python: Build vLLM ROCm Wheel - x86_64"
     id: build-rocm-vllm-wheel
     depends_on:
       - step: build-rocm-base-wheels
@@ -638,9 +621,93 @@ steps:
     depends_on:
       - step: upload-rocm-wheels
         allow_failure: true
+      - step: input-release-version
+        allow_failure: true
     agents:
       queue: cpu_queue_postmerge
     commands:
       - "bash .buildkite/scripts/annotate-rocm-release.sh"
     env:
       S3_BUCKET: "vllm-wheels"

+  # ROCm Job 5: Generate Root Index for ROCm Wheels (for release only)
+  # This is the job to create https://wheels.vllm.ai/rocm/ index allowing
+  # users to install with `uv pip install vllm --extra-index-url https://wheels.vllm.ai/rocm/`
+  - block: "Generate Root Index for ROCm Wheels for Release"
+    key: block-generate-root-index-rocm-wheels
+    depends_on: upload-rocm-wheels
+
+  - label: ":package: Generate Root Index for ROCm Wheels for Release"
+    depends_on: block-generate-root-index-rocm-wheels
+    id: generate-root-index-rocm-wheels
+    agents:
+      queue: cpu_queue_postmerge
+    commands:
+      - "bash tools/vllm-rocm/generate-rocm-wheels-root-index.sh"
+    env:
+      S3_BUCKET: "vllm-wheels"
+      VARIANT: "rocm700"
+
+  # ROCm Job 5: Build ROCm Release Docker Image
+  - label: ":docker: Build release image - x86_64 - ROCm"
+    id: build-rocm-release-image
+    depends_on:
+      - step: build-rocm-base-wheels
+        allow_failure: false
+    agents:
+      queue: cpu_queue_postmerge
+    timeout_in_minutes: 60
+    commands:
+      - |
+        set -euo pipefail
+
+        # Login to ECR
+        aws ecr-public get-login-password --region us-east-1 | \
+          docker login --username AWS --password-stdin public.ecr.aws/q9t5s3a7
+
+        # Download Docker image from S3 (set by build-rocm-base-wheels)
+        DOCKER_IMAGE_S3_PATH="$$(buildkite-agent meta-data get rocm-docker-image-s3-path 2>/dev/null || echo '')"
+        if [ -z "$${DOCKER_IMAGE_S3_PATH}" ]; then
+          echo "ERROR: rocm-docker-image-s3-path metadata not found"
+          exit 1
+        fi
+
+        echo "Downloading base image from $${DOCKER_IMAGE_S3_PATH}"
+        mkdir -p artifacts/rocm-docker-image
+        aws s3 cp "$${DOCKER_IMAGE_S3_PATH}" artifacts/rocm-docker-image/rocm-base-image.tar.gz
+
+        # Load base Docker image
+        echo "Loading base Docker image..."
+        LOAD_OUTPUT=$$(gunzip -c artifacts/rocm-docker-image/rocm-base-image.tar.gz | docker load)
+        BASE_IMAGE_TAG=$$(echo "$${LOAD_OUTPUT}" | grep "Loaded image:" | sed 's/Loaded image: //')
+        echo "Loaded base image: $${BASE_IMAGE_TAG}"
+
+        # Tag and push the base image to ECR
+        docker tag "$${BASE_IMAGE_TAG}" public.ecr.aws/q9t5s3a7/vllm-release-repo:$${BUILDKITE_COMMIT}-rocm-base
+        docker push public.ecr.aws/q9t5s3a7/vllm-release-repo:$${BUILDKITE_COMMIT}-rocm-base
+        echo "Pushed base image: public.ecr.aws/q9t5s3a7/vllm-release-repo:$${BUILDKITE_COMMIT}-rocm-base"
+
+        # Get GPU architectures from meta-data
+        PYTORCH_ROCM_ARCH="$$(buildkite-agent meta-data get rocm-pytorch-rocm-arch 2>/dev/null || echo '')"
+        PYTORCH_ROCM_ARCH="$${PYTORCH_ROCM_ARCH:-gfx90a;gfx942;gfx950;gfx1100;gfx1101;gfx1200;gfx1201;gfx1150;gfx1151}"
+
+        # Build vLLM ROCm release image using cached base
+        DOCKER_BUILDKIT=1 docker build \
+          --build-arg max_jobs=16 \
+          --build-arg BASE_IMAGE="$${BASE_IMAGE_TAG}" \
+          --build-arg ARG_PYTORCH_ROCM_ARCH="$${PYTORCH_ROCM_ARCH}" \
+          --build-arg USE_SCCACHE=1 \
+          --build-arg SCCACHE_BUCKET_NAME=vllm-build-sccache \
+          --build-arg SCCACHE_REGION_NAME=us-west-2 \
+          --build-arg SCCACHE_S3_NO_CREDENTIALS=0 \
+          --tag public.ecr.aws/q9t5s3a7/vllm-release-repo:$${BUILDKITE_COMMIT}-rocm \
+          --target vllm-openai \
+          --progress plain \
+          -f docker/Dockerfile.rocm .
+
+        # Push to ECR
+        docker push public.ecr.aws/q9t5s3a7/vllm-release-repo:$${BUILDKITE_COMMIT}-rocm
+        echo "Pushed: public.ecr.aws/q9t5s3a7/vllm-release-repo:$${BUILDKITE_COMMIT}-rocm"
+    env:
+      DOCKER_BUILDKIT: "1"
+      S3_BUCKET: "vllm-wheels"
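One thing to note in the inlined `- |` command block above is the doubled dollar signs (`$${BUILDKITE_COMMIT}`, `$$( ... )`). Buildkite interpolates single-`$` variables when the pipeline is uploaded, so `$$` is the documented escape that defers expansion to the agent's shell at run time. A hypothetical minimal step, not part of the diff, showing the difference:

```yaml
# Contrast between upload-time and run-time expansion in Buildkite YAML.
steps:
  - label: "interpolation demo"
    commands:
      # Substituted by Buildkite when the pipeline is uploaded:
      - echo "upload-time commit: ${BUILDKITE_COMMIT}"
      # Left as ${BUILDKITE_COMMIT} in the uploaded pipeline and
      # expanded by the agent's shell when the job actually runs:
      - echo "run-time commit: $${BUILDKITE_COMMIT}"
```

Run-time expansion matters here because values like `BASE_IMAGE_TAG` only exist once the job's shell has executed the `docker load` step.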
@@ -11,51 +11,80 @@ fi
 buildkite-agent annotate --style 'info' --context 'release-workflow' << EOF
 To download the wheel (by commit):
 \`\`\`
-aws s3 cp s3://vllm-wheels/${BUILDKITE_COMMIT}/vllm-${RELEASE_VERSION}-cp38-abi3-manylinux1_x86_64.whl .
-aws s3 cp s3://vllm-wheels/${BUILDKITE_COMMIT}/vllm-${RELEASE_VERSION}-cp38-abi3-manylinux2014_aarch64.whl .
+aws s3 cp s3://vllm-wheels/${BUILDKITE_COMMIT}/vllm-${RELEASE_VERSION}-cp38-abi3-manylinux_2_31_x86_64.whl .
+aws s3 cp s3://vllm-wheels/${BUILDKITE_COMMIT}/vllm-${RELEASE_VERSION}-cp38-abi3-manylinux_2_31_aarch64.whl .

-aws s3 cp s3://vllm-wheels/${BUILDKITE_COMMIT}/vllm-${RELEASE_VERSION}+cu129-cp38-abi3-manylinux1_x86_64.whl .
-aws s3 cp s3://vllm-wheels/${BUILDKITE_COMMIT}/vllm-${RELEASE_VERSION}+cu129-cp38-abi3-manylinux1_x86_64.whl .
+(Optional) For CUDA 13.0:
+aws s3 cp s3://vllm-wheels/${BUILDKITE_COMMIT}/vllm-${RELEASE_VERSION}+cu130-cp38-abi3-manylinux_2_35_x86_64.whl .
+aws s3 cp s3://vllm-wheels/${BUILDKITE_COMMIT}/vllm-${RELEASE_VERSION}+cu130-cp38-abi3-manylinux_2_35_aarch64.whl .
+
+(Optional) For CPU:
+aws s3 cp s3://vllm-wheels/${BUILDKITE_COMMIT}/vllm-${RELEASE_VERSION}+cpu-cp38-abi3-manylinux_2_35_x86_64.whl .
+aws s3 cp s3://vllm-wheels/${BUILDKITE_COMMIT}/vllm-${RELEASE_VERSION}+cpu-cp38-abi3-manylinux_2_35_aarch64.whl .
 \`\`\`

-To download the wheel (by version):
-\`\`\`
-aws s3 cp s3://vllm-wheels/${RELEASE_VERSION}/vllm-${RELEASE_VERSION}-cp38-abi3-manylinux1_x86_64.whl .
-aws s3 cp s3://vllm-wheels/${RELEASE_VERSION}/vllm-${RELEASE_VERSION}-cp38-abi3-manylinux2014_aarch64.whl .
-
-aws s3 cp s3://vllm-wheels/${RELEASE_VERSION}+cu129/vllm-${RELEASE_VERSION}+cu129-cp38-abi3-manylinux1_x86_64.whl .
-aws s3 cp s3://vllm-wheels/${RELEASE_VERSION}+cu130/vllm-${RELEASE_VERSION}+cu130-cp38-abi3-manylinux1_x86_64.whl .
-\`\`\`
-
 To download and upload the image:

 \`\`\`
+Download images:
+
 docker pull public.ecr.aws/q9t5s3a7/vllm-release-repo:${BUILDKITE_COMMIT}-x86_64
 docker pull public.ecr.aws/q9t5s3a7/vllm-release-repo:${BUILDKITE_COMMIT}-aarch64
+docker pull public.ecr.aws/q9t5s3a7/vllm-release-repo:${BUILDKITE_COMMIT}-x86_64-cu130
+docker pull public.ecr.aws/q9t5s3a7/vllm-release-repo:${BUILDKITE_COMMIT}-aarch64-cu130
+docker pull public.ecr.aws/q9t5s3a7/vllm-release-repo:${BUILDKITE_COMMIT}-rocm-base
 docker pull public.ecr.aws/q9t5s3a7/vllm-release-repo:${BUILDKITE_COMMIT}-rocm

+Tag and push images:
+
 docker tag public.ecr.aws/q9t5s3a7/vllm-release-repo:${BUILDKITE_COMMIT}-x86_64 vllm/vllm-openai:x86_64
 docker tag vllm/vllm-openai:x86_64 vllm/vllm-openai:latest-x86_64
 docker tag vllm/vllm-openai:x86_64 vllm/vllm-openai:v${RELEASE_VERSION}-x86_64
 docker push vllm/vllm-openai:latest-x86_64
 docker push vllm/vllm-openai:v${RELEASE_VERSION}-x86_64

+docker tag public.ecr.aws/q9t5s3a7/vllm-release-repo:${BUILDKITE_COMMIT}-x86_64-cu130 vllm/vllm-openai:x86_64-cu130
+docker tag vllm/vllm-openai:x86_64-cu130 vllm/vllm-openai:latest-x86_64-cu130
+docker tag vllm/vllm-openai:x86_64-cu130 vllm/vllm-openai:v${RELEASE_VERSION}-x86_64-cu130
+docker push vllm/vllm-openai:latest-x86_64-cu130
+docker push vllm/vllm-openai:v${RELEASE_VERSION}-x86_64-cu130
+
 docker tag public.ecr.aws/q9t5s3a7/vllm-release-repo:${BUILDKITE_COMMIT}-aarch64 vllm/vllm-openai:aarch64
 docker tag vllm/vllm-openai:aarch64 vllm/vllm-openai:latest-aarch64
 docker tag vllm/vllm-openai:aarch64 vllm/vllm-openai:v${RELEASE_VERSION}-aarch64
 docker push vllm/vllm-openai:latest-aarch64
 docker push vllm/vllm-openai:v${RELEASE_VERSION}-aarch64

-docker tag public.ecr.aws/q9t5s3a7/vllm-release-repo:${BUILDKITE_COMMIT}-rocm vllm/vllm-openai:rocm
-docker tag vllm/vllm-openai:rocm vllm/vllm-openai:latest-rocm
-docker tag vllm/vllm-openai:rocm vllm/vllm-openai:v${RELEASE_VERSION}-rocm
-docker push vllm/vllm-openai:latest-rocm
-docker push vllm/vllm-openai:v${RELEASE_VERSION}-rocm
+docker tag public.ecr.aws/q9t5s3a7/vllm-release-repo:${BUILDKITE_COMMIT}-aarch64-cu130 vllm/vllm-openai:aarch64-cu130
+docker tag vllm/vllm-openai:aarch64-cu130 vllm/vllm-openai:latest-aarch64-cu130
+docker tag vllm/vllm-openai:aarch64-cu130 vllm/vllm-openai:v${RELEASE_VERSION}-aarch64-cu130
+docker push vllm/vllm-openai:latest-aarch64-cu130
+docker push vllm/vllm-openai:v${RELEASE_VERSION}-aarch64-cu130
+
+docker tag public.ecr.aws/q9t5s3a7/vllm-release-repo:${BUILDKITE_COMMIT}-rocm vllm/vllm-openai-rocm:${BUILDKITE_COMMIT}-rocm
+docker tag vllm/vllm-openai-rocm:${BUILDKITE_COMMIT}-rocm vllm/vllm-openai-rocm:latest
+docker tag vllm/vllm-openai-rocm:${BUILDKITE_COMMIT}-rocm vllm/vllm-openai-rocm:v${RELEASE_VERSION}-rocm
+docker push vllm/vllm-openai-rocm:latest
+docker push vllm/vllm-openai-rocm:v${RELEASE_VERSION}-rocm
+
+Create multi-arch manifest:
+
+docker tag public.ecr.aws/q9t5s3a7/vllm-release-repo:${BUILDKITE_COMMIT}-rocm-base vllm/vllm-openai-rocm:${BUILDKITE_COMMIT}-base
+docker tag vllm/vllm-openai-rocm:${BUILDKITE_COMMIT}-base vllm/vllm-openai-rocm:latest-base
+docker tag vllm/vllm-openai-rocm:${BUILDKITE_COMMIT}-base vllm/vllm-openai-rocm:v${RELEASE_VERSION}-base
+docker push vllm/vllm-openai-rocm:latest-base
+docker push vllm/vllm-openai-rocm:v${RELEASE_VERSION}-base
+
 docker manifest rm vllm/vllm-openai:latest
 docker manifest create vllm/vllm-openai:latest vllm/vllm-openai:latest-x86_64 vllm/vllm-openai:latest-aarch64
 docker manifest create vllm/vllm-openai:v${RELEASE_VERSION} vllm/vllm-openai:v${RELEASE_VERSION}-x86_64 vllm/vllm-openai:v${RELEASE_VERSION}-aarch64
 docker manifest push vllm/vllm-openai:latest
 docker manifest push vllm/vllm-openai:v${RELEASE_VERSION}
+
+docker manifest rm vllm/vllm-openai:latest-cu130
+docker manifest create vllm/vllm-openai:latest-cu130 vllm/vllm-openai:latest-x86_64-cu130 vllm/vllm-openai:latest-aarch64-cu130
+docker manifest create vllm/vllm-openai:v${RELEASE_VERSION}-cu130 vllm/vllm-openai:v${RELEASE_VERSION}-x86_64-cu130 vllm/vllm-openai:v${RELEASE_VERSION}-aarch64-cu130
+docker manifest push vllm/vllm-openai:latest-cu130
+docker manifest push vllm/vllm-openai:v${RELEASE_VERSION}-cu130
 \`\`\`
 EOF
.buildkite/scripts/annotate-rocm-release.sh
@@ -3,25 +3,32 @@
 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
 #
 # Generate Buildkite annotation for ROCm wheel release

 set -ex

 # Get build configuration from meta-data
 # Extract ROCm version dynamically from Dockerfile.rocm_base
-# BASE_IMAGE format: rocm/dev-ubuntu-22.04:7.1-complete -> extracts "7.1"
+# BASE_IMAGE format: rocm/dev-ubuntu-22.04:7.0-complete -> extracts "7.0"
 ROCM_VERSION=$(grep -E '^ARG BASE_IMAGE=' docker/Dockerfile.rocm_base | sed -E 's/.*:([0-9]+\.[0-9]+).*/\1/' || echo "unknown")
 PYTHON_VERSION=$(buildkite-agent meta-data get rocm-python-version 2>/dev/null || echo "3.12")
 PYTORCH_ROCM_ARCH=$(buildkite-agent meta-data get rocm-pytorch-rocm-arch 2>/dev/null || echo "gfx90a;gfx942;gfx950;gfx1100;gfx1101;gfx1200;gfx1201;gfx1150;gfx1151")

+# TODO: Enable the nightly build for ROCm
+# Get release version, default to 1.0.0.dev for nightly/per-commit builds
+RELEASE_VERSION=$(buildkite-agent meta-data get release-version 2>/dev/null || echo "")
+if [ -z "${RELEASE_VERSION}" ]; then
+    RELEASE_VERSION="1.0.0.dev"
+fi
+
 # S3 URLs
 S3_BUCKET="${S3_BUCKET:-vllm-wheels}"
 S3_REGION="${AWS_DEFAULT_REGION:-us-west-2}"
-S3_URL="https://${S3_BUCKET}.s3.${S3_REGION}.amazonaws.com"
-ROCM_PATH="rocm/${BUILDKITE_COMMIT}"
+S3_URL="http://${S3_BUCKET}.s3-website-${S3_REGION}.amazonaws.com"

+# Format ROCm version for path (e.g., "7.1" -> "rocm710")
+ROCM_VERSION_PATH="rocm$(echo ${ROCM_VERSION} | tr -d '.')"
+ROCM_PATH="rocm/${BUILDKITE_COMMIT}/${ROCM_VERSION_PATH}"
 buildkite-agent annotate --style 'success' --context 'rocm-release-workflow' << EOF
-## :rocm: ROCm Wheel Release
+## ROCm Wheel and Docker Image Releases

 ### Build Configuration
 | Setting | Value |
 |---------|-------|
@@ -34,41 +41,72 @@ buildkite-agent annotate --style 'success' --context 'rocm-release-workflow' <<
 ### :package: Installation

 **Install from this build (by commit):**
-\`\`\`bash
-uv pip install vllm --extra-index-url ${S3_URL}/${ROCM_PATH}/{rocm_variant}/

-# Example:
-uv pip install vllm --extra-index-url ${S3_URL}/${ROCM_PATH}/rocm700/
+\`\`\`bash
+pip install vllm --extra-index-url ${S3_URL}/${ROCM_PATH}/ --trusted-host ${S3_BUCKET}.s3-website-${S3_REGION}.amazonaws.com
+
+# Example for ROCm ${ROCM_VERSION}:
+pip install vllm --extra-index-url ${S3_URL}/rocm/${BUILDKITE_COMMIT}/${ROCM_VERSION_PATH}/ --trusted-host ${S3_BUCKET}.s3-website-${S3_REGION}.amazonaws.com
 \`\`\`

 **Install from nightly (if published):**

 \`\`\`bash
-uv pip install vllm --extra-index-url ${S3_URL}/rocm/nightly/
+pip install vllm --extra-index-url ${S3_URL}/rocm/nightly/ --trusted-host ${S3_BUCKET}.s3-website-${S3_REGION}.amazonaws.com
 \`\`\`

 ### :floppy_disk: Download Wheels Directly

 \`\`\`bash
 # List all ROCm wheels
-aws s3 ls s3://${S3_BUCKET}/${ROCM_PATH}/
+aws s3 ls s3://${S3_BUCKET}/rocm/${BUILDKITE_COMMIT}/${ROCM_VERSION_PATH}/

 # Download specific wheels
-aws s3 cp s3://${S3_BUCKET}/${ROCM_PATH}/vllm-*.whl .
-aws s3 cp s3://${S3_BUCKET}/${ROCM_PATH}/torch-*.whl .
-aws s3 cp s3://${S3_BUCKET}/${ROCM_PATH}/triton_rocm-*.whl .
-aws s3 cp s3://${S3_BUCKET}/${ROCM_PATH}/torchvision-*.whl .
-aws s3 cp s3://${S3_BUCKET}/${ROCM_PATH}/amdsmi-*.whl .
+aws s3 cp s3://${S3_BUCKET}/rocm/${BUILDKITE_COMMIT}/${ROCM_VERSION_PATH}/vllm-*.whl .
+aws s3 cp s3://${S3_BUCKET}/rocm/${BUILDKITE_COMMIT}/${ROCM_VERSION_PATH}/torch-*.whl .
+aws s3 cp s3://${S3_BUCKET}/rocm/${BUILDKITE_COMMIT}/${ROCM_VERSION_PATH}/triton-*.whl .
+aws s3 cp s3://${S3_BUCKET}/rocm/${BUILDKITE_COMMIT}/${ROCM_VERSION_PATH}/triton-kernels-*.whl .
+aws s3 cp s3://${S3_BUCKET}/rocm/${BUILDKITE_COMMIT}/${ROCM_VERSION_PATH}/torchvision-*.whl .
+aws s3 cp s3://${S3_BUCKET}/rocm/${BUILDKITE_COMMIT}/${ROCM_VERSION_PATH}/torchaudio-*.whl .
+aws s3 cp s3://${S3_BUCKET}/rocm/${BUILDKITE_COMMIT}/${ROCM_VERSION_PATH}/amdsmi-*.whl .
+aws s3 cp s3://${S3_BUCKET}/rocm/${BUILDKITE_COMMIT}/${ROCM_VERSION_PATH}/aiter-*.whl .
+aws s3 cp s3://${S3_BUCKET}/rocm/${BUILDKITE_COMMIT}/${ROCM_VERSION_PATH}/flash-attn-*.whl .
 \`\`\`

 ### :gear: Included Packages
 - **vllm**: vLLM with ROCm support
 - **torch**: PyTorch built for ROCm ${ROCM_VERSION}
-- **triton_rocm**: Triton built for ROCm
+- **triton**: Triton
+- **triton-kernels**: Triton kernels
 - **torchvision**: TorchVision for ROCm PyTorch
+- **torchaudio**: Torchaudio for ROCm PyTorch
 - **amdsmi**: AMD SMI Python bindings
+- **aiter**: Aiter for ROCm
+- **flash-attn**: Flash Attention for ROCm

 ### :warning: Notes
 - These wheels are built for **ROCm ${ROCM_VERSION}** and will NOT work with CUDA GPUs
 - Supported GPU architectures: ${PYTORCH_ROCM_ARCH}
 - Platform: Linux x86_64 only
+
+### :package: Docker Image Release
+
+To download and upload the image:
+
+\`\`\`
+docker pull public.ecr.aws/q9t5s3a7/vllm-release-repo:${BUILDKITE_COMMIT}-rocm-base
+docker pull public.ecr.aws/q9t5s3a7/vllm-release-repo:${BUILDKITE_COMMIT}-rocm
+
+docker tag public.ecr.aws/q9t5s3a7/vllm-release-repo:${BUILDKITE_COMMIT}-rocm-base vllm/vllm-openai-rocm:${BUILDKITE_COMMIT}-base
+docker tag vllm/vllm-openai-rocm:${BUILDKITE_COMMIT}-base vllm/vllm-openai-rocm:latest-base
+docker tag vllm/vllm-openai-rocm:${BUILDKITE_COMMIT}-base vllm/vllm-openai-rocm:v${RELEASE_VERSION}-base
+docker push vllm/vllm-openai-rocm:latest-base
+docker push vllm/vllm-openai-rocm:v${RELEASE_VERSION}-base
+
+docker tag public.ecr.aws/q9t5s3a7/vllm-release-repo:${BUILDKITE_COMMIT}-rocm vllm/vllm-openai-rocm:${BUILDKITE_COMMIT}
+docker tag vllm/vllm-openai-rocm:${BUILDKITE_COMMIT} vllm/vllm-openai-rocm:latest
+docker tag vllm/vllm-openai-rocm:${BUILDKITE_COMMIT} vllm/vllm-openai-rocm:v${RELEASE_VERSION}
+docker push vllm/vllm-openai-rocm:latest
+docker push vllm/vllm-openai-rocm:v${RELEASE_VERSION}
+\`\`\`
+
 EOF
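As a side note, the two small text transformations this annotation script leans on (version extraction from Dockerfile.rocm_base and the dotted-version-to-path conversion) can be sanity-checked in isolation. A sketch, with an illustrative `ARG BASE_IMAGE` line rather than the real Dockerfile contents:

```bash
#!/bin/bash
# Illustrative input; the real value lives in docker/Dockerfile.rocm_base.
line='ARG BASE_IMAGE=rocm/dev-ubuntu-22.04:7.0-complete'

# Same sed as the script: capture "major.minor" after the image tag's colon.
ROCM_VERSION=$(echo "$line" | sed -E 's/.*:([0-9]+\.[0-9]+).*/\1/')
echo "$ROCM_VERSION"        # prints: 7.0

# Same dot-stripping as the script, producing the wheel-index path segment.
ROCM_VERSION_PATH="rocm$(echo ${ROCM_VERSION} | tr -d '.')"
echo "$ROCM_VERSION_PATH"   # prints: rocm70
```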
@@ -112,7 +112,7 @@ def parse_from_filename(file: str) -> WheelFileInfo:
|
|||||||
|
|
||||||
def generate_project_list(subdir_names: list[str], comment: str = "") -> str:
|
def generate_project_list(subdir_names: list[str], comment: str = "") -> str:
|
||||||
"""
|
"""
|
||||||
Generate project list HTML content linking to each project & variant sub-directory.
|
Generate project list HTML content linking to each project & variant subdirectory.
|
||||||
"""
|
"""
|
||||||
href_tags = []
|
href_tags = []
|
||||||
for name in sorted(subdir_names):
|
for name in sorted(subdir_names):
|
||||||
@@ -168,23 +168,23 @@ def generate_index_and_metadata(
         comment (str | None): Optional comment to include in the generated HTML files.

     First, parse all wheel files to extract metadata.
-    We need to collect all wheel files for each variant, and generate an index for it (in a sub-directory).
+    We need to collect all wheel files for each variant, and generate an index for it (in a subdirectory).
     The index for the default variant (if any) is generated in the root index directory.

     If `default_variant` is provided, all wheels must have variant suffixes, and the default variant index
     is purely a copy of the corresponding variant index, with only the links adjusted.
     Otherwise, all wheels without variant suffixes are treated as the default variant.

-    If `alias_to_default` is provided, an additional alias sub-directory is created, it has the same content
+    If `alias_to_default` is provided, an additional alias subdirectory is created, it has the same content
     as the default variant index, but the links are adjusted accordingly.

     Index directory structure:
     index_base_dir/ (hosted at wheels.vllm.ai/{nightly,$commit,$version}/)
-        index.html  # project list, linking to "vllm/" and other packages, and all variant sub-directories
+        index.html  # project list, linking to "vllm/" and other packages, and all variant subdirectories
         vllm/
             index.html  # package index, pointing to actual files in wheel_base_dir (relative path)
             metadata.json  # machine-readable metadata for all wheels in this package
-        cpu/  # cpu variant sub-directory
+        cpu/  # cpu variant subdirectory
             index.html
             vllm/
                 index.html
@@ -194,7 +194,7 @@ def generate_index_and_metadata(
|
|||||||
vllm/
|
vllm/
|
||||||
index.html
|
index.html
|
||||||
metadata.json
|
metadata.json
|
||||||
cu130/ # cu130 variant sub-directory
|
cu130/ # cu130 variant subdirectory
|
||||||
index.html
|
index.html
|
||||||
vllm/
|
vllm/
|
||||||
index.html
|
index.html
|
||||||
|
|||||||
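The docstring above describes the index layout in prose; for readers following along, here is a minimal sketch of the project-list generation it refers to. The function name, signature, and the `href_tags`/`sorted` loop appear in the diff itself; the exact HTML wrapper is an assumption for illustration, not the script's actual output format.

```python
# Hedged sketch of generate_project_list, assuming a simple HTML wrapper.
def generate_project_list(subdir_names: list[str], comment: str = "") -> str:
    href_tags = []
    for name in sorted(subdir_names):
        # each package or variant subdirectory gets a relative link
        href_tags.append(f'<a href="{name}/">{name}</a><br/>')
    comment_html = f"<!-- {comment} -->\n" if comment else ""
    return f"{comment_html}<html><body>\n" + "\n".join(href_tags) + "\n</body></html>"
```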
@@ -44,6 +44,17 @@ cleanup_docker() {
   fi
 }
 
+cleanup_network() {
+  for node in $(seq 0 $((NUM_NODES-1))); do
+    if docker ps -a -q -f name="node${node}" | grep -q .; then
+      docker stop "node${node}"
+    fi
+  done
+  if docker network ls | grep docker-net; then
+    docker network rm docker-net
+  fi
+}
+
 # Call the cleanup docker function
 cleanup_docker
 

@@ -224,6 +235,35 @@ if [[ $commands == *"--shard-id="* ]]; then
     echo "All shards reported no tests collected. Failing the build."
     exit 1
   fi
 
+elif [[ $commands == *"VLLM_TEST_GROUP_NAME=mi325_4-2-node-tests-4-gpus-in-total"* ]]; then
+
+  export DCKR_VER=$(docker --version | sed 's/Docker version \(.*\), build .*/\1/')
+
+  if [[ "$commands" =~ ^(.*)"["(.*)"] && ["(.*)"]"$ ]]; then
+    prefix=$( echo "${BASH_REMATCH[1]}" | sed 's/;//g')
+    echo "PREFIX: ${prefix}"
+    export composite_command="(command rocm-smi || true)"
+    myIFS=$IFS
+    IFS=','
+    read -ra node0 <<< ${BASH_REMATCH[2]}
+    read -ra node1 <<< ${BASH_REMATCH[3]}
+    IFS=$myIFS
+    for i in "${!node0[@]}";do
+      command_node_0=$(echo ${node0[i]} | sed 's/\"//g')
+      command_node_1=$(echo ${node1[i]} | sed 's/\"//g')
+
+      export commands="./.buildkite/scripts/run-multi-node-test.sh /vllm-workspace/tests 2 2 ${image_name} '${command_node_0}' '${command_node_1}'"
+      echo "COMMANDS: ${commands}"
+      composite_command=$(echo "${composite_command} && ${commands}")
+    done
+    /bin/bash -c "${composite_command}"
+    cleanup_network
+  else
+    echo "Failed to parse node commands! Exiting."
+    cleanup_network
+    exit 111
+  fi
 else
   echo "Render devices: $BUILDKITE_AGENT_META_DATA_RENDER_DEVICES"
   docker run \
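For readers untangling the Bash block above: it splits the step's two bracketed command lists, pairs them element-wise, and chains one `run-multi-node-test.sh` invocation per pair into a single composite command. A hedged restatement in Python (the regex and quote handling are simplified assumptions, not the CI's actual code):

```python
# Illustrative Python restatement of the Bash parsing above.
import re

def build_composite_command(commands: str, image_name: str) -> str | None:
    m = re.match(r'^(.*)\[(.*)\] && \[(.*)\]$', commands)
    if m is None:
        return None  # corresponds to the "Failed to parse node commands!" branch
    node0 = [c.strip().strip('"') for c in m.group(2).split(',')]
    node1 = [c.strip().strip('"') for c in m.group(3).split(',')]
    parts = ['(command rocm-smi || true)']
    for c0, c1 in zip(node0, node1):  # pair the i-th command of each node
        parts.append(
            './.buildkite/scripts/run-multi-node-test.sh /vllm-workspace/tests '
            f"2 2 {image_name} '{c0}' '{c1}'"
        )
    return ' && '.join(parts)
```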
@@ -46,7 +46,7 @@ docker run \
   pytest -v -s v1/sample --ignore=v1/sample/test_logprobs.py --ignore=v1/sample/test_logprobs_e2e.py
   pytest -v -s v1/worker --ignore=v1/worker/test_gpu_model_runner.py
   pytest -v -s v1/structured_output
-  pytest -v -s v1/spec_decode --ignore=v1/spec_decode/test_max_len.py --ignore=v1/spec_decode/test_tree_attention.py --ignore=v1/spec_decode/test_speculators_eagle3.py
+  pytest -v -s v1/spec_decode --ignore=v1/spec_decode/test_max_len.py --ignore=v1/spec_decode/test_tree_attention.py --ignore=v1/spec_decode/test_speculators_eagle3.py --ignore=v1/spec_decode/test_acceptance_length.py
   pytest -v -s v1/kv_connector/unit --ignore=v1/kv_connector/unit/test_multi_connector.py --ignore=v1/kv_connector/unit/test_nixl_connector.py --ignore=v1/kv_connector/unit/test_example_connector.py --ignore=v1/kv_connector/unit/test_lmcache_integration.py
   pytest -v -s v1/test_serial_utils.py
 '
@@ -7,17 +7,19 @@ SUBPATH=$BUILDKITE_COMMIT
 S3_COMMIT_PREFIX="s3://$BUCKET/$SUBPATH/"
 
 RELEASE_VERSION=$(buildkite-agent meta-data get release-version)
-echo "Release version from Buildkite: $RELEASE_VERSION"
 GIT_VERSION=$(git describe --exact-match --tags $BUILDKITE_COMMIT 2>/dev/null)
-if [ -z "$GIT_VERSION" ]; then
+echo "Release version from Buildkite: $RELEASE_VERSION"
+
+if [[ -z "$GIT_VERSION" ]]; then
   echo "[FATAL] Not on a git tag, cannot create release."
   exit 1
 else
   echo "Git version for commit $BUILDKITE_COMMIT: $GIT_VERSION"
 fi
 # sanity check for version mismatch
-if [ "$RELEASE_VERSION" != "$GIT_VERSION" ]; then
-  if [ "$FORCE_RELEASE_IGNORE_VERSION_MISMATCH" == "true" ]; then
+if [[ "$RELEASE_VERSION" != "$GIT_VERSION" ]]; then
+  if [[ "$FORCE_RELEASE_IGNORE_VERSION_MISMATCH" == "true" ]]; then
     echo "[WARNING] Force release and ignore version mismatch"
   else
     echo "[FATAL] Release version from Buildkite does not match Git version."

@@ -27,7 +29,7 @@ fi
 PURE_VERSION=${RELEASE_VERSION#v} # remove leading 'v'
 
 # check pypi token
-if [ -z "$PYPI_TOKEN" ]; then
+if [[ -z "$PYPI_TOKEN" ]]; then
   echo "[FATAL] PYPI_TOKEN is not set."
   exit 1
 else
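The gate above, restated: the release job must run on a commit that carries a git tag, and that tag must match the `release-version` metadata from Buildkite, unless the `FORCE_RELEASE_IGNORE_VERSION_MISMATCH` escape hatch is set. A sketch of the same logic, mirroring the Bash rather than replacing it:

```python
# Sketch of the release-version gate from the script above.
import os
import sys

def check_release_version(release_version: str, git_version: str) -> None:
    if not git_version:
        sys.exit("[FATAL] Not on a git tag, cannot create release.")
    print(f"Git version: {git_version}")
    if release_version != git_version:
        if os.environ.get("FORCE_RELEASE_IGNORE_VERSION_MISMATCH") == "true":
            print("[WARNING] Force release and ignore version mismatch")
        else:
            sys.exit("[FATAL] Release version from Buildkite does not match Git version.")
```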
@@ -35,41 +37,8 @@ else
   export TWINE_PASSWORD="$PYPI_TOKEN"
 fi
 
-# check github token
-if [ -z "$GITHUB_TOKEN" ]; then
-  echo "[FATAL] GITHUB_TOKEN is not set."
-  exit 1
-else
-  export GH_TOKEN="$GITHUB_TOKEN"
-fi
-
 set -x # avoid printing secrets above
 
-# download gh CLI from github
-# Get latest gh CLI version from GitHub API
-GH_VERSION=$(curl -s https://api.github.com/repos/cli/cli/releases/latest | grep '"tag_name":' | sed -E 's/.*"([^"]+)".*/\1/' | sed 's/^v//')
-if [ -z "$GH_VERSION" ]; then
-  echo "[FATAL] Failed to get latest gh CLI version from GitHub"
-  exit 1
-fi
-echo "Downloading gh CLI version: $GH_VERSION"
-GH_TARBALL="gh_${GH_VERSION}_linux_amd64.tar.gz"
-GH_URL="https://github.com/cli/cli/releases/download/v${GH_VERSION}/${GH_TARBALL}"
-GH_INSTALL_DIR="/tmp/gh-install"
-mkdir -p "$GH_INSTALL_DIR"
-pushd "$GH_INSTALL_DIR"
-curl -L -o "$GH_TARBALL" "$GH_URL"
-tar -xzf "$GH_TARBALL"
-GH_BIN=$(realpath $(find . -name "gh" -type f -executable | head -n 1))
-if [ -z "$GH_BIN" ]; then
-  echo "[FATAL] Failed to find gh CLI executable"
-  exit 1
-fi
-echo "gh CLI downloaded successfully, version: $($GH_BIN --version)"
-echo "Last 5 releases on GitHub:" # as a sanity check of gh and GH_TOKEN
-command "$GH_BIN" release list --limit 5
-popd
-
 # install twine from pypi
 python3 -m venv /tmp/vllm-release-env
 source /tmp/vllm-release-env/bin/activate

@@ -89,16 +58,13 @@ echo "Wheels copied to local directory"
 git archive --format=tar.gz --output="$DIST_DIR/vllm-${PURE_VERSION}.tar.gz" $BUILDKITE_COMMIT
 ls -la $DIST_DIR
 
 
 # upload wheels to PyPI (only default variant, i.e. files without '+' in the name)
 PYPI_WHEEL_FILES=$(find $DIST_DIR -name "vllm-${PURE_VERSION}*.whl" -not -name "*+*")
-if [ -z "$PYPI_WHEEL_FILES" ]; then
+if [[ -z "$PYPI_WHEEL_FILES" ]]; then
   echo "No default variant wheels found, quitting..."
   exit 1
 fi
-python3 -m twine check $PYPI_WHEEL_FILES
-python3 -m twine --non-interactive --verbose upload $PYPI_WHEEL_FILES
-echo "Wheels uploaded to PyPI"
-
-# create release on GitHub with the release version and all wheels
-command "$GH_BIN" release create $GIT_VERSION -d --latest --notes-from-tag --verify-tag $DIST_DIR/*.whl
+python3 -m twine check $PYPI_WHEEL_FILES
+python3 -m twine upload --non-interactive --verbose $PYPI_WHEEL_FILES
+echo "Wheels uploaded to PyPI"
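As the comment above notes, variant wheels carry their variant as a local version tag after `+`, so filtering on the absence of `+` keeps only the default variant. The same filter in Python, with made-up filenames in the comment purely for illustration:

```python
# Python equivalent of the `find ... -not -name "*+*"` filter above.
from pathlib import Path

def default_variant_wheels(dist_dir: str, pure_version: str) -> list[Path]:
    return [
        wheel
        for wheel in Path(dist_dir).glob(f"vllm-{pure_version}*.whl")
        # keep e.g. vllm-0.16.0-...whl, drop e.g. vllm-0.16.0+cu130-...whl
        if "+" not in wheel.name
    ]
```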
@@ -640,8 +640,9 @@ steps:
   # grade: Blocking
   source_file_dependencies:
   - csrc/attention/
-  - vllm/attention
   - vllm/v1/attention
+  # TODO: remove this dependency (https://github.com/vllm-project/vllm/issues/32267)
+  - vllm/model_executor/layers/attention
   - tests/kernels/attention
   commands:
   - pytest -v -s kernels/attention --shard-id=$$BUILDKITE_PARALLEL_JOB --num-shards=$$BUILDKITE_PARALLEL_JOB_COUNT

@@ -1131,7 +1132,7 @@ steps:
   - csrc/quantization/cutlass_w8a8/moe/
   - vllm/model_executor/layers/fused_moe/cutlass_moe.py
   - vllm/model_executor/layers/fused_moe/flashinfer_cutlass_moe.py
-  - vllm/model_executor/layers/fused_moe/flashinfer_cutlass_prepare_finalize.py
+  - vllm/model_executor/layers/fused_moe/flashinfer_a2a_prepare_finalize.py
   - vllm/model_executor/layers/quantization/utils/flashinfer_utils.py
   - vllm/v1/attention/backends/flashinfer.py
   - vllm/v1/attention/backends/mla/cutlass_mla.py

@@ -1277,7 +1278,7 @@ steps:
 
 - label: 2 Node Tests (4 GPUs in total) # 16min
   timeout_in_minutes: 30
-  mirror_hardwares: [amdexperimental]
+  mirror_hardwares: [amdexperimental, amdmultinode]
   agent_pool: mi325_4
   # grade: Blocking
   working_dir: "/vllm-workspace/tests"

@@ -1291,15 +1292,15 @@ steps:
   - tests/distributed/
   - tests/examples/offline_inference/data_parallel.py
   commands:
-  - # the following commands are for the first node, with ip 192.168.10.10 (ray environment already set up)
-  - VLLM_TEST_SAME_HOST=0 torchrun --nnodes 2 --nproc-per-node=2 --rdzv_backend=c10d --rdzv_endpoint=192.168.10.10 distributed/test_same_node.py | grep 'Same node test passed'
-  - NUM_NODES=2 torchrun --nnodes 2 --nproc-per-node=2 --rdzv_backend=c10d --rdzv_endpoint=192.168.10.10 distributed/test_node_count.py | grep 'Node count test passed'
+  - # the following commands are for the first node, with ip 192.168.10.10 (ray environment already set up) | grep 'Same node test passed' | grep 'Node count test passed'
+  - VLLM_TEST_SAME_HOST=0 torchrun --nnodes 2 --nproc-per-node=2 --rdzv_backend=c10d --rdzv_endpoint=192.168.10.10 distributed/test_same_node.py
+  - NUM_NODES=2 torchrun --nnodes 2 --nproc-per-node=2 --rdzv_backend=c10d --rdzv_endpoint=192.168.10.10 distributed/test_node_count.py
   - python3 ../examples/offline_inference/data_parallel.py -dp=2 -tp=1 --dp-num-nodes=2 --dp-node-rank=0 --dp-master-addr=192.168.10.10 --dp-master-port=12345 --enforce-eager --trust-remote-code
   - VLLM_MULTI_NODE=1 pytest -v -s distributed/test_multi_node_assignment.py
   - VLLM_MULTI_NODE=1 pytest -v -s distributed/test_pipeline_parallel.py
   - # the following commands are for the second node, with ip 192.168.10.11 (ray environment already set up)
-  - VLLM_TEST_SAME_HOST=0 torchrun --nnodes 2 --nproc-per-node=2 --rdzv_backend=c10d --rdzv_endpoint=192.168.10.10 distributed/test_same_node.py | grep 'Same node test passed'
-  - NUM_NODES=2 torchrun --nnodes 2 --nproc-per-node=2 --rdzv_backend=c10d --rdzv_endpoint=192.168.10.10 distributed/test_node_count.py | grep 'Node count test passed'
+  - VLLM_TEST_SAME_HOST=0 torchrun --nnodes 2 --nproc-per-node=2 --rdzv_backend=c10d --rdzv_endpoint=192.168.10.10 distributed/test_same_node.py
+  - NUM_NODES=2 torchrun --nnodes 2 --nproc-per-node=2 --rdzv_backend=c10d --rdzv_endpoint=192.168.10.10 distributed/test_node_count.py
   - python3 ../examples/offline_inference/data_parallel.py -dp=2 -tp=1 --dp-num-nodes=2 --dp-node-rank=1 --dp-master-addr=192.168.10.10 --dp-master-port=12345 --enforce-eager --trust-remote-code
 
 - label: Distributed Tests (2 GPUs) # 68min

@@ -1508,6 +1509,9 @@ steps:
   source_file_dependencies:
   - vllm/
   commands:
+  # Work around HIP bug tracked here: https://github.com/ROCm/hip/issues/3876
+  # TODO: Remove when the bug is fixed in a future ROCm release
+  - export TORCH_NCCL_BLOCKING_WAIT=1
   # NOTE: don't test llama model here, it seems hf implementation is buggy
   # see https://github.com/vllm-project/vllm/pull/5689 for details
   - pytest -v -s distributed/test_custom_all_reduce.py
@@ -362,7 +362,7 @@ steps:
   - pytest -v -s v1/sample
   - pytest -v -s v1/logits_processors
   - pytest -v -s v1/worker
-  - pytest -v -s v1/spec_decode
+  - pytest -v -s -m 'not slow_test' v1/spec_decode
   - pytest -v -s -m 'not cpu_test' v1/kv_connector/unit
   - pytest -v -s -m 'not cpu_test' v1/metrics
   - pytest -v -s v1/test_oracle.py

@@ -568,8 +568,9 @@ steps:
   mirror_hardwares: [amdexperimental]
   source_file_dependencies:
   - csrc/attention/
-  - vllm/attention
   - vllm/v1/attention
+  # TODO: remove this dependency (https://github.com/vllm-project/vllm/issues/32267)
+  - vllm/model_executor/layers/attention
   - tests/kernels/attention
   commands:
   - pytest -v -s kernels/attention --shard-id=$$BUILDKITE_PARALLEL_JOB --num-shards=$$BUILDKITE_PARALLEL_JOB_COUNT

@@ -1017,7 +1018,7 @@ steps:
   - csrc/quantization/cutlass_w8a8/moe/
   - vllm/model_executor/layers/fused_moe/cutlass_moe.py
   - vllm/model_executor/layers/fused_moe/flashinfer_cutlass_moe.py
-  - vllm/model_executor/layers/fused_moe/flashinfer_cutlass_prepare_finalize.py
+  - vllm/model_executor/layers/fused_moe/flashinfer_a2a_prepare_finalize.py
   - vllm/model_executor/layers/quantization/utils/flashinfer_utils.py
   - vllm/v1/attention/backends/flashinfer.py
   - vllm/v1/attention/backends/mla/cutlass_mla.py

@@ -1316,7 +1317,7 @@ steps:
   - pytest -v -s distributed/test_distributed_oot.py
   - pytest -v -s entrypoints/openai/test_oot_registration.py # it needs a clean process
   - pytest -v -s models/test_oot_registration.py # it needs a clean process
-  - pytest -v -s plugins/lora_resolvers # unit tests for in-tree lora resolver plugins
+  - pytest -v -s plugins/lora_resolvers # unit tests for lora resolver plugins
 
 - label: Pipeline + Context Parallelism Test # 45min
   timeout_in_minutes: 60

@@ -1419,6 +1420,20 @@ steps:
   - TARGET_TEST_SUITE=A100 pytest basic_correctness/ -v -s -m 'distributed(num_gpus=2)'
   - pytest -v -s -x lora/test_mixtral.py
 
+- label: Acceptance Length Test (Large Models) # optional
+  timeout_in_minutes: 120
+  gpu: h100
+  optional: true
+  num_gpus: 1
+  working_dir: "/vllm-workspace/tests"
+  source_file_dependencies:
+  - vllm/v1/spec_decode/
+  - vllm/model_executor/models/mlp_speculator.py
+  - tests/v1/spec_decode/test_acceptance_length.py
+  commands:
+  - export VLLM_ALLOW_INSECURE_SERIALIZATION=1
+  - pytest -v -s v1/spec_decode/test_acceptance_length.py -m slow_test
+
 - label: LM Eval Large Models # optional
   gpu: a100
   optional: true
@@ -4,7 +4,7 @@ depends_on:
 steps:
 - label: V1 attention (H100)
   timeout_in_minutes: 30
-  gpu: h100
+  device: h100
   source_file_dependencies:
   - vllm/config/attention.py
   - vllm/model_executor/layers/attention

@@ -15,7 +15,7 @@ steps:
 
 - label: V1 attention (B200)
   timeout_in_minutes: 30
-  gpu: b200
+  device: b200
   source_file_dependencies:
   - vllm/config/attention.py
   - vllm/model_executor/layers/attention

@@ -5,7 +5,7 @@ steps:
 - label: Fusion and Compile Tests (B200)
   timeout_in_minutes: 40
   working_dir: "/vllm-workspace/"
-  gpu: b200
+  device: b200
   source_file_dependencies:
   - csrc/quantization/fp4/
   - vllm/model_executor/layers/quantization/utils/flashinfer_utils.py

@@ -26,7 +26,7 @@ steps:
   - nvidia-smi
   - pytest -v -s tests/compile/test_fusion_attn.py
   - pytest -v -s tests/compile/test_silu_mul_quant_fusion.py
-  # this runner has 2 GPUs available even though num_gpus=2 is not set
+  # this runner has 2 GPUs available even though num_devices=2 is not set
   - pytest -v -s tests/compile/distributed/test_fusion_all_reduce.py
   # Limit to Inductor partition, no custom ops, and allreduce & attn fusion to reduce running time
   # Wrap with quotes to escape yaml

@@ -37,9 +37,9 @@ steps:
 - label: Fusion E2E (2 GPUs)(B200)
   timeout_in_minutes: 40
   working_dir: "/vllm-workspace/"
-  gpu: b200
+  device: b200
   optional: true
-  num_gpus: 2
+  num_devices: 2
   source_file_dependencies:
   - csrc/quantization/fp4/
   - vllm/model_executor/layers/quantization/utils/flashinfer_utils.py
@@ -5,7 +5,7 @@ steps:
 - label: Distributed Comm Ops
   timeout_in_minutes: 20
   working_dir: "/vllm-workspace/tests"
-  num_gpus: 2
+  num_devices: 2
   source_file_dependencies:
   - vllm/distributed
   - tests/distributed

@@ -18,7 +18,7 @@ steps:
 - label: Distributed (2 GPUs)
   timeout_in_minutes: 90
   working_dir: "/vllm-workspace/tests"
-  num_gpus: 2
+  num_devices: 2
   source_file_dependencies:
   - vllm/compilation/
   - vllm/distributed/

@@ -54,7 +54,7 @@ steps:
 - label: Distributed Tests (4 GPUs)
   timeout_in_minutes: 50
   working_dir: "/vllm-workspace/tests"
-  num_gpus: 4
+  num_devices: 4
   source_file_dependencies:
   - vllm/distributed/
   - tests/distributed/test_utils

@@ -103,8 +103,8 @@ steps:
 
 - label: Distributed Tests (8 GPUs)(H100)
   timeout_in_minutes: 10
-  gpu: h100
-  num_gpus: 8
+  device: h100
+  num_devices: 8
   working_dir: "/vllm-workspace/tests"
   source_file_dependencies:
   - examples/offline_inference/torchrun_dp_example.py

@@ -120,9 +120,9 @@ steps:
   - torchrun --nproc-per-node=8 ../examples/offline_inference/torchrun_dp_example.py --tp-size=2 --pp-size=1 --dp-size=4 --enable-ep
 
 - label: Distributed Tests (4 GPUs)(A100)
-  gpu: a100
+  device: a100
   optional: true
-  num_gpus: 4
+  num_devices: 4
   source_file_dependencies:
   - vllm/
   commands:

@@ -133,26 +133,34 @@ steps:
   - TARGET_TEST_SUITE=A100 pytest basic_correctness/ -v -s -m 'distributed(num_gpus=2)'
   - pytest -v -s -x lora/test_mixtral.py
 
-- label: Distributed Tests (2 GPUs)(H200)
-  gpu: h200
+- label: Sequence Parallel Tests (H100)
+  timeout_in_minutes: 60
+  working_dir: "/vllm-workspace/"
+  device: h100
+  optional: true
+  num_devices: 2
+  commands:
+  - export VLLM_TEST_CLEAN_GPU_MEMORY=1
+  # Run sequence parallel tests
+  - pytest -v -s tests/distributed/test_sequence_parallel.py
+  - pytest -v -s tests/compile/distributed/test_sequence_parallelism.py
+
+- label: Distributed Tests (2 GPUs)(H100)
+  device: h100
   optional: true
   working_dir: "/vllm-workspace/"
-  num_gpus: 2
+  num_devices: 2
   commands:
   - VLLM_TEST_CLEAN_GPU_MEMORY=1 pytest -v -s tests/compile/distributed/test_async_tp.py
-  - pytest -v -s tests/compile/distributed/test_sequence_parallelism.py
-  - pytest -v -s tests/compile/distributed/test_fusion_all_reduce.py
-  - VLLM_TEST_CLEAN_GPU_MEMORY=1 pytest -v -s tests/compile/distributed/test_fusions_e2e.py -k 'not Llama-4'
-  - VLLM_TEST_CLEAN_GPU_MEMORY=1 pytest -v -s tests/distributed/test_sequence_parallel.py
   - pytest -v -s tests/distributed/test_context_parallel.py
-  - CUDA_VISIBLE_DEVICES=1,2 VLLM_USE_DEEP_GEMM=1 VLLM_LOGGING_LEVEL=DEBUG python3 examples/offline_inference/data_parallel.py --model=Qwen/Qwen1.5-MoE-A2.7B -tp=1 -dp=2 --max-model-len=2048 --all2all-backend=deepep_high_throughput
+  - VLLM_USE_DEEP_GEMM=1 VLLM_LOGGING_LEVEL=DEBUG python3 examples/offline_inference/data_parallel.py --model=Qwen/Qwen1.5-MoE-A2.7B -tp=1 -dp=2 --max-model-len=2048 --all2all-backend=deepep_high_throughput
   - pytest -v -s tests/v1/distributed/test_dbo.py
 
 - label: Distributed Tests (2 GPUs)(B200)
-  gpu: b200
+  device: b200
   optional: true
   working_dir: "/vllm-workspace/"
-  num_gpus: 2
+  num_devices: 2
   commands:
   - pytest -v -s tests/distributed/test_context_parallel.py
   - pytest -v -s tests/distributed/test_nccl_symm_mem_allreduce.py

@@ -161,8 +169,9 @@ steps:
 - label: 2 Node Test (4 GPUs)
   timeout_in_minutes: 30
   working_dir: "/vllm-workspace/tests"
-  num_gpus: 2
+  num_devices: 2
   num_nodes: 2
+  no_plugin: true
   source_file_dependencies:
   - vllm/distributed/
   - vllm/engine/
@@ -171,12 +180,12 @@ steps:
   - tests/distributed/
   - tests/examples/offline_inference/data_parallel.py
   commands:
-  - ./.buildkite/scripts/run-multi-node-test.sh /vllm-workspace/tests 2 2 public.ecr.aws/q9t5s3a7/vllm-ci-postmerge-repo:0bec63fa317e1fbd62e19b0fc31c43c81bf89077 "VLLM_TEST_SAME_HOST=0 torchrun --nnodes 2 --nproc-per-node=2 --rdzv_backend=c10d --rdzv_endpoint=192.168.10.10 distributed/test_same_node.py | grep 'Same node test passed' && NUM_NODES=2 torchrun --nnodes 2 --nproc-per-node=2 --rdzv_backend=c10d --rdzv_endpoint=192.168.10.10 distributed/test_node_count.py | grep 'Node count test passed' && python3 ../examples/offline_inference/data_parallel.py -dp=2 -tp=1 --dp-num-nodes=2 --dp-node-rank=0 --dp-master-addr=192.168.10.10 --dp-master-port=12345 --enforce-eager --trust-remote-code && VLLM_MULTI_NODE=1 pytest -v -s distributed/test_multi_node_assignment.py && VLLM_MULTI_NODE=1 pytest -v -s distributed/test_pipeline_parallel.py" "VLLM_TEST_SAME_HOST=0 torchrun --nnodes 2 --nproc-per-node=2 --rdzv_backend=c10d --rdzv_endpoint=192.168.10.10 distributed/test_same_node.py | grep 'Same node test passed' && NUM_NODES=2 torchrun --nnodes 2 --nproc-per-node=2 --rdzv_backend=c10d --rdzv_endpoint=192.168.10.10 distributed/test_node_count.py | grep 'Node count test passed' && python3 ../examples/offline_inference/data_parallel.py -dp=2 -tp=1 --dp-num-nodes=2 --dp-node-rank=1 --dp-master-addr=192.168.10.10 --dp-master-port=12345 --enforce-eager --trust-remote-code"
+  - ./.buildkite/scripts/run-multi-node-test.sh /vllm-workspace/tests 2 2 $IMAGE_TAG "VLLM_TEST_SAME_HOST=0 torchrun --nnodes 2 --nproc-per-node=2 --rdzv_backend=c10d --rdzv_endpoint=192.168.10.10 distributed/test_same_node.py | grep 'Same node test passed' && NUM_NODES=2 torchrun --nnodes 2 --nproc-per-node=2 --rdzv_backend=c10d --rdzv_endpoint=192.168.10.10 distributed/test_node_count.py | grep 'Node count test passed' && python3 ../examples/offline_inference/data_parallel.py -dp=2 -tp=1 --dp-num-nodes=2 --dp-node-rank=0 --dp-master-addr=192.168.10.10 --dp-master-port=12345 --enforce-eager --trust-remote-code && VLLM_MULTI_NODE=1 pytest -v -s distributed/test_multi_node_assignment.py && VLLM_MULTI_NODE=1 pytest -v -s distributed/test_pipeline_parallel.py" "VLLM_TEST_SAME_HOST=0 torchrun --nnodes 2 --nproc-per-node=2 --rdzv_backend=c10d --rdzv_endpoint=192.168.10.10 distributed/test_same_node.py | grep 'Same node test passed' && NUM_NODES=2 torchrun --nnodes 2 --nproc-per-node=2 --rdzv_backend=c10d --rdzv_endpoint=192.168.10.10 distributed/test_node_count.py | grep 'Node count test passed' && python3 ../examples/offline_inference/data_parallel.py -dp=2 -tp=1 --dp-num-nodes=2 --dp-node-rank=1 --dp-master-addr=192.168.10.10 --dp-master-port=12345 --enforce-eager --trust-remote-code"
 
 - label: Distributed NixlConnector PD accuracy (4 GPUs)
   timeout_in_minutes: 30
   working_dir: "/vllm-workspace/tests"
-  num_gpus: 4
+  num_devices: 4
   source_file_dependencies:
   - vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py
   - tests/v1/kv_connector/nixl_integration/

@@ -184,10 +193,21 @@ steps:
   - uv pip install --system -r /vllm-workspace/requirements/kv_connectors.txt
   - bash v1/kv_connector/nixl_integration/config_sweep_accuracy_test.sh
 
+- label: DP EP Distributed NixlConnector PD accuracy tests (4 GPUs)
+  timeout_in_minutes: 30
+  working_dir: "/vllm-workspace/tests"
+  num_devices: 4
+  source_file_dependencies:
+  - vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py
+  - tests/v1/kv_connector/nixl_integration/
+  commands:
+  - uv pip install --system -r /vllm-workspace/requirements/kv_connectors.txt
+  - DP_EP=1 bash v1/kv_connector/nixl_integration/config_sweep_accuracy_test.sh
+
 - label: Pipeline + Context Parallelism (4 GPUs))
   timeout_in_minutes: 60
   working_dir: "/vllm-workspace/tests"
-  num_gpus: 4
+  num_devices: 4
   source_file_dependencies:
   - vllm/distributed/
   - vllm/engine/

@@ -197,3 +217,45 @@ steps:
   commands:
   - pytest -v -s distributed/test_pp_cudagraph.py
   - pytest -v -s distributed/test_pipeline_parallel.py
+
+- label: Hopper Fusion E2E Tests (H100)
+  timeout_in_minutes: 70
+  working_dir: "/vllm-workspace/"
+  device: h100
+  optional: true
+  source_file_dependencies:
+  - csrc/quantization/fp4/
+  - vllm/model_executor/layers/quantization/utils/flashinfer_utils.py
+  - vllm/v1/attention/backends/flashinfer.py
+  - vllm/compilation/
+  # can affect pattern matching
+  - vllm/model_executor/layers/layernorm.py
+  - vllm/model_executor/layers/activation.py
+  - vllm/model_executor/layers/quantization/input_quant_fp8.py
+  - tests/compile/test_fusion_attn.py
+  commands:
+  - export VLLM_TEST_CLEAN_GPU_MEMORY=1
+  # skip Llama-4 since it does not fit on this device
+  - pytest -v -s tests/compile/test_fusion_attn.py -k 'not Llama-4'
+
+- label: Hopper Fusion Distributed E2E Tests (2xH100)
+  timeout_in_minutes: 70
+  working_dir: "/vllm-workspace/"
+  device: h100
+  optional: true
+  num_devices: 2
+  source_file_dependencies:
+  - csrc/quantization/fp4/
+  - vllm/model_executor/layers/quantization/utils/flashinfer_utils.py
+  - vllm/v1/attention/backends/flashinfer.py
+  - vllm/compilation/
+  # can affect pattern matching
+  - vllm/model_executor/layers/layernorm.py
+  - vllm/model_executor/layers/activation.py
+  - vllm/model_executor/layers/quantization/input_quant_fp8.py
+  - tests/compile/distributed/test_fusions_e2e.py
+  commands:
+  - export VLLM_TEST_CLEAN_GPU_MEMORY=1
+  # Run all e2e fusion tests
+  - pytest -v -s tests/compile/distributed/test_fusions_e2e.py -k 'not Llama-4'
+  - pytest -v -s tests/compile/distributed/test_fusion_all_reduce.py
@@ -4,27 +4,27 @@ depends_on:
 steps:
 - label: DeepSeek V2-Lite Accuracy
   timeout_in_minutes: 60
-  gpu: h100
+  device: h100
   optional: true
-  num_gpus: 4
+  num_devices: 4
   working_dir: "/vllm-workspace"
   commands:
   - bash .buildkite/scripts/scheduled_integration_test/deepseek_v2_lite_ep_eplb.sh 0.25 200 8010
 
 - label: Qwen3-30B-A3B-FP8-block Accuracy
   timeout_in_minutes: 60
-  gpu: h100
+  device: h100
   optional: true
-  num_gpus: 4
+  num_devices: 4
   working_dir: "/vllm-workspace"
   commands:
   - bash .buildkite/scripts/scheduled_integration_test/qwen30b_a3b_fp8_block_ep_eplb.sh 0.8 200 8020
 
 - label: Qwen3-30B-A3B-FP8-block Accuracy (B200)
   timeout_in_minutes: 60
-  gpu: b200
+  device: b200
   optional: true
-  num_gpus: 2
+  num_devices: 2
   working_dir: "/vllm-workspace"
   commands:
   - bash .buildkite/scripts/scheduled_integration_test/qwen30b_a3b_fp8_block_ep_eplb.sh 0.8 200 8020 2 1

@@ -33,10 +33,11 @@ steps:
   timeout_in_minutes: 30
   optional: true
   soft_fail: true
-  num_gpus: 2
+  num_devices: 2
   working_dir: "/vllm-workspace"
   source_file_dependencies:
   - vllm/
   - .buildkite/scripts/run-prime-rl-test.sh
   commands:
+  - nvidia-smi
   - bash .buildkite/scripts/run-prime-rl-test.sh

@@ -23,4 +23,8 @@ steps:
   # TODO: accuracy does not match, whether setting
   # VLLM_USE_FLASHINFER_SAMPLER or not on H100.
   - pytest -v -s v1/e2e
-  - pytest -v -s v1/engine
+  # Run this test standalone for now;
+  # need to untangle use (implicit) use of spawn/fork across the tests.
+  - pytest -v -s v1/engine/test_preprocess_error_handling.py
+  # Run the rest of v1/engine tests
+  - pytest -v -s v1/engine --ignore v1/engine/test_preprocess_error_handling.py
@@ -14,7 +14,7 @@ steps:
 - label: EPLB Execution
   timeout_in_minutes: 20
   working_dir: "/vllm-workspace/tests"
-  num_gpus: 4
+  num_devices: 4
   source_file_dependencies:
   - vllm/distributed/eplb
   - tests/distributed/test_eplb_execute.py

@@ -15,8 +15,9 @@ steps:
   timeout_in_minutes: 35
   source_file_dependencies:
   - csrc/attention/
-  - vllm/attention
   - vllm/v1/attention
+  # TODO: remove this dependency (https://github.com/vllm-project/vllm/issues/32267)
+  - vllm/model_executor/layers/attention
   - tests/kernels/attention
   commands:
   - pytest -v -s kernels/attention --shard-id=$$BUILDKITE_PARALLEL_JOB --num-shards=$$BUILDKITE_PARALLEL_JOB_COUNT

@@ -57,8 +58,8 @@ steps:
 
 - label: Kernels DeepGEMM Test (H100)
   timeout_in_minutes: 45
-  gpu: h100
-  num_gpus: 1
+  device: h100
+  num_devices: 1
   source_file_dependencies:
   - tools/install_deepgemm.sh
   - vllm/utils/deep_gemm.py

@@ -77,7 +78,7 @@ steps:
 - label: Kernels (B200)
   timeout_in_minutes: 30
   working_dir: "/vllm-workspace/"
-  gpu: b200
+  device: b200
   # optional: true
   source_file_dependencies:
   - csrc/quantization/fp4/

@@ -85,7 +86,7 @@ steps:
   - csrc/quantization/cutlass_w8a8/moe/
   - vllm/model_executor/layers/fused_moe/cutlass_moe.py
   - vllm/model_executor/layers/fused_moe/flashinfer_cutlass_moe.py
-  - vllm/model_executor/layers/fused_moe/flashinfer_cutlass_prepare_finalize.py
+  - vllm/model_executor/layers/fused_moe/flashinfer_a2a_prepare_finalize.py
   - vllm/model_executor/layers/quantization/utils/flashinfer_utils.py
   - vllm/v1/attention/backends/flashinfer.py
   - vllm/v1/attention/backends/mla/cutlass_mla.py

@@ -115,3 +116,54 @@ steps:
   - pytest -v -s tests/kernels/moe/test_ocp_mx_moe.py
   - pytest -v -s tests/kernels/moe/test_flashinfer.py
   - pytest -v -s tests/kernels/moe/test_cutedsl_moe.py
+  # e2e
+  - pytest -v -s tests/models/quantization/test_nvfp4.py
+
+- label: Kernels Helion Test
+  timeout_in_minutes: 30
+  device: h100
+  source_file_dependencies:
+  - vllm/utils/import_utils.py
+  - tests/kernels/helion/
+  commands:
+  - pip install helion
+  - pytest -v -s kernels/helion/
+
+
+- label: Kernels FP8 MoE Test (1 H100)
+  timeout_in_minutes: 90
+  device: h100
+  num_devices: 1
+  optional: true
+  commands:
+  - pytest -v -s kernels/moe/test_cutlass_moe.py
+  - pytest -v -s kernels/moe/test_flashinfer.py
+  - pytest -v -s kernels/moe/test_gpt_oss_triton_kernels.py
+  - pytest -v -s kernels/moe/test_modular_oai_triton_moe.py
+  - pytest -v -s kernels/moe/test_moe.py
+  # - pytest -v -s kernels/moe/test_block_fp8.py - failing on main
+  - pytest -v -s kernels/moe/test_block_int8.py
+  - pytest -v -s kernels/moe/test_triton_moe_no_act_mul.py
+  - pytest -v -s kernels/moe/test_triton_moe_ptpc_fp8.py
+
+- label: Kernels FP8 MoE Test (2 H100s)
+  timeout_in_minutes: 90
+  device: h100
+  num_devices: 2
+  optional: true
+  commands:
+  - pytest -v -s kernels/moe/test_deepep_deepgemm_moe.py
+  - pytest -v -s kernels/moe/test_deepep_moe.py
+  - pytest -v -s kernels/moe/test_pplx_cutlass_moe.py
+  # - pytest -v -s kernels/moe/test_pplx_moe.py - failing on main
+
+- label: Kernels Fp4 MoE Test (B200)
+  timeout_in_minutes: 60
+  device: b200
+  num_devices: 1
+  optional: true
+  commands:
+  - pytest -v -s kernels/moe/test_cutedsl_moe.py
+  - pytest -v -s kernels/moe/test_flashinfer_moe.py
+  - pytest -v -s kernels/moe/test_nvfp4_moe.py
+  - pytest -v -s kernels/moe/test_ocp_mx_moe.py
@@ -12,9 +12,9 @@ steps:
   - pytest -s -v evals/gsm8k/test_gsm8k_correctness.py --config-list-file=configs/models-small.txt
 
 - label: LM Eval Large Models (4 GPUs)(A100)
-  gpu: a100
+  device: a100
   optional: true
-  num_gpus: 4
+  num_devices: 4
   working_dir: "/vllm-workspace/.buildkite/lm-eval-harness"
   source_file_dependencies:
   - csrc/

@@ -24,9 +24,9 @@ steps:
   - pytest -s -v test_lm_eval_correctness.py --config-list-file=configs/models-large.txt --tp-size=4
 
 - label: LM Eval Large Models (4 GPUs)(H100)
-  gpu: h100
+  device: h100
   optional: true
-  num_gpus: 4
+  num_devices: 4
   working_dir: "/vllm-workspace/.buildkite/lm-eval-harness"
   source_file_dependencies:
   - csrc/

@@ -37,10 +37,39 @@ steps:
 
 - label: LM Eval Small Models (B200)
   timeout_in_minutes: 120
-  gpu: b200
+  device: b200
   optional: true
   source_file_dependencies:
   - csrc/
   - vllm/model_executor/layers/quantization
   commands:
   - pytest -s -v evals/gsm8k/test_gsm8k_correctness.py --config-list-file=configs/models-blackwell.txt
+
+- label: LM Eval Large Models (H200)
+  timeout_in_minutes: 60
+  device: h200
+  optional: true
+  num_devices: 8
+  commands:
+  - pytest -s -v evals/gsm8k/test_gsm8k_correctness.py --config-list-file=configs/models-h200.txt
+
+- label: MoE Refactor Integration Test (H100 - TEMPORARY)
+  device: h100
+  optional: true
+  num_devices: 2
+  commands:
+  - pytest -s -v evals/gsm8k/test_gsm8k_correctness.py --config-list-file=evals/gsm8k/configs/moe-refactor/config-h100.txt
+
+- label: MoE Refactor Integration Test (B200 - TEMPORARY)
+  device: b200
+  optional: true
+  num_devices: 2
+  commands:
+  - pytest -s -v evals/gsm8k/test_gsm8k_correctness.py --config-list-file=evals/gsm8k/configs/moe-refactor/config-b200.txt
+
+- label: MoE Refactor Integration Test (B200 DP - TEMPORARY)
+  device: b200
+  optional: true
+  num_devices: 2
+  commands:
+  - pytest -s -v evals/gsm8k/test_gsm8k_correctness.py --config-list-file=evals/gsm8k/configs/moe-refactor-dp-ep/config-b200.txt
@@ -14,7 +14,7 @@ steps:
 
 - label: LoRA TP (Distributed)
   timeout_in_minutes: 30
-  num_gpus: 4
+  num_devices: 4
   source_file_dependencies:
   - vllm/lora
   - tests/lora

@@ -27,11 +27,12 @@ steps:
   - pytest -v -s entrypoints/openai/correctness/test_lmeval.py::test_lm_eval_accuracy_v1_engine
 
 - label: V1 Others (CPU)
-  depends_on: ~
+  depends_on:
+  - image-build-cpu
   source_file_dependencies:
   - vllm/
   - tests/v1
-  no_gpu: true
+  device: cpu
   commands:
   # split the test to avoid interference
   - pytest -v -s -m 'cpu_test' v1/core

@@ -82,7 +83,7 @@ steps:
 
 - label: Metrics, Tracing (2 GPUs)
   timeout_in_minutes: 20
-  num_gpus: 2
+  num_devices: 2
   source_file_dependencies:
   - vllm/
   - tests/v1/tracing

@@ -114,7 +115,8 @@ steps:
   - pytest -v -s utils_
 
 - label: Async Engine, Inputs, Utils, Worker, Config (CPU)
-  depends_on: ~
+  depends_on:
+  - image-build-cpu
   timeout_in_minutes: 30
   source_file_dependencies:
   - vllm/

@@ -127,7 +129,7 @@ steps:
   - tests/tool_parsers
   - tests/transformers_utils
   - tests/config
-  no_gpu: true
+  device: cpu
   commands:
   - python3 standalone_tests/lazy_imports.py
   - pytest -v -s test_inputs.py
@@ -142,7 +144,7 @@ steps:
 - label: GPT-OSS Eval (B200)
   timeout_in_minutes: 60
   working_dir: "/vllm-workspace/"
-  gpu: b200
+  device: b200
   optional: true
   source_file_dependencies:
   - tests/evals/gpt_oss

@@ -155,7 +157,7 @@ steps:
 
 - label: Batch Invariance (H100)
   timeout_in_minutes: 25
-  gpu: h100
+  device: h100
   source_file_dependencies:
   - vllm/v1/attention
   - vllm/model_executor/layers

@@ -39,12 +39,14 @@ steps:
   - pytest -v -s models/test_transformers.py models/test_registry.py
 
 - label: Basic Models Test (Other CPU) # 5min
+  depends_on:
+  - image-build-cpu
   timeout_in_minutes: 10
   source_file_dependencies:
   - vllm/
   - tests/models/test_utils.py
   - tests/models/test_vision.py
-  no_gpu: true
+  device: cpu
   commands:
   - pytest -v -s models/test_utils.py models/test_vision.py
 
@@ -5,7 +5,7 @@ steps:
 - label: Distributed Model Tests (2 GPUs)
   timeout_in_minutes: 50
   working_dir: "/vllm-workspace/tests"
-  num_gpus: 2
+  num_devices: 2
   source_file_dependencies:
   - vllm/model_executor/model_loader/sharded_state_loader.py
   - vllm/model_executor/models/

@@ -14,11 +14,13 @@ steps:
   - cd .. && VLLM_WORKER_MULTIPROC_METHOD=spawn pytest -v -s tests/models/multimodal/generation/test_whisper.py -m core_model # Otherwise, mp_method="spawn" doesn't work
 
 - label: Multi-Modal Processor Test (CPU)
+  depends_on:
+  - image-build-cpu
   timeout_in_minutes: 60
   source_file_dependencies:
   - vllm/
   - tests/models/multimodal
-  no_gpu: true
+  device: cpu
   commands:
   - pip install git+https://github.com/TIGER-AI-Lab/Mantis.git
   - pytest -v -s models/multimodal/processing --ignore models/multimodal/processing/test_tensor_schema.py

@@ -5,7 +5,7 @@ steps:
 - label: Plugin Tests (2 GPUs)
   timeout_in_minutes: 60
   working_dir: "/vllm-workspace/tests"
-  num_gpus: 2
+  num_devices: 2
   source_file_dependencies:
   - vllm/plugins/
   - tests/plugins/
@@ -16,14 +16,14 @@ steps:
   # https://github.com/pytorch/ao/issues/2919, we'll have to skip new torchao tests for now
   # we can only upgrade after this is resolved
   # TODO(jerryzh168): resolve the above comment
-  - uv pip install --system torchao==0.13.0 --index-url https://download.pytorch.org/whl/cu129
+  - uv pip install --system torchao==0.14.1 --index-url https://download.pytorch.org/whl/cu129
   - uv pip install --system conch-triton-kernels
   - VLLM_TEST_FORCE_LOAD_FORMAT=auto pytest -v -s quantization/ --ignore quantization/test_blackwell_moe.py
 
 - label: Quantized MoE Test (B200)
   timeout_in_minutes: 60
   working_dir: "/vllm-workspace/"
-  gpu: b200
+  device: b200
   source_file_dependencies:
   - tests/quantization/test_blackwell_moe.py
   - vllm/model_executor/models/deepseek_v2.py

@@ -5,7 +5,7 @@ steps:
 - label: Weight Loading Multiple GPU # 33min
   timeout_in_minutes: 45
   working_dir: "/vllm-workspace/tests"
-  num_gpus: 2
+  num_devices: 2
   optional: true
   source_file_dependencies:
   - vllm/

@@ -15,8 +15,8 @@ steps:
 
 - label: Weight Loading Multiple GPU - Large Models # optional
   working_dir: "/vllm-workspace/tests"
-  num_gpus: 2
-  gpu: a100
+  num_devices: 2
+  device: a100
   optional: true
   source_file_dependencies:
   - vllm/
.github/CODEOWNERS (16 changes, vendored)
@@ -2,8 +2,8 @@
 # for more info about CODEOWNERS file

 # This lists cover the "core" components of vLLM that require careful review
-/vllm/attention @LucasWilkinson
 /vllm/executor/executor_base.py @zhuohan123 @youkaichao @alexm-redhat @njhill @22quinn
+/vllm/model_executor/layers/attention @LucasWilkinson
 /vllm/model_executor/layers/fused_moe @mgoin @pavanimajety
 /vllm/model_executor/layers/quantization @mgoin @robertgshaw2-redhat @tlrmchlsmth @yewentao256 @pavanimajety
 /vllm/model_executor/layers/mamba @tdoublep
@@ -16,7 +16,7 @@
 /vllm/entrypoints @aarnphm @chaunceyjiang
 /vllm/tool_parsers @aarnphm @chaunceyjiang
 /vllm/compilation @zou3519 @youkaichao @ProExpertProg
-/vllm/distributed/kv_transfer @NickLucche @ApostaC
+/vllm/distributed/kv_transfer @NickLucche @ApostaC @orozery
 CMakeLists.txt @tlrmchlsmth @LucasWilkinson

 # Any change to the VllmConfig changes can have a large user-facing impact,
@@ -30,12 +30,14 @@ CMakeLists.txt @tlrmchlsmth @LucasWilkinson
 /vllm/v1/attention/backends/mla @pavanimajety
 /vllm/v1/attention/backends/flashinfer.py @mgoin @pavanimajety
 /vllm/v1/attention/backends/triton_attn.py @tdoublep
-/vllm/v1/core @WoosukKwon @robertgshaw2-redhat @njhill @ywang96 @alexm-redhat @heheda12345 @ApostaC
+/vllm/v1/core @WoosukKwon @robertgshaw2-redhat @njhill @ywang96 @alexm-redhat @heheda12345 @ApostaC @orozery
 /vllm/v1/sample @22quinn @houseroad @njhill
 /vllm/v1/spec_decode @benchislett @luccafong
 /vllm/v1/structured_output @mgoin @russellb @aarnphm @benchislett
 /vllm/v1/kv_cache_interface.py @heheda12345
-/vllm/v1/offloading @ApostaC
+/vllm/v1/kv_offload @ApostaC @orozery
+/vllm/v1/worker/gpu/kv_connector.py @orozery
+/vllm/v1/worker/kv_connector_model_runner_mixin.py @orozery

 # Model runner V2
 /vllm/v1/worker/gpu @WoosukKwon
@@ -54,13 +56,13 @@ CMakeLists.txt @tlrmchlsmth @LucasWilkinson
 /tests/test_inputs.py @DarkLight1337 @ywang96
 /tests/v1/entrypoints/llm/test_struct_output_generate.py @mgoin @russellb @aarnphm
 /tests/v1/structured_output @mgoin @russellb @aarnphm
-/tests/v1/core @WoosukKwon @robertgshaw2-redhat @njhill @ywang96 @alexm-redhat @heheda12345 @ApostaC
+/tests/v1/core @WoosukKwon @robertgshaw2-redhat @njhill @ywang96 @alexm-redhat @heheda12345 @ApostaC @orozery
 /tests/weight_loading @mgoin @youkaichao @yewentao256
 /tests/lora @jeejeelee
 /tests/models/language/generation/test_hybrid.py @tdoublep
 /tests/v1/kv_connector/nixl_integration @NickLucche
-/tests/v1/kv_connector @ApostaC
-/tests/v1/offloading @ApostaC
+/tests/v1/kv_connector @ApostaC @orozery
+/tests/v1/kv_offload @ApostaC @orozery
 /tests/v1/determinism @yewentao256

 # Transformers modeling backend
@@ -154,6 +154,10 @@ repos:
     files: ^docker/(Dockerfile|versions\.json)$
     pass_filenames: false
     additional_dependencies: [dockerfile-parse]
+  - id: attention-backend-docs
+    name: Check attention backend documentation is up to date
+    entry: python tools/pre_commit/generate_attention_backend_docs.py --check
+    language: python
   # Keep `suggestion` last
   - id: suggestion
     name: Suggestion
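For reviewers who want to exercise the new hook locally, a minimal sketch, assuming a standard pre-commit setup in a checkout of this branch:

```bash
# Run only the new attention-backend docs-consistency hook on the whole tree
pre-commit run attention-backend-docs --all-files
```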
@@ -458,7 +458,6 @@ if(VLLM_GPU_LANG STREQUAL "CUDA")
   endif()

   set(MARLIN_SRCS
-    "csrc/quantization/marlin/sparse/marlin_24_cuda_kernel.cu"
     "csrc/quantization/marlin/marlin.cu"
     "csrc/quantization/marlin/marlin_int4_fp8_preprocess.cu"
     "csrc/quantization/marlin/gptq_marlin_repack.cu"
benchmarks/attention_benchmarks/README.md (new file, 266 lines)
@@ -0,0 +1,266 @@
# vLLM Attention Benchmarking Suite

Fast, flexible benchmarking for vLLM attention and MLA backends with an extended batch specification grammar.

## Quick Start

```bash
cd benchmarks/attention_benchmarks

# Run a pre-configured benchmark
python benchmark.py --config configs/mla_decode.yaml
python benchmark.py --config configs/mla_mixed_batch.yaml
python benchmark.py --config configs/speculative_decode.yaml
python benchmark.py --config configs/standard_attention.yaml
python benchmark.py --config configs/reorder_threshold.yaml

# Or run custom benchmarks
python benchmark.py \
    --backends flash flashinfer \
    --batch-specs "q2k" "8q1s1k" "2q2k_32q1s1k" \
    --output-csv results.csv
```

## Simplified Batch Specification Grammar

Express workloads concisely using query length and sequence length:

```python
"q2k"           # 2048-token prefill (q_len=2048, seq_len=2048)
"q1s1k"         # Decode: 1 token with 1K sequence
"8q1s1k"        # 8 decode requests
"q4s1k"         # 4-token extend (e.g., spec decode)
"2q2k_32q1s1k"  # Mixed: 2 prefills + 32 decodes
"16q4s1k"       # 16 spec decode (4 tokens each)
```

### Grammar Rule

```text
Format: (<count>?) q<q_len>(k?) (s<seq_len>(k?))?

- count: Number of identical requests (optional, default=1)
- q_len: Query length (number of new tokens)
- seq_len: Total sequence length (optional, defaults to q_len for prefill)
- 'k': Multiplies value by 1024

Mixed batches: Use _ to combine (e.g., "2q2k_32q1s1k")
```

**Note**: Decode, prefill, and spec decode are just different query lengths - no special syntax needed!
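A quick way to sanity-check the grammar is to parse a spec with `parse_batch_spec` from `batch_spec.py` (added later in this diff); a minimal sketch, assuming the suite directory is on `sys.path`:

```python
from batch_spec import parse_batch_spec

reqs = parse_batch_spec("2q2k_32q1s1k")
assert len(reqs) == 34                     # 2 prefills + 32 decodes
assert reqs[0].as_tuple() == (2048, 2048)  # prefill: q_len == kv_len
assert reqs[2].as_tuple() == (1, 1024)     # decode: q_len == 1, 1K context
```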
## Pre-configured Benchmarks

The suite includes several pre-configured YAML benchmark configurations:
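The config files themselves are not reproduced in this diff. As a hypothetical illustration of the keys `benchmark.py` understands (`description`, `backends`, `batch_specs`, `model`, `benchmark`, `output`; see `main()` in `benchmark.py` below), a config might look like:

```yaml
# Hypothetical config in the style of configs/standard_attention.yaml
description: Compare standard attention backends on mixed batches
backends: [flash, triton, flashinfer]
batch_specs: ["q2k", "8q1s1k", "2q2k_32q1s1k"]
model:
  num_layers: 10
  head_dim: 128
  num_q_heads: 32
  num_kv_heads: 8
  block_size: 16
benchmark:
  device: cuda:0
  repeats: 5
  warmup_iters: 3
output:
  csv: standard_attention.csv
```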
### MLA Decode Benchmark

Tests pure decode performance across MLA backends with varying batch sizes and sequence lengths.

```bash
python benchmark.py --config configs/mla_decode.yaml
```

### MLA Mixed Batch Benchmark

Tests chunked prefill performance with mixed prefill + decode batches.

```bash
python benchmark.py --config configs/mla_mixed_batch.yaml
```

### Speculative Decoding Benchmark

Tests speculative decode scenarios (K-token verification) and reorder_batch_threshold optimization.

```bash
python benchmark.py --config configs/speculative_decode.yaml
```

### Standard Attention Benchmark

Tests standard attention backends (Flash/Triton/FlashInfer) with pure prefill, decode, and mixed batches.

```bash
python benchmark.py --config configs/standard_attention.yaml
```

### Reorder Threshold Study

**Question:** At what query length does the prefill pipeline become faster than the decode pipeline?

Tests query lengths from 1-1024 across 9 batch sizes to find the crossover point. Uses `decode_vs_prefill` mode to compare both pipelines for each query length.

```bash
python benchmark.py --config configs/reorder_threshold.yaml
```

---

## Universal Benchmark

The `benchmark.py` script handles **all** backends - both standard attention and MLA.

### Standard Attention (Flash/Triton/FlashInfer)

```bash
python benchmark.py \
    --backends flash triton flashinfer \
    --batch-specs "q2k" "8q1s1k" "2q2k_32q1s1k" \
    --num-layers 10 \
    --repeats 5 \
    --output-csv results.csv
```

### MLA Backends

```bash
# Compare all MLA backends
python benchmark.py \
    --backends cutlass_mla flashinfer_mla flashattn_mla flashmla \
    --batch-specs "64q1s1k" "64q1s4k" \
    --output-csv mla_results.csv
```

### Parameter Sweeps

Use `--sweep-param` and `--sweep-values` to run parameter sweeps from the CLI:

#### CUTLASS MLA num-splits Optimization

**Question:** What is the optimal `num_kv_splits` for CUTLASS MLA?

```bash
python benchmark.py \
    --backend cutlass_mla \
    --batch-specs "64q1s1k" "64q1s4k" "64q1s16k" \
    --sweep-param num_kv_splits \
    --sweep-values 1 2 4 8 16 \
    --output-json optimal_splits.json
```

#### Reorder Batch Threshold Optimization

**Question:** What's the optimal `reorder_batch_threshold` for speculative decoding?

```bash
python benchmark.py \
    --backend flashmla \
    --batch-specs "q4s1k" "q8s2k" \
    --sweep-param reorder_batch_threshold \
    --sweep-values 1 4 16 64 256 512 \
    --output-csv threshold_sweep.csv
```

### All Command-Line Options

```text
--config CONFIG                   # Path to YAML config file (overrides other args)
--backends BACKEND [BACKEND ...]  # flash, triton, flashinfer, cutlass_mla,
                                  # flashinfer_mla, flashattn_mla, flashmla
--backend BACKEND                 # Single backend (alternative to --backends)
--batch-specs SPEC [SPEC ...]     # Batch specifications using extended grammar

# Model configuration
--num-layers N                    # Number of layers
--head-dim N                      # Head dimension
--num-q-heads N                   # Query heads
--num-kv-heads N                  # KV heads
--block-size N                    # Block size

# Benchmark settings
--device DEVICE                   # Device (default: cuda:0)
--repeats N                       # Repetitions
--warmup-iters N                  # Warmup iterations
--profile-memory                  # Profile memory usage

# Parameter sweeps
--sweep-param PARAM               # Parameter name to sweep (e.g., num_kv_splits,
                                  # reorder_batch_threshold)
--sweep-values N [N ...]          # Values to sweep for the parameter

# Output
--output-csv FILE                 # Save to CSV
--output-json FILE                # Save to JSON
```

## Hardware Requirements

| Backend | Hardware |
|---------|----------|
| Flash/Triton/FlashInfer | Any CUDA GPU |
| CUTLASS MLA | Blackwell (SM100+) |
| FlashAttn MLA | Hopper (SM90+) |
| FlashMLA | Hopper (SM90+) |
| FlashInfer-MLA | Any CUDA GPU |

## Using MLA Runner Directly

All MLA backends are available through `mla_runner.run_mla_benchmark()`:

```python
from mla_runner import run_mla_benchmark
from common import BenchmarkConfig

config = BenchmarkConfig(
    backend="cutlass_mla",
    batch_spec="64q1s4k",
    num_layers=10,
    head_dim=576,
    num_q_heads=128,
    num_kv_heads=1,
    block_size=128,
    device="cuda:0",
    repeats=5,
    warmup_iters=3,
)

# CUTLASS MLA with specific num_kv_splits
result = run_mla_benchmark("cutlass_mla", config, num_kv_splits=4)
print(f"Time: {result.mean_time:.6f}s")

# FlashInfer-MLA
result = run_mla_benchmark("flashinfer_mla", config)

# FlashAttn MLA (Hopper SM90+)
result = run_mla_benchmark("flashattn_mla", config, reorder_batch_threshold=64)

# FlashMLA (Hopper SM90+)
result = run_mla_benchmark("flashmla", config, reorder_batch_threshold=64)
```

## Python API

```python
from batch_spec import parse_batch_spec, format_batch_spec, get_batch_stats
from common import BenchmarkConfig, BenchmarkResult, ResultsFormatter

# Parse batch specs
requests = parse_batch_spec("2q2k_q4s1k_32q1s1k")
print(format_batch_spec(requests))
# "2 prefill (2x2k), 1 extend (1xq4kv1k), 32 decode (32x1k)"

# Get batch statistics
stats = get_batch_stats(requests)
print(f"Total tokens: {stats['total_tokens']}")
print(f"Num decode: {stats['num_decode']}, Num prefill: {stats['num_prefill']}")

# Format results
formatter = ResultsFormatter()
formatter.save_csv(results, "output.csv")
formatter.save_json(results, "output.json")
```

## Tips

**1. Warmup matters** - Use `--warmup-iters 10` for stable results

**2. Multiple repeats** - Use `--repeats 20` for low variance

**3. Save results** - Always use `--output-csv` or `--output-json`

**4. Test incrementally** - Start with `--num-layers 1 --repeats 1`

**5. Extended grammar** - Leverage spec decode, chunked prefill patterns

**6. Parameter sweeps** - Use `--sweep-param` and `--sweep-values` to find optimal values
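Putting tips 1-4 together, a hedged example workflow (flag names as documented above):

```bash
# Smoke test first: one layer, one repeat
python benchmark.py --backend flash --batch-specs "q1s1k" --num-layers 1 --repeats 1

# Then the real run: more warmup and repeats, results saved
python benchmark.py --backends flash flashinfer \
    --batch-specs "q2k" "8q1s1k" \
    --warmup-iters 10 --repeats 20 \
    --output-csv results.csv
```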
benchmarks/attention_benchmarks/__init__.py (new file, 44 lines)
@@ -0,0 +1,44 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

"""vLLM Attention Benchmarking Suite."""

from .batch_spec import (
    BatchRequest,
    format_batch_spec,
    get_batch_stats,
    parse_batch_spec,
    reorder_for_flashinfer,
    split_by_type,
)
from .common import (
    BenchmarkConfig,
    BenchmarkResult,
    MockLayer,
    MockModelConfig,
    ResultsFormatter,
    get_attention_scale,
    is_mla_backend,
    setup_mla_dims,
)

__all__ = [
    # Batch specification
    "BatchRequest",
    "parse_batch_spec",
    "format_batch_spec",
    "reorder_for_flashinfer",
    "split_by_type",
    "get_batch_stats",
    # Benchmarking infrastructure
    "BenchmarkConfig",
    "BenchmarkResult",
    "ResultsFormatter",
    # Mock objects
    "MockLayer",
    "MockModelConfig",
    # Utilities
    "setup_mla_dims",
    "get_attention_scale",
    "is_mla_backend",
]
benchmarks/attention_benchmarks/batch_spec.py (new file, 231 lines)
@@ -0,0 +1,231 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

"""
Simplified batch specification grammar for attention benchmarks.

Grammar (underscore-separated segments):
    Format: (<count>?) q<q_len>(k?) (s<seq_len>(k?))?

    - count: Number of identical requests (optional, default=1)
    - q_len: Query length (number of new tokens)
    - seq_len: Total sequence length (optional, defaults to q_len for prefill)
    - 'k' suffix: Multiplies value by 1024

Common patterns:
    - Prefill: q_len == seq_len (e.g., "q2k" → 2048 new tokens, 2048 seq)
    - Decode: q_len == 1 (e.g., "q1s1k" → 1 token, 1024 seq length)
    - Extend: q_len < seq_len (e.g., "q4s1k" → 4 tokens, 1024 seq length)

Examples:
    q2k          -> [(2048, 2048)]                        # Prefill: 2048 tokens
    q1s1k        -> [(1, 1024)]                           # Decode: 1 token, 1K sequence
    8q1s1k       -> [(1, 1024)] * 8                       # 8 decode requests
    q4s1k        -> [(4, 1024)]                           # 4-token extend (spec decode)
    2q1k_32q1s1k -> [(1024, 1024)] * 2 + [(1, 1024)] * 32 # Mixed batch
    16q4s1k      -> [(4, 1024)] * 16                      # 16 spec decode requests
"""

from collections import Counter
from dataclasses import dataclass

import regex as re


@dataclass
class BatchRequest:
    """Represents a single request in a batch."""

    q_len: int  # Query length (number of new tokens)
    kv_len: int  # Total KV cache length

    @property
    def is_decode(self) -> bool:
        """True if this is a decode request (q_len == 1)."""
        return self.q_len == 1

    @property
    def is_prefill(self) -> bool:
        """True if this is a pure prefill (q_len == kv_len)."""
        return self.q_len == self.kv_len

    @property
    def is_extend(self) -> bool:
        """True if this is context extension (q_len > 1, kv_len > q_len)."""
        return self.q_len > 1 and self.kv_len > self.q_len

    @property
    def context_len(self) -> int:
        """Context length (KV cache - query)."""
        return self.kv_len - self.q_len

    def as_tuple(self) -> tuple[int, int]:
        """Return as (q_len, kv_len) tuple for compatibility."""
        return (self.q_len, self.kv_len)


def _parse_size(size_str: str, k_suffix: str) -> int:
    """Parse size string with optional 'k' suffix."""
    size = int(size_str)
    return size * 1024 if k_suffix == "k" else size


def parse_batch_spec(spec: str) -> list[BatchRequest]:
    """
    Parse batch specification string into list of BatchRequest objects.

    Grammar: (<count>?) q<q_len>(k?) (s<seq_len>(k?))?

    Args:
        spec: Batch specification string (see module docstring for grammar)

    Returns:
        List of BatchRequest objects

    Raises:
        ValueError: If spec format is invalid
    """
    requests = []

    for seg in spec.split("_"):
        # Unified pattern: (<count>?) q<q_len>(k?) (s<seq_len>(k?))?
        m = re.match(r"^(?:(\d+))?q(\d+)(k?)(?:s(\d+)(k?))?$", seg)
        if m:
            cnt = int(m.group(1)) if m.group(1) else 1
            q_len = _parse_size(m.group(2), m.group(3))
            kv_len = _parse_size(m.group(4), m.group(5)) if m.group(4) else q_len
            requests.extend([BatchRequest(q_len=q_len, kv_len=kv_len)] * cnt)
            continue

        raise ValueError(f"Invalid batch spec segment: '{seg}'")

    return requests


def format_batch_spec(requests: list[BatchRequest]) -> str:
    """
    Format list of BatchRequest into human-readable string.

    Groups requests by type and provides counts and sizes.

    Args:
        requests: List of BatchRequest objects

    Returns:
        Formatted string describing the batch
    """
    kinds = {
        "prefill": [],
        "extend": [],
        "decode": [],
    }

    for req in requests:
        tup = (req.q_len, req.kv_len)
        if req.is_prefill:
            kinds["prefill"].append(tup)
        elif req.is_extend:
            kinds["extend"].append(tup)
        elif req.is_decode:
            kinds["decode"].append(tup)

    parts = []
    for kind in ["prefill", "extend", "decode"]:
        lst = kinds[kind]
        if not lst:
            continue

        cnt_total = len(lst)
        ctr = Counter(lst)
        inner = []

        for (q, kv), cnt in ctr.items():
            if kind == "prefill":
                size = f"{q // 1024}k" if q % 1024 == 0 else str(q)
                inner.append(f"{cnt}x{size}")
            elif kind == "decode":
                size = f"{kv // 1024}k" if kv % 1024 == 0 else str(kv)
                inner.append(f"{cnt}x{size}")
            else:  # extend
                qstr = f"{q // 1024}k" if q % 1024 == 0 else str(q)
                kstr = f"{kv // 1024}k" if kv % 1024 == 0 else str(kv)
                inner.append(f"{cnt}xq{qstr}kv{kstr}")

        parts.append(f"{cnt_total} {kind} ({', '.join(inner)})")

    return ", ".join(parts)


def reorder_for_flashinfer(requests: list[BatchRequest]) -> list[BatchRequest]:
    """
    Reorder requests for FlashInfer: decode first, then prefill.

    FlashInfer expects decode requests before prefill requests for
    optimal performance.

    Args:
        requests: Original list of BatchRequest

    Returns:
        Reordered list with decode requests first
    """
    decodes = [r for r in requests if r.is_decode]
    non_decodes = [r for r in requests if not r.is_decode]
    return decodes + non_decodes


def split_by_type(
    requests: list[BatchRequest],
) -> dict[str, list[BatchRequest]]:
    """
    Split requests by type for analysis.

    Args:
        requests: List of BatchRequest

    Returns:
        Dict with keys: 'decode', 'prefill', 'extend'
    """
    result = {
        "decode": [],
        "prefill": [],
        "extend": [],
    }

    for req in requests:
        if req.is_decode:
            result["decode"].append(req)
        elif req.is_prefill:
            result["prefill"].append(req)
        elif req.is_extend:
            result["extend"].append(req)

    return result


def get_batch_stats(requests: list[BatchRequest]) -> dict:
    """
    Compute statistics about a batch.

    Args:
        requests: List of BatchRequest

    Returns:
        Dict with batch statistics
    """
    by_type = split_by_type(requests)

    return {
        "total_requests": len(requests),
        "num_decode": len(by_type["decode"]),
        "num_prefill": len(by_type["prefill"]),
        "num_extend": len(by_type["extend"]),
        "total_tokens": sum(r.q_len for r in requests),
        "total_kv_cache": sum(r.kv_len for r in requests),
        "max_q_len": max((r.q_len for r in requests), default=0),
        "max_kv_len": max((r.kv_len for r in requests), default=0),
        "avg_q_len": sum(r.q_len for r in requests) / len(requests) if requests else 0,
        "avg_kv_len": (
            sum(r.kv_len for r in requests) / len(requests) if requests else 0
        ),
    }
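To make the new module's pieces concrete, a small sketch composing the helpers defined above (all names from `batch_spec.py`; the arithmetic follows directly from the grammar):

```python
from batch_spec import get_batch_stats, parse_batch_spec, reorder_for_flashinfer

# "2q2k_32q1s1k": 2 prefills of 2048 tokens plus 32 single-token decodes
reqs = reorder_for_flashinfer(parse_batch_spec("2q2k_32q1s1k"))
assert all(r.is_decode for r in reqs[:32])  # decodes moved to the front

stats = get_batch_stats(reqs)
assert stats["num_prefill"] == 2 and stats["num_decode"] == 32
assert stats["total_tokens"] == 2 * 2048 + 32  # sums q_len only, not KV
```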
benchmarks/attention_benchmarks/benchmark.py (new file, 886 lines)
@@ -0,0 +1,886 @@
#!/usr/bin/env python3
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

"""
Universal vLLM Attention Benchmark

Benchmark any attention backend with the extended grammar.
Supports standard attention (Flash/Triton/FlashInfer) and MLA backends.

Examples:
    # Standard attention
    python benchmark.py --backends flash flashinfer --batch-specs "q2k" "8q1s1k"

    # MLA backends
    python benchmark.py --backends cutlass_mla flashinfer_mla --batch-specs "64q1s1k"

    # Parameter sweep (CLI)
    python benchmark.py --backend cutlass_mla \
        --batch-specs "64q1s1k" \
        --sweep-param num_kv_splits \
        --sweep-values 1 4 8 16

    # Parameter sweep (YAML config - recommended)
    python benchmark.py --config configs/cutlass_numsplits.yaml
"""

import argparse
import sys
from dataclasses import replace
from pathlib import Path

import yaml
from rich.console import Console
from tqdm import tqdm

sys.path.insert(0, str(Path(__file__).parent.parent.parent))

from batch_spec import parse_batch_spec
from common import (
    BenchmarkConfig,
    BenchmarkResult,
    ModelParameterSweep,
    ParameterSweep,
    ResultsFormatter,
    is_mla_backend,
)


def run_standard_attention_benchmark(config: BenchmarkConfig) -> BenchmarkResult:
    """Run standard attention benchmark (Flash/Triton/FlashInfer)."""
    from runner import run_attention_benchmark

    return run_attention_benchmark(config)


def run_mla_benchmark(config: BenchmarkConfig, **kwargs) -> BenchmarkResult:
    """Run MLA benchmark with appropriate backend."""
    from mla_runner import run_mla_benchmark as run_mla

    return run_mla(config.backend, config, **kwargs)


def run_benchmark(config: BenchmarkConfig, **kwargs) -> BenchmarkResult:
    """
    Run a single benchmark with proper backend selection.

    Args:
        config: BenchmarkConfig with backend, batch_spec, and model params
        **kwargs: Additional arguments passed to MLA benchmarks

    Returns:
        BenchmarkResult (may have error field set on failure)
    """
    try:
        if is_mla_backend(config.backend):
            return run_mla_benchmark(config, **kwargs)
        else:
            return run_standard_attention_benchmark(config)
    except Exception as e:
        return BenchmarkResult(
            config=config,
            mean_time=float("inf"),
            std_time=0,
            min_time=float("inf"),
            max_time=float("inf"),
            error=str(e),
        )


def run_model_parameter_sweep(
    backends: list[str],
    batch_specs: list[str],
    base_config_args: dict,
    sweep: ModelParameterSweep,
    console: Console,
) -> list[BenchmarkResult]:
    """
    Run model parameter sweep for given backends and batch specs.

    Args:
        backends: List of backend names
        batch_specs: List of batch specifications
        base_config_args: Base configuration arguments (num_layers, head_dim, etc.)
        sweep: ModelParameterSweep configuration
        console: Rich console for output

    Returns:
        List of BenchmarkResult objects
    """
    all_results = []

    console.print(
        f"[yellow]Model sweep mode: testing {sweep.param_name} = {sweep.values}[/]"
    )

    total = len(backends) * len(batch_specs) * len(sweep.values)

    with tqdm(total=total, desc="Benchmarking") as pbar:
        for backend in backends:
            for spec in batch_specs:
                for value in sweep.values:
                    # Create config with modified model parameter
                    config_args = base_config_args.copy()
                    config_args[sweep.param_name] = value

                    # Create config with original backend for running
                    clean_config = BenchmarkConfig(
                        backend=backend, batch_spec=spec, **config_args
                    )

                    # Run benchmark
                    result = run_benchmark(clean_config)

                    # Replace backend with labeled version for display
                    backend_label = sweep.get_label(backend, value)
                    labeled_config = replace(result.config, backend=backend_label)
                    result = replace(result, config=labeled_config)
                    all_results.append(result)

                    if not result.success:
                        console.print(
                            f"[red]Error {backend} {spec} {sweep.param_name}="
                            f"{value}: {result.error}[/]"
                        )

                    pbar.update(1)

    # Display sweep results - create separate table for each parameter value
    console.print("\n[bold green]Model Parameter Sweep Results:[/]")
    formatter = ResultsFormatter(console)

    # Group results by parameter value and extract backend mapping
    by_param_value = {}
    backend_mapping = {}  # Maps labeled backend -> original backend

    for r in all_results:
        # Extract original backend and param value from labeled backend
        # The label format is: {backend}_{param_name}_{value}
        # We need to reverse engineer this
        labeled_backend = r.config.backend

        # Try each backend to find which one this result belongs to
        for backend in backends:
            for value in sweep.values:
                expected_label = sweep.get_label(backend, value)
                if labeled_backend == expected_label:
                    backend_mapping[labeled_backend] = backend
                    param_value = str(value)

                    if param_value not in by_param_value:
                        by_param_value[param_value] = []
                    by_param_value[param_value].append(r)
                    break

    # Create a table for each parameter value
    sorted_param_values = sorted(
        by_param_value.keys(), key=lambda x: int(x) if x.isdigit() else x
    )

    for param_value in sorted_param_values:
        console.print(f"\n[bold cyan]{sweep.param_name} = {param_value}[/]")
        param_results = by_param_value[param_value]

        # Create modified results with original backend names
        modified_results = []
        for r in param_results:
            # Get the original backend name from our mapping
            original_backend = backend_mapping[r.config.backend]
            modified_config = replace(r.config, backend=original_backend)
            modified_result = replace(r, config=modified_config)
            modified_results.append(modified_result)

        # Print table with original backend names
        formatter.print_table(modified_results, backends, compare_to_fastest=True)

    # Show optimal backend for each (param_value, batch_spec) combination
    console.print(
        f"\n[bold cyan]Optimal backend for each ({sweep.param_name}, batch_spec):[/]"
    )

    # Group by (param_value, batch_spec)
    by_param_and_spec = {}
    for r in all_results:
        if r.success:
            # Find which (backend, value) this result corresponds to
            labeled_backend = r.config.backend
            for backend in backends:
                for value in sweep.values:
                    expected_label = sweep.get_label(backend, value)
                    if labeled_backend == expected_label:
                        param_value = str(value)
                        spec = r.config.batch_spec
                        key = (param_value, spec)

                        if key not in by_param_and_spec:
                            by_param_and_spec[key] = []
                        by_param_and_spec[key].append(r)
                        break

    # Sort by param value then spec
    sorted_keys = sorted(
        by_param_and_spec.keys(),
        key=lambda x: (int(x[0]) if x[0].isdigit() else x[0], x[1]),
    )

    current_param_value = None
    for param_value, spec in sorted_keys:
        # Print header when param value changes
        if param_value != current_param_value:
            console.print(f"\n [bold]{sweep.param_name}={param_value}:[/]")
            current_param_value = param_value

        results = by_param_and_spec[(param_value, spec)]
        best = min(results, key=lambda r: r.mean_time)

        # Extract original backend name using the mapping
        backend_name = backend_mapping[best.config.backend]

        # Show all backends' times for comparison
        times_str = " | ".join(
            [
                f"{backend_mapping[r.config.backend]}: {r.mean_time:.6f}s"
                for r in sorted(results, key=lambda r: r.mean_time)
            ]
        )

        console.print(
            f" {spec:12s} -> [bold green]{backend_name:15s}[/] ({times_str})"
        )

    return all_results


def run_parameter_sweep(
    backends: list[str],
    batch_specs: list[str],
    base_config_args: dict,
    sweep: ParameterSweep,
    console: Console,
) -> list[BenchmarkResult]:
    """
    Run parameter sweep for given backends and batch specs.

    Args:
        backends: List of backend names
        batch_specs: List of batch specifications
        base_config_args: Base configuration arguments (num_layers, head_dim, etc.)
        sweep: ParameterSweep configuration
        console: Rich console for output

    Returns:
        List of BenchmarkResult objects
    """
    all_results = []

    # Build list of values to sweep (including auto if requested)
    sweep_values = list(sweep.values)
    if sweep.include_auto:
        sweep_values.append("auto")

    console.print(f"[yellow]Sweep mode: testing {sweep.param_name} = {sweep_values}[/]")

    total = len(backends) * len(batch_specs) * len(sweep_values)

    with tqdm(total=total, desc="Benchmarking") as pbar:
        for backend in backends:
            for spec in batch_specs:
                for value in sweep_values:
                    # Create config with original backend for running
                    config = BenchmarkConfig(
                        backend=backend, batch_spec=spec, **base_config_args
                    )

                    # Prepare kwargs for benchmark runner
                    kwargs = {}
                    if value != "auto":
                        kwargs[sweep.param_name] = value

                    # Run benchmark
                    result = run_benchmark(config, **kwargs)

                    # Replace backend with labeled version for display
                    backend_label = sweep.get_label(backend, value)
                    labeled_config = replace(result.config, backend=backend_label)
                    result = replace(result, config=labeled_config)
                    all_results.append(result)

                    if not result.success:
                        console.print(
                            f"[red]Error {backend} {spec} {sweep.param_name}="
                            f"{value}: {result.error}[/]"
                        )

                    pbar.update(1)

    # Display sweep results
    console.print("\n[bold green]Sweep Results:[/]")
    backend_labels = [sweep.get_label(b, v) for b in backends for v in sweep_values]
    formatter = ResultsFormatter(console)
    formatter.print_table(all_results, backend_labels)

    # Show optimal values
    console.print(f"\n[bold cyan]Optimal {sweep.param_name} per batch spec:[/]")
    by_spec = {}
    for r in all_results:
        if r.success:
            spec = r.config.batch_spec
            if spec not in by_spec:
                by_spec[spec] = []
            by_spec[spec].append(r)

    for spec in sorted(by_spec.keys()):
        results = by_spec[spec]
        best = min(results, key=lambda r: r.mean_time)
        console.print(
            f" {spec}: [bold green]{best.config.backend}[/] ({best.mean_time:.6f}s)"
        )

    return all_results


def load_config_from_yaml(config_path: str) -> dict:
    """Load configuration from YAML file."""
    with open(config_path) as f:
        return yaml.safe_load(f)


def generate_batch_specs_from_ranges(ranges: list[dict]) -> list[str]:
    """
    Generate batch specs from range specifications.

    Args:
        ranges: List of range specifications, each containing:
            - template: Batch spec template (e.g., "q{q_len}kv1k")
            - q_len: Dict with start, stop, step, end_inclusive (optional)
            - Other parameters can also be ranges

    Returns:
        List of generated batch spec strings

    Example:
        ranges = [
            {
                "template": "q{q_len}kv1k",
                "q_len": {
                    "start": 1,
                    "stop": 16,
                    "step": 1,
                    "end_inclusive": true  # Optional, defaults to true
                }
            }
        ]
        Returns: ["q1kv1k", "q2kv1k", ..., "q16kv1k"]
    """
    all_specs = []

    for range_spec in ranges:
        template = range_spec.get("template")
        if not template:
            raise ValueError("Range specification must include 'template'")

        # Extract all range parameters from the spec
        range_params = {}
        for key, value in range_spec.items():
            if key == "template":
                continue
            if isinstance(value, dict) and "start" in value:
                # This is a range specification
                start = value["start"]
                stop = value["stop"]
                step = value.get("step", 1)
                # Check if end should be inclusive (default: True)
                end_inclusive = value.get("end_inclusive", True)

                # Adjust stop based on end_inclusive
                if end_inclusive:
                    range_params[key] = list(range(start, stop + 1, step))
                else:
                    range_params[key] = list(range(start, stop, step))
            else:
                # This is a fixed value
                range_params[key] = [value]

        # Generate all combinations (Cartesian product)
        if range_params:
            import itertools

            param_names = list(range_params.keys())
            param_values = [range_params[name] for name in param_names]

            for values in itertools.product(*param_values):
                params = dict(zip(param_names, values))
                spec = template.format(**params)
                all_specs.append(spec)
        else:
            # No parameters, just use template as-is
            all_specs.append(template)

    return all_specs


def main():
    parser = argparse.ArgumentParser(
        description="Universal vLLM attention benchmark",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=__doc__,
    )

    # Config file
    parser.add_argument(
        "--config",
        help="Path to YAML config file (overrides other args)",
    )

    # Backend selection
    parser.add_argument(
        "--backends",
        nargs="+",
        help="Backends to benchmark (flash, triton, flashinfer, cutlass_mla, "
        "flashinfer_mla, flashattn_mla, flashmla)",
    )
    parser.add_argument(
        "--backend",
        help="Single backend (alternative to --backends)",
    )

    # Batch specifications
    parser.add_argument(
        "--batch-specs",
        nargs="+",
        default=["q2k", "8q1s1k"],
        help="Batch specifications using extended grammar",
    )

    # Model config
    parser.add_argument("--num-layers", type=int, default=10, help="Number of layers")
    parser.add_argument("--head-dim", type=int, default=128, help="Head dimension")
    parser.add_argument("--num-q-heads", type=int, default=32, help="Query heads")
    parser.add_argument("--num-kv-heads", type=int, default=8, help="KV heads")
    parser.add_argument("--block-size", type=int, default=16, help="Block size")

    # Benchmark settings
    parser.add_argument("--device", default="cuda:0", help="Device")
    parser.add_argument("--repeats", type=int, default=1, help="Repetitions")
    parser.add_argument("--warmup-iters", type=int, default=3, help="Warmup iterations")
    parser.add_argument("--profile-memory", action="store_true", help="Profile memory")

    # Parameter sweep (use YAML config for advanced sweeps)
    parser.add_argument(
        "--sweep-param",
        help="Parameter name to sweep (e.g., num_kv_splits, reorder_batch_threshold)",
    )
    parser.add_argument(
        "--sweep-values",
        type=int,
        nargs="+",
        help="Values to sweep for the parameter",
    )

    # Output
    parser.add_argument("--output-csv", help="Save to CSV")
    parser.add_argument("--output-json", help="Save to JSON")

    args = parser.parse_args()

    console = Console()
    console.print("[bold cyan]vLLM Attention Benchmark[/]")

    # Load config from YAML if provided
    if args.config:
        console.print(f"[yellow]Loading config from: {args.config}[/]")
        yaml_config = load_config_from_yaml(args.config)

        # Show description if available
        if "description" in yaml_config:
            console.print(f"[dim]{yaml_config['description']}[/]")

        # Override args with YAML values
        # (YAML takes precedence unless CLI arg was explicitly set)
        # Backend(s)
        if "backend" in yaml_config:
            args.backend = yaml_config["backend"]
            args.backends = None
        elif "backends" in yaml_config:
            args.backends = yaml_config["backends"]
            args.backend = None

        # Check for special modes
        if "mode" in yaml_config:
            args.mode = yaml_config["mode"]
        else:
            args.mode = None

        # Batch specs and sizes
        # Support both explicit batch_specs and generated batch_spec_ranges
        if "batch_spec_ranges" in yaml_config:
            # Generate batch specs from ranges
            generated_specs = generate_batch_specs_from_ranges(
                yaml_config["batch_spec_ranges"]
            )
            # Combine with any explicit batch_specs
            if "batch_specs" in yaml_config:
                args.batch_specs = yaml_config["batch_specs"] + generated_specs
            else:
                args.batch_specs = generated_specs
            console.print(
                f"[dim]Generated {len(generated_specs)} batch specs from ranges[/]"
            )
        elif "batch_specs" in yaml_config:
            args.batch_specs = yaml_config["batch_specs"]

        if "batch_sizes" in yaml_config:
            args.batch_sizes = yaml_config["batch_sizes"]
        else:
            args.batch_sizes = None

        # Model config
        if "model" in yaml_config:
            model = yaml_config["model"]
            args.num_layers = model.get("num_layers", args.num_layers)
            args.head_dim = model.get("head_dim", args.head_dim)
            args.num_q_heads = model.get("num_q_heads", args.num_q_heads)
            args.num_kv_heads = model.get("num_kv_heads", args.num_kv_heads)
            args.block_size = model.get("block_size", args.block_size)

        # Benchmark settings
        if "benchmark" in yaml_config:
            bench = yaml_config["benchmark"]
            args.device = bench.get("device", args.device)
            args.repeats = bench.get("repeats", args.repeats)
            args.warmup_iters = bench.get("warmup_iters", args.warmup_iters)
            args.profile_memory = bench.get("profile_memory", args.profile_memory)

        # Parameter sweep configuration
        if "parameter_sweep" in yaml_config:
            sweep_config = yaml_config["parameter_sweep"]
            args.parameter_sweep = ParameterSweep(
                param_name=sweep_config["param_name"],
                values=sweep_config["values"],
                include_auto=sweep_config.get("include_auto", False),
                label_format=sweep_config.get(
                    "label_format", "{backend}_{param_name}_{value}"
                ),
            )
        else:
            args.parameter_sweep = None

        # Model parameter sweep configuration
        if "model_parameter_sweep" in yaml_config:
            sweep_config = yaml_config["model_parameter_sweep"]
            args.model_parameter_sweep = ModelParameterSweep(
                param_name=sweep_config["param_name"],
                values=sweep_config["values"],
                label_format=sweep_config.get(
                    "label_format", "{backend}_{param_name}_{value}"
                ),
            )
        else:
            args.model_parameter_sweep = None

        # Output
        if "output" in yaml_config:
            output = yaml_config["output"]
            if "csv" in output and not args.output_csv:
                args.output_csv = output["csv"]
            if "json" in output and not args.output_json:
                args.output_json = output["json"]

    console.print()

    # Handle CLI-based parameter sweep (if not from YAML)
    if (
        (not hasattr(args, "parameter_sweep") or args.parameter_sweep is None)
        and args.sweep_param
        and args.sweep_values
    ):
        args.parameter_sweep = ParameterSweep(
            param_name=args.sweep_param,
            values=args.sweep_values,
            include_auto=False,
            label_format="{backend}_{param_name}_{value}",
        )

    # Determine backends
    backends = args.backends or ([args.backend] if args.backend else ["flash"])
    console.print(f"Backends: {', '.join(backends)}")
    console.print(f"Batch specs: {', '.join(args.batch_specs)}")
    console.print()

    # Run benchmarks
    all_results = []

    # Handle special mode: decode_vs_prefill comparison
    if hasattr(args, "mode") and args.mode == "decode_vs_prefill":
        console.print("[yellow]Mode: Decode vs Prefill pipeline comparison[/]")
        console.print(
            "[dim]For each query length, testing both decode and prefill pipelines[/]"
        )
        console.print("[dim]Using batched execution for optimal performance[/]")

        # Extract batch sizes from config
        batch_sizes = getattr(args, "batch_sizes", [1])
        backend = backends[0]  # Use first backend (should only be one)

        # Calculate total benchmarks
        total = len(batch_sizes)

        with tqdm(total=total, desc="Benchmarking") as pbar:
            for batch_size in batch_sizes:
                # Prepare all configs for this batch size
                configs_with_thresholds = []

                for spec in args.batch_specs:
                    # Parse the batch spec to get query length
                    requests = parse_batch_spec(spec)
                    if not requests:
                        console.print(
                            f"[red]Error: Could not parse batch spec '{spec}'[/]"
                        )
                        continue

                    # Get query length from first request
                    query_length = requests[0].q_len

                    # Create batch spec for this batch size
                    # For batch_size > 1, we need to prepend the count
                    batch_spec = f"{batch_size}{spec}" if batch_size > 1 else spec

                    # Create base config (without backend name)
                    base_config = BenchmarkConfig(
                        backend=backend,  # Will be overridden later
                        batch_spec=batch_spec,
                        num_layers=args.num_layers,
                        head_dim=args.head_dim,
                        num_q_heads=args.num_q_heads,
                        num_kv_heads=args.num_kv_heads,
                        block_size=args.block_size,
                        device=args.device,
                        repeats=args.repeats,
                        warmup_iters=args.warmup_iters,
                        profile_memory=args.profile_memory,
                    )

                    # Add decode pipeline config
                    decode_threshold = query_length
                    config_decode = replace(
                        base_config,
                        backend=f"{backend}_decode_qlen{query_length}_bs{batch_size}",
                    )
                    configs_with_thresholds.append((config_decode, decode_threshold))

                    # Add prefill pipeline config if query_length > 1
                    if query_length > 1:
                        prefill_threshold = query_length - 1
                        config_prefill = replace(
                            base_config,
                            backend=f"{backend}_prefill_qlen{query_length}"
                            f"_bs{batch_size}",
                        )
                        configs_with_thresholds.append(
                            (config_prefill, prefill_threshold)
                        )

                # Run all benchmarks for this batch size in one go (batched mode)
                try:
                    from mla_runner import run_mla_benchmark as run_mla

                    # Use batched API: pass list of (config, threshold) tuples
                    timing_results = run_mla(backend, configs_with_thresholds)

                    # Create BenchmarkResult objects from timing results
                    for (config, _), timing in zip(
                        configs_with_thresholds, timing_results
                    ):
                        result = BenchmarkResult(
                            config=config,
                            mean_time=timing["mean"],
                            std_time=timing["std"],
                            min_time=timing["min"],
                            max_time=timing["max"],
                            throughput_tokens_per_sec=timing.get("throughput", None),
                        )
                        all_results.append(result)

                except Exception as e:
                    import traceback

                    console.print(
                        f"[red]Error running batched benchmarks for "
                        f"batch_size={batch_size}: {e}[/]"
                    )
                    console.print("[red]Traceback:[/]")
                    traceback.print_exc()
                    # Add error results for all configs
                    for config, _ in configs_with_thresholds:
                        result = BenchmarkResult(
                            config=config,
                            mean_time=float("inf"),
                            std_time=0,
                            min_time=float("inf"),
                            max_time=float("inf"),
                            error=str(e),
                        )
                        all_results.append(result)

                pbar.update(1)

        # Display decode vs prefill results
        console.print("\n[bold green]Decode vs Prefill Results:[/]")

        # Group by batch size
        by_batch_size = {}
        for r in all_results:
            if r.success:
                # Extract batch size from backend name
                parts = r.config.backend.split("_")
                bs_part = [p for p in parts if p.startswith("bs")]
                if bs_part:
                    bs = int(bs_part[0][2:])
                    if bs not in by_batch_size:
                        by_batch_size[bs] = []
                    by_batch_size[bs].append(r)

        # For each batch size, analyze crossover point
        for bs in sorted(by_batch_size.keys()):
            console.print(f"\n[bold cyan]Batch size: {bs}[/]")
            results = by_batch_size[bs]

            # Group by query length
            by_qlen = {}
            for r in results:
                parts = r.config.backend.split("_")
                qlen_part = [p for p in parts if p.startswith("qlen")]
                if qlen_part:
                    qlen = int(qlen_part[0][4:])
                    if qlen not in by_qlen:
                        by_qlen[qlen] = {}

                    pipeline = "decode" if "decode" in r.config.backend else "prefill"
                    by_qlen[qlen][pipeline] = r

            # Find crossover point
            last_decode_faster = None
            for qlen in sorted(by_qlen.keys()):
                pipelines = by_qlen[qlen]
                if "decode" in pipelines and "prefill" in pipelines:
                    decode_time = pipelines["decode"].mean_time
                    prefill_time = pipelines["prefill"].mean_time
                    faster = "decode" if decode_time < prefill_time else "prefill"

                    speedup = (
                        prefill_time / decode_time
                        if decode_time < prefill_time
                        else decode_time / prefill_time
                    )

                    console.print(
                        f" qlen={qlen:3d}: decode={decode_time:.6f}s, "
                        f"prefill={prefill_time:.6f}s -> "
                        f"[bold]{faster}[/] ({speedup:.2f}x)"
                    )

                    if faster == "decode":
                        last_decode_faster = qlen

            if last_decode_faster is not None:
                optimal_threshold = last_decode_faster
                console.print(
                    f"\n [bold green]Optimal threshold for batch_size={bs}: "
                    f"{optimal_threshold}[/]"
                )
                console.print(
                    f" [dim](Use decode pipeline for query_length <= "
                    f"{optimal_threshold})[/]"
                )
            else:
                console.print(
                    f"\n [yellow]Prefill always faster for batch_size={bs}[/]"
                )

    # Handle model parameter sweep mode
    elif hasattr(args, "model_parameter_sweep") and args.model_parameter_sweep:
        # Model parameter sweep
        base_config_args = {
            "num_layers": args.num_layers,
            "head_dim": args.head_dim,
            "num_q_heads": args.num_q_heads,
            "num_kv_heads": args.num_kv_heads,
            "block_size": args.block_size,
            "device": args.device,
            "repeats": args.repeats,
            "warmup_iters": args.warmup_iters,
            "profile_memory": args.profile_memory,
        }
        all_results = run_model_parameter_sweep(
            backends,
            args.batch_specs,
            base_config_args,
            args.model_parameter_sweep,
            console,
        )

    # Handle parameter sweep mode (unified)
    elif hasattr(args, "parameter_sweep") and args.parameter_sweep:
        # Unified parameter sweep
        base_config_args = {
            "num_layers": args.num_layers,
            "head_dim": args.head_dim,
            "num_q_heads": args.num_q_heads,
            "num_kv_heads": args.num_kv_heads,
            "block_size": args.block_size,
            "device": args.device,
            "repeats": args.repeats,
            "warmup_iters": args.warmup_iters,
            "profile_memory": args.profile_memory,
        }
        all_results = run_parameter_sweep(
            backends, args.batch_specs, base_config_args, args.parameter_sweep, console
        )

    else:
        # Normal mode: compare backends
        total = len(backends) * len(args.batch_specs)

        with tqdm(total=total, desc="Benchmarking") as pbar:
            for spec in args.batch_specs:
                for backend in backends:
                    config = BenchmarkConfig(
                        backend=backend,
                        batch_spec=spec,
                        num_layers=args.num_layers,
                        head_dim=args.head_dim,
                        num_q_heads=args.num_q_heads,
                        num_kv_heads=args.num_kv_heads,
                        block_size=args.block_size,
                        device=args.device,
                        repeats=args.repeats,
                        warmup_iters=args.warmup_iters,
                        profile_memory=args.profile_memory,
                    )

                    result = run_benchmark(config)
                    all_results.append(result)

                    if not result.success:
                        console.print(f"[red]Error {backend} {spec}: {result.error}[/]")

                    pbar.update(1)

        # Display results
        console.print("\n[bold green]Results:[/]")
        formatter = ResultsFormatter(console)
        formatter.print_table(all_results, backends)

    # Save results
    if all_results:
        formatter = ResultsFormatter(console)
        if args.output_csv:
            formatter.save_csv(all_results, args.output_csv)
        if args.output_json:
            formatter.save_json(all_results, args.output_json)


if __name__ == "__main__":
    main()
503 benchmarks/attention_benchmarks/common.py Normal file
@@ -0,0 +1,503 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

"""Common utilities for attention benchmarking."""

import csv
import json
import math
from dataclasses import asdict, dataclass
from pathlib import Path
from typing import Any

import numpy as np
import torch
from rich.console import Console
from rich.table import Table

# Mock classes for vLLM attention infrastructure


class MockHfConfig:
    """Mock HuggingFace config that satisfies vLLM's requirements."""

    def __init__(self, mla_dims: dict):
        self.num_attention_heads = mla_dims["num_q_heads"]
        self.num_key_value_heads = mla_dims["num_kv_heads"]
        self.hidden_size = mla_dims["head_dim"] * mla_dims["num_q_heads"]
        self.model_type = "deepseek_v2"
        self.is_encoder_decoder = False
        self.kv_lora_rank = mla_dims["kv_lora_rank"]
        self.qk_nope_head_dim = mla_dims["qk_nope_head_dim"]
        self.qk_rope_head_dim = mla_dims["qk_rope_head_dim"]
        self.v_head_dim = mla_dims["v_head_dim"]
        self.qk_head_dim = mla_dims["qk_nope_head_dim"] + mla_dims["qk_rope_head_dim"]

    def get_text_config(self):
        return self


# Import AttentionLayerBase at module level to avoid circular dependencies
try:
    from vllm.model_executor.layers.attention_layer_base import AttentionLayerBase

    _HAS_ATTENTION_LAYER_BASE = True
except ImportError:
    _HAS_ATTENTION_LAYER_BASE = False
    AttentionLayerBase = object  # Fallback


class MockKVBProj:
    """Mock KV projection layer for MLA prefill mode.

    Mimics ColumnParallelLinear behavior for kv_b_proj in MLA backends.
    Projects kv_c_normed to [qk_nope_head_dim + v_head_dim] per head.
    """

    def __init__(self, num_heads: int, qk_nope_head_dim: int, v_head_dim: int):
        self.num_heads = num_heads
        self.qk_nope_head_dim = qk_nope_head_dim
        self.v_head_dim = v_head_dim
        self.out_dim = qk_nope_head_dim + v_head_dim

    def __call__(self, x: torch.Tensor) -> tuple[torch.Tensor]:
        """
        Project kv_c_normed to output space.

        Args:
            x: Input tensor [num_tokens, kv_lora_rank]

        Returns:
            Tuple containing output tensor
            [num_tokens, num_heads, qk_nope_head_dim + v_head_dim]
        """
        num_tokens = x.shape[0]
        result = torch.randn(
            num_tokens,
            self.num_heads,
            self.out_dim,
            device=x.device,
            dtype=x.dtype,
        )
        return (result,)  # Return as tuple to match ColumnParallelLinear API


class MockLayer(AttentionLayerBase):
    """Mock attention layer with scale parameters and impl.

    Inherits from AttentionLayerBase so it passes isinstance checks
    in get_layers_from_vllm_config when FlashInfer prefill is enabled.
    """

    def __init__(self, device: torch.device, impl=None, kv_cache_spec=None):
        # Don't call super().__init__() as AttentionLayerBase doesn't have __init__
        self._k_scale = torch.tensor(1.0, device=device)
        self._v_scale = torch.tensor(1.0, device=device)
        self._q_scale = torch.tensor(1.0, device=device)
        # Scalar floats for kernels that need them
        self._k_scale_float = float(self._k_scale.item())
        self._v_scale_float = float(self._v_scale.item())
        self._q_scale_float = float(self._q_scale.item())
        # AttentionImpl for metadata builders to query
        self.impl = impl
        # KV cache spec for get_kv_cache_spec
        self._kv_cache_spec = kv_cache_spec

    def get_attn_backend(self):
        """Get the attention backend class (required by AttentionLayerBase)."""
        # Return None as this is just a mock layer for benchmarking
        return None

    def get_kv_cache_spec(self):
        """Get the KV cache spec (required by AttentionLayerBase)."""
        return self._kv_cache_spec


class MockModelConfig:
    """Mock model configuration."""

    def __init__(
        self,
        num_q_heads: int,
        num_kv_heads: int,
        head_dim: int,
        dtype: torch.dtype = torch.float16,
        max_model_len: int = 32768,
    ):
        self._n_q = num_q_heads
        self._n_kv = num_kv_heads
        self._d = head_dim
        self.dtype = dtype
        self.max_model_len = max_model_len

    def get_num_attention_heads(self, _=None) -> int:
        return self._n_q

    def get_num_kv_heads(self, _=None) -> int:
        return self._n_kv

    def get_head_size(self) -> int:
        return self._d

    def get_num_layers(self) -> int:
        """Mock method for layer count queries."""
        return 1

    def get_sliding_window_for_layer(self, _layer_idx: int):
        """Mock method for sliding window queries."""
        return None

    def get_logits_soft_cap_for_layer(self, _layer_idx: int):
        """Mock method for logits soft cap queries."""
        return None

    def get_sm_scale_for_layer(self, _layer_idx: int) -> float:
        """Mock method for SM scale queries."""
        return 1.0 / (self.get_head_size() ** 0.5)


class MockParallelConfig:
    """Mock parallel configuration."""

    pass


class MockCompilationConfig:
    """Mock compilation configuration."""

    def __init__(self):
        self.full_cuda_graph = False
        self.static_forward_context = {}


class MockVLLMConfig:
    """Mock VLLM configuration."""

    def __init__(self):
        self.compilation_config = MockCompilationConfig()


class MockRunner:
    """Mock GPU runner for metadata builders."""

    def __init__(
        self,
        seq_lens: np.ndarray,
        query_start_locs: np.ndarray,
        device: torch.device,
        num_q_heads: int,
        num_kv_heads: int,
        head_dim: int,
        dtype: torch.dtype,
    ):
        self.model_config = MockModelConfig(num_q_heads, num_kv_heads, head_dim, dtype)
        self.parallel_config = MockParallelConfig()
        self.vllm_config = MockVLLMConfig()
        self.seq_lens_np = seq_lens
        self.query_start_loc_np = query_start_locs
        self.device = device
        self.attention_chunk_size = None
        self.num_query_heads = num_q_heads
        self.num_kv_heads = num_kv_heads
        self.dtype = dtype


@dataclass
class ParameterSweep:
    """Configuration for sweeping a backend parameter."""

    param_name: str  # Name of the backend parameter to sweep
    values: list[Any]  # List of values to test
    include_auto: bool = False  # Also test with param unset (auto mode)
    label_format: str = "{backend}_{param_name}_{value}"  # Result label template

    def get_label(self, backend: str, value: Any) -> str:
        """Generate a label for a specific parameter value."""
        return self.label_format.format(
            backend=backend, param_name=self.param_name, value=value
        )
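
# Example (illustrative sketch of the default label template above):
#   sweep = ParameterSweep(param_name="num_kv_splits", values=[4, 8, 16])
#   sweep.get_label("cutlass_mla", 8)  # -> "cutlass_mla_num_kv_splits_8"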


@dataclass
class ModelParameterSweep:
    """Configuration for sweeping a model configuration parameter."""

    param_name: str  # Name of the model config parameter to sweep (e.g., "num_q_heads")
    values: list[Any]  # List of values to test
    label_format: str = "{backend}_{param_name}_{value}"  # Result label template

    def get_label(self, backend: str, value: Any) -> str:
        """Generate a label for a specific parameter value."""
        return self.label_format.format(
            backend=backend, param_name=self.param_name, value=value
        )


@dataclass
class BenchmarkConfig:
    """Configuration for a single benchmark run."""

    backend: str
    batch_spec: str
    num_layers: int
    head_dim: int
    num_q_heads: int
    num_kv_heads: int
    block_size: int
    device: str
    dtype: torch.dtype = torch.float16
    repeats: int = 1
    warmup_iters: int = 3
    profile_memory: bool = False
    use_cuda_graphs: bool = False

    # MLA-specific
    kv_lora_rank: int | None = None
    qk_nope_head_dim: int | None = None
    qk_rope_head_dim: int | None = None
    v_head_dim: int | None = None

    # Backend-specific tuning
    num_kv_splits: int | None = None  # CUTLASS MLA
    reorder_batch_threshold: int | None = None  # FlashAttn MLA, FlashMLA


@dataclass
class BenchmarkResult:
    """Results from a single benchmark run."""

    config: BenchmarkConfig
    mean_time: float  # seconds
    std_time: float  # seconds
    min_time: float  # seconds
    max_time: float  # seconds
    throughput_tokens_per_sec: float | None = None
    memory_allocated_mb: float | None = None
    memory_reserved_mb: float | None = None
    error: str | None = None

    @property
    def success(self) -> bool:
        """Whether benchmark completed successfully."""
        return self.error is None

    def to_dict(self) -> dict[str, Any]:
        """Convert to dictionary for serialization."""
        return {
            "config": asdict(self.config),
            "mean_time": self.mean_time,
            "std_time": self.std_time,
            "min_time": self.min_time,
            "max_time": self.max_time,
            "throughput_tokens_per_sec": self.throughput_tokens_per_sec,
            "memory_allocated_mb": self.memory_allocated_mb,
            "memory_reserved_mb": self.memory_reserved_mb,
            "error": self.error,
        }


class ResultsFormatter:
    """Format and display benchmark results."""

    def __init__(self, console: Console | None = None):
        self.console = console or Console()

    def print_table(
        self,
        results: list[BenchmarkResult],
        backends: list[str],
        compare_to_fastest: bool = True,
    ):
        """
        Print results as a rich table.

        Args:
            results: List of BenchmarkResult
            backends: List of backend names being compared
            compare_to_fastest: Show percentage comparison to fastest
        """
        # Group by batch spec
        by_spec = {}
        for r in results:
            spec = r.config.batch_spec
            if spec not in by_spec:
                by_spec[spec] = {}
            by_spec[spec][r.config.backend] = r

        # Create shortened backend names for display
        def shorten_backend_name(name: str) -> str:
            """Shorten long backend names for table display."""
            # Remove common prefixes
            name = name.replace("flashattn_mla", "famla")
            name = name.replace("flashinfer_mla", "fimla")
            name = name.replace("flashmla", "fmla")
            name = name.replace("cutlass_mla", "cmla")
            name = name.replace("numsplits", "ns")
            return name

        table = Table(title="Attention Benchmark Results")
        table.add_column("Batch\nSpec", no_wrap=True)

        multi = len(backends) > 1
        for backend in backends:
            short_name = shorten_backend_name(backend)
            # Time column
            col_time = f"{short_name}\nTime (s)"
            table.add_column(col_time, justify="right", no_wrap=False)
            if multi and compare_to_fastest:
                # Relative performance column
                col_rel = f"{short_name}\nvs Best"
                table.add_column(col_rel, justify="right", no_wrap=False)

        # Add rows
        for spec in sorted(by_spec.keys()):
            spec_results = by_spec[spec]
            times = {b: r.mean_time for b, r in spec_results.items() if r.success}
            best_time = min(times.values()) if times else 0.0

            row = [spec]
            for backend in backends:
                if backend in spec_results:
                    r = spec_results[backend]
                    if r.success:
                        row.append(f"{r.mean_time:.6f}")
                        if multi and compare_to_fastest:
                            pct = (
                                (r.mean_time / best_time * 100) if best_time > 0 else 0
                            )
                            pct_str = f"{pct:.1f}%"
                            if r.mean_time == best_time:
                                pct_str = f"[bold green]{pct_str}[/]"
                            row.append(pct_str)
                    else:
                        row.append("[red]ERROR[/]")
                        if multi and compare_to_fastest:
                            row.append("-")
                else:
                    row.append("-")
                    if multi and compare_to_fastest:
                        row.append("-")

            table.add_row(*row)

        self.console.print(table)

    def save_csv(self, results: list[BenchmarkResult], path: str):
        """Save results to CSV file."""
        if not results:
            return

        path_obj = Path(path)
        path_obj.parent.mkdir(parents=True, exist_ok=True)

        with open(path, "w", newline="") as f:
            writer = csv.DictWriter(
                f,
                fieldnames=[
                    "backend",
                    "batch_spec",
                    "num_layers",
                    "mean_time",
                    "std_time",
                    "throughput",
                    "memory_mb",
                ],
            )
            writer.writeheader()
            for r in results:
                writer.writerow(
                    {
                        "backend": r.config.backend,
                        "batch_spec": r.config.batch_spec,
                        "num_layers": r.config.num_layers,
                        "mean_time": r.mean_time,
                        "std_time": r.std_time,
                        "throughput": r.throughput_tokens_per_sec or 0,
                        "memory_mb": r.memory_allocated_mb or 0,
                    }
                )

        self.console.print(f"[green]Saved CSV results to {path}[/]")

    def save_json(self, results: list[BenchmarkResult], path: str):
        """Save results to JSON file."""
        path_obj = Path(path)
        path_obj.parent.mkdir(parents=True, exist_ok=True)

        data = [r.to_dict() for r in results]
        with open(path, "w") as f:
            json.dump(data, f, indent=2, default=str)

        self.console.print(f"[green]Saved JSON results to {path}[/]")
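
# Example usage (illustrative sketch):
#   formatter = ResultsFormatter()
#   formatter.print_table(all_results, backends=["cutlass_mla", "flashmla"])
#   formatter.save_json(all_results, "results/mla.json")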

def setup_mla_dims(model_name: str = "deepseek-v3") -> dict:
    """
    Get MLA dimensions for known models.

    Args:
        model_name: Model identifier

    Returns:
        Dict with MLA dimension configuration
    """
    configs = {
        "deepseek-v2": {
            "kv_lora_rank": 512,
            "qk_nope_head_dim": 128,
            "qk_rope_head_dim": 64,
            "v_head_dim": 128,
            "num_q_heads": 128,
            "num_kv_heads": 1,
            "head_dim": 576,
        },
        "deepseek-v3": {
            "kv_lora_rank": 512,
            "qk_nope_head_dim": 128,
            "qk_rope_head_dim": 64,
            "v_head_dim": 128,
            "num_q_heads": 128,
            "num_kv_heads": 1,
            "head_dim": 576,
        },
        "deepseek-v2-lite": {
            "kv_lora_rank": 512,
            "qk_nope_head_dim": 128,
            "qk_rope_head_dim": 64,
            "v_head_dim": 128,
            "num_q_heads": 16,
            "num_kv_heads": 1,
            "head_dim": 576,
        },
    }

    if model_name not in configs:
        raise ValueError(
            f"Unknown model '{model_name}'. Known models: {list(configs.keys())}"
        )

    return configs[model_name]
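
# Example (illustrative): setup_mla_dims("deepseek-v3")["kv_lora_rank"] == 512,
# while an unrecognized model name raises ValueError listing the known models.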


def get_attention_scale(head_dim: int) -> float:
    """Compute attention scale factor (1/sqrt(d))."""
    return 1.0 / math.sqrt(head_dim)


def is_mla_backend(backend: str) -> bool:
    """
    Check if backend is an MLA backend using the backend's is_mla() property.

    Args:
        backend: Backend name (e.g., "CUTLASS_MLA", "FLASHINFER_MLA")

    Returns:
        True if the backend is an MLA backend, False otherwise
    """
    from vllm.v1.attention.backends.registry import AttentionBackendEnum

    try:
        backend_class = AttentionBackendEnum[backend.upper()].get_class()
        return backend_class.is_mla()
    except (KeyError, ValueError, ImportError):
        return False
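
# Example (illustrative): is_mla_backend("cutlass_mla") returns True when the
# backend class is importable and reports is_mla(); unknown or unimportable
# names fall through to False.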
61 benchmarks/attention_benchmarks/configs/mla_decode.yaml Normal file
@@ -0,0 +1,61 @@
# MLA decode-only benchmark configuration

model:
  name: "deepseek-v3"
  num_layers: 60
  num_q_heads: 128
  num_kv_heads: 1  # MLA uses single latent KV
  head_dim: 576
  kv_lora_rank: 512
  qk_nope_head_dim: 128
  qk_rope_head_dim: 64
  v_head_dim: 128
  block_size: 128  # CUTLASS MLA and FlashAttn MLA use 128

batch_specs:
  # Small batches, varying sequence lengths
  - "16q1s512"  # 16 requests, 512 KV cache
  - "16q1s1k"   # 16 requests, 1k KV cache
  - "16q1s2k"   # 16 requests, 2k KV cache
  - "16q1s4k"   # 16 requests, 4k KV cache

  # Medium batches
  - "32q1s1k"  # 32 requests, 1k KV cache
  - "32q1s2k"  # 32 requests, 2k KV cache
  - "32q1s4k"  # 32 requests, 4k KV cache
  - "32q1s8k"  # 32 requests, 8k KV cache

  # Large batches
  - "64q1s1k"  # 64 requests, 1k KV cache
  - "64q1s2k"  # 64 requests, 2k KV cache
  - "64q1s4k"  # 64 requests, 4k KV cache
  - "64q1s8k"  # 64 requests, 8k KV cache

  # Very large batches
  - "128q1s1k"  # 128 requests, 1k KV cache
  - "128q1s2k"  # 128 requests, 2k KV cache

  # Long context
  - "32q1s16k"  # 32 requests, 16k KV cache
  - "32q1s32k"  # 32 requests, 32k KV cache
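  # Batch spec grammar (informal sketch inferred from the comments above):
  # "<num>q<q_len>s<kv_len>", where sizes accept a k suffix (1k = 1024) —
  # e.g. "16q1s512" is 16 decode requests, each with query length 1 and a
  # 512-token KV cache, while a bare "q8k" is a single 8k-token prefill.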

backends:
  - cutlass_mla
  - flashinfer_mla
  - flashattn_mla  # Hopper only
  - flashmla       # Hopper only

device: "cuda:0"
repeats: 5
warmup_iters: 3
profile_memory: true

# Backend-specific tuning
cutlass_mla:
  num_kv_splits: auto  # or a specific value like 4, 8, 16

flashattn_mla:
  reorder_batch_threshold: 512

flashmla:
  reorder_batch_threshold: 1
60 benchmarks/attention_benchmarks/configs/mla_mixed_batch.yaml Normal file
@@ -0,0 +1,60 @@
# MLA mixed batch benchmark (prefill + decode)
# Tests chunked prefill performance

model:
  name: "deepseek-v3"
  num_layers: 60
  num_q_heads: 128
  num_kv_heads: 1
  head_dim: 576
  kv_lora_rank: 512
  qk_nope_head_dim: 128
  qk_rope_head_dim: 64
  v_head_dim: 128
  block_size: 128

batch_specs:
  # Small prefill + decode
  - "1q1k_8q1s1k"   # 1 prefill + 8 decode
  - "2q2k_16q1s1k"  # 2 prefill + 16 decode
  - "4q1k_32q1s2k"  # 4 prefill + 32 decode

  # Medium prefill + decode
  - "2q4k_32q1s2k"  # 2 medium prefill + 32 decode
  - "4q4k_64q1s2k"  # 4 medium prefill + 64 decode
  - "8q2k_64q1s4k"  # 8 prefill + 64 decode

  # Large prefill + decode (chunked prefill stress test)
  - "2q8k_32q1s1k"   # 2 large prefill + 32 decode
  - "1q16k_16q1s2k"  # 1 very large prefill + 16 decode
  - "2q16k_32q1s4k"  # 2 very large prefill + 32 decode

  # Context extension + decode
  - "2q1kkv2k_16q1s1k"  # 2 extend + 16 decode
  - "4q2kkv4k_32q1s2k"  # 4 extend + 32 decode
  - "2q1kkv8k_32q1s2k"  # 2 large extend + 32 decode

  # Explicitly chunked prefill
  - "q8k"           # 8k prefill with chunking hint
  - "q16k"          # 16k prefill with chunking hint
  - "2q8k_32q1s2k"  # 2 chunked prefill + 32 decode

  # High decode ratio (realistic serving)
  - "1q2k_63q1s1k"  # 1 prefill + 63 decode
  - "2q2k_62q1s2k"  # 2 prefill + 62 decode
  - "4q4k_60q1s4k"  # 4 prefill + 60 decode

backends:
  - cutlass_mla
  - flashinfer_mla
  - flashattn_mla  # Hopper only
  - flashmla       # Hopper only

device: "cuda:0"
repeats: 5
warmup_iters: 3
profile_memory: true

# Analyze chunked prefill workspace size impact
chunked_prefill:
  test_workspace_sizes: [4096, 8192, 16384, 32768, 65536]
@@ -0,0 +1,88 @@
# Study 4: What is the optimal reorder_batch_threshold for MLA backends that
# support query length > 1?
# Question: At what query length does the prefill pipeline become faster than
# the decode pipeline?
# Methodology: For each query length, compare decode vs prefill performance to
# find the crossover point
# Applies to: FlashAttn MLA, FlashMLA

description: "Decode vs Prefill pipeline crossover analysis"

# Test FlashAttn MLA
backend: flashattn_mla

# Mode: decode_vs_prefill comparison (special sweep mode)
# For each batch spec, we'll test both decode and prefill pipelines
mode: "decode_vs_prefill"

# Query lengths to test (from the old benchmark_mla_threshold.py methodology)
# Each query length will be tested with BOTH decode and prefill pipelines:
#   - decode: threshold >= query_length (forces decode pipeline)
#   - prefill: threshold < query_length (forces prefill pipeline)
#
# We use the q<N>s1k format, which creates q_len=N, seq_len=1024 requests.
# This tests different query lengths with a fixed sequence-length context.
#
# Using batch_spec_ranges for automatic generation:
batch_spec_ranges:
  - template: "q{q_len}s1k"
    q_len:
      start: 1
      stop: 16
      step: 1
      end_inclusive: false
  - template: "q{q_len}s1k"
    q_len:
      start: 16
      stop: 64
      step: 2
      end_inclusive: false
  - template: "q{q_len}s1k"
    q_len:
      start: 64
      stop: 1024
      step: 4
      end_inclusive: true
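# Illustrative expansion (assuming half-open ranges unless end_inclusive):
# the first range yields q1s1k .. q15s1k, the second q16s1k, q18s1k, ..
# q62s1k, and the third q64s1k, q68s1k, .. q1024s1k (stop included).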

# Batch sizes to test (from old script)
batch_sizes:
  - 1
  - 2
  - 4
  - 8
  - 16
  - 32
  - 64
  - 128
  - 256

# Model configuration (DeepSeek V2/V3 defaults)
model:
  num_layers: 10
  head_dim: 576
  num_q_heads: 128
  num_kv_heads: 1
  block_size: 128

# Benchmark settings
benchmark:
  device: "cuda:0"
  repeats: 15  # More repeats for spec decode variance
  warmup_iters: 5
  profile_memory: false

# Output
output:
  csv: "reorder_threshold_results.csv"
  json: "reorder_threshold_results.json"

# Expected outcome (reproduces old benchmark_mla_threshold.py study):
# - For each batch size, find the crossover point where prefill becomes faster than decode
# - Show decode vs prefill performance across all query lengths
# - Determine optimal reorder_batch_threshold based on last query length where decode is faster
# - Understand how crossover point varies with batch size
# - Provide data-driven guidance for default threshold value
#
# Methodology (from old script):
# - Each query length tested with BOTH pipelines:
#   * decode: threshold >= query_length (forces decode pipeline)
#   * prefill: threshold < query_length (forces prefill pipeline)
# - Compare which is faster to find crossover point
@@ -0,0 +1,62 @@
# Speculative decoding benchmark configuration
# Tests reorder_batch_threshold optimization

model:
  name: "deepseek-v3"
  num_layers: 60
  num_q_heads: 128
  num_kv_heads: 1
  head_dim: 576
  kv_lora_rank: 512
  qk_nope_head_dim: 128
  qk_rope_head_dim: 64
  v_head_dim: 128

batch_specs:
  # Pure speculative decode (K-token verification)
  - "q2s1k"   # 2-token spec, 1k KV
  - "q4s1k"   # 4-token spec, 1k KV
  - "q8s1k"   # 8-token spec, 1k KV
  - "q16s1k"  # 16-token spec, 1k KV

  # Speculative with different context lengths
  - "q4s2k"  # 4-token spec, 2k KV
  - "q4s4k"  # 4-token spec, 4k KV
  - "q8s2k"  # 8-token spec, 2k KV
  - "q8s4k"  # 8-token spec, 4k KV

  # Mixed: speculative + regular decode
  - "32q4s1k"          # 32 spec requests
  - "16q4s1k_16q1s1k"  # 16 spec + 16 regular
  - "8q8s2k_24q1s2k"   # 8 spec (8-tok) + 24 regular

  # Mixed: speculative + prefill + decode
  - "2q1k_16q4s1k_16q1s1k"  # 2 prefill + 16 spec + 16 decode
  - "4q2k_32q4s2k_32q1s2k"  # 4 prefill + 32 spec + 32 decode

  # Large batches with speculation
  - "64q4s1k"   # 64 spec requests
  - "32q8s2k"   # 32 spec (8-token)
  - "16q16s4k"  # 16 spec (16-token)

# Backends that support query length > 1
backends:
  - flashattn_mla  # reorder_batch_threshold = 512
  - flashmla       # reorder_batch_threshold = 1 (tunable)

# FlashInfer-MLA also supports uniform spec-as-decode, but with a different mechanism
# - flashinfer_mla

# Benchmark settings
benchmark:
  device: "cuda:0"
  repeats: 10  # More repeats for statistical significance
  warmup_iters: 5
  profile_memory: false

# Test these threshold values for optimization
parameter_sweep:
  param_name: "reorder_batch_threshold"
  values: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
  include_auto: false
  label_format: "{backend}_threshold_{value}"
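
# Illustrative labels produced by this sweep (per label_format above):
#   flashattn_mla_threshold_1, ..., flashattn_mla_threshold_512,
#   flashmla_threshold_1, ..., flashmla_threshold_512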
@@ -0,0 +1,40 @@
# Standard attention backend benchmark configuration

model:
  num_layers: 32
  num_q_heads: 32
  num_kv_heads: 8  # GQA with 4:1 ratio
  head_dim: 128
  block_size: 16

batch_specs:
  # Pure prefill
  - "q512"  # Small prefill (512 tokens)
  - "q2k"   # Medium prefill (2048 tokens)
  - "q4k"   # Large prefill (4096 tokens)
  - "q8k"   # Very large prefill (8192 tokens)

  # Pure decode
  - "8q1s1k"   # 8 requests, 1k KV cache each
  - "16q1s2k"  # 16 requests, 2k KV cache each
  - "32q1s1k"  # 32 requests, 1k KV cache each
  - "64q1s4k"  # 64 requests, 4k KV cache each

  # Mixed prefill/decode
  - "2q2k_8q1s1k"   # 2 prefill + 8 decode
  - "4q1k_16q1s2k"  # 4 prefill + 16 decode
  - "2q4k_32q1s1k"  # 2 large prefill + 32 decode

  # Context extension
  - "q1ks2k"   # 1k query, 2k sequence (chunked prefill)
  - "2q1ks4k"  # 2 requests: 1k query, 4k sequence

backends:
  - flash
  - triton
  - flashinfer

device: "cuda:0"
repeats: 5
warmup_iters: 3
profile_memory: false
836 benchmarks/attention_benchmarks/mla_runner.py Normal file
@@ -0,0 +1,836 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

"""
MLA benchmark runner - shared utilities for MLA benchmarks.

This module provides helpers for running MLA backends without
needing full VllmConfig integration.
"""

import importlib

import numpy as np
import torch
from batch_spec import parse_batch_spec
from common import (
    BenchmarkResult,
    MockHfConfig,
    MockKVBProj,
    MockLayer,
    setup_mla_dims,
)

from vllm.config import (
    CacheConfig,
    CompilationConfig,
    ModelConfig,
    ParallelConfig,
    SchedulerConfig,
    VllmConfig,
    set_current_vllm_config,
)

# ============================================================================
# VllmConfig Creation
# ============================================================================


def _add_mock_methods_to_model_config(model_config: ModelConfig) -> None:
    """
    Add mock methods for layer-specific queries to ModelConfig.

    These methods are needed by metadata builders but aren't normally
    present on ModelConfig when used in benchmark contexts.
    """
    import types

    model_config.get_num_layers = types.MethodType(lambda self: 1, model_config)
    model_config.get_sliding_window_for_layer = types.MethodType(
        lambda self, _i: None, model_config
    )
    model_config.get_logits_soft_cap_for_layer = types.MethodType(
        lambda self, _i: None, model_config
    )
    model_config.get_sm_scale_for_layer = types.MethodType(
        lambda self, _i: 1.0 / model_config.get_head_size() ** 0.5, model_config
    )


def create_minimal_vllm_config(
    model_name: str = "deepseek-v3",
    block_size: int = 128,
    max_num_seqs: int = 256,
    mla_dims: dict | None = None,
) -> VllmConfig:
    """
    Create minimal VllmConfig for MLA benchmarks.

    Args:
        model_name: Model name (deepseek-v2, deepseek-v3, etc.) - used if mla_dims not
            provided
        block_size: KV cache block size
        max_num_seqs: Maximum number of sequences
        mla_dims: Optional custom MLA dimensions dict. If not provided, uses
            setup_mla_dims(model_name)

    Returns:
        VllmConfig for benchmarking
    """
    # Get MLA dimensions - use provided or load from model name
    if mla_dims is None:
        mla_dims = setup_mla_dims(model_name)

    # Create mock HF config first (avoids downloading from HuggingFace)
    mock_hf_config = MockHfConfig(mla_dims)

    # Create a temporary minimal config.json to avoid HF downloads
    # This ensures consistent ModelConfig construction without network access
    import json
    import os
    import shutil
    import tempfile

    minimal_config = {
        "architectures": ["DeepseekV2ForCausalLM"],
        "model_type": "deepseek_v2",
        "num_attention_heads": mla_dims["num_q_heads"],
        "num_key_value_heads": mla_dims["num_kv_heads"],
        "hidden_size": mla_dims["head_dim"] * mla_dims["num_q_heads"],
        "torch_dtype": "bfloat16",
        "max_position_embeddings": 163840,  # DeepSeek V3 default
        "rope_theta": 10000.0,
        "vocab_size": 128256,
    }

    # Create temporary directory with config.json
    temp_dir = tempfile.mkdtemp(prefix="vllm_bench_")
    config_path = os.path.join(temp_dir, "config.json")
    with open(config_path, "w") as f:
        json.dump(minimal_config, f)

    try:
        # Create model config using local path - no HF downloads
        model_config = ModelConfig(
            model=temp_dir,  # Use local temp directory
            tokenizer=None,
            tokenizer_mode="auto",
            trust_remote_code=True,
            dtype="bfloat16",
            seed=0,
            max_model_len=32768,
            quantization=None,
            quantization_param_path=None,
            enforce_eager=False,
            max_context_len_to_capture=None,
            max_seq_len_to_capture=8192,
            max_logprobs=20,
            disable_sliding_window=False,
            skip_tokenizer_init=True,
            served_model_name=None,
            limit_mm_per_prompt=None,
            use_async_output_proc=True,
            config_format="auto",
        )
    finally:
        # Clean up temporary directory
        shutil.rmtree(temp_dir, ignore_errors=True)

    # Override with our mock config
    model_config.hf_config = mock_hf_config
    model_config.hf_text_config = mock_hf_config

    # Add mock methods for layer-specific queries
    _add_mock_methods_to_model_config(model_config)

    # Create sub-configs
    cache_config = CacheConfig(
        block_size=block_size,
        gpu_memory_utilization=0.9,
        swap_space=0,
        cache_dtype="auto",
        enable_prefix_caching=False,
    )

    scheduler_config = SchedulerConfig(
        max_num_seqs=max_num_seqs,
        max_num_batched_tokens=8192,
        max_model_len=32768,
        is_encoder_decoder=False,
        enable_chunked_prefill=True,
    )

    parallel_config = ParallelConfig(
        tensor_parallel_size=1,
    )

    compilation_config = CompilationConfig()

    return VllmConfig(
        model_config=model_config,
        cache_config=cache_config,
        parallel_config=parallel_config,
        scheduler_config=scheduler_config,
        compilation_config=compilation_config,
    )
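
# Example (illustrative): build a config with FlashMLA-style 64-token blocks
# and make it current while constructing backend objects:
#   vllm_config = create_minimal_vllm_config(block_size=64)
#   with set_current_vllm_config(vllm_config):
#       ...  # create impls / metadata builders here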


# ============================================================================
# Backend Configuration
# ============================================================================


# Backend name to class name prefix mapping
_BACKEND_NAME_MAP = {
    "flashattn_mla": "FlashAttnMLA",
    "flashmla": "FlashMLA",
    "flashinfer_mla": "FlashInferMLA",
    "cutlass_mla": "CutlassMLA",
}

# Special properties that differ from defaults
_BACKEND_PROPERTIES = {
    "flashmla": {
        "query_format": "concat",  # Single concatenated tensor (vs tuple)
        "block_size": 64,  # FlashMLA uses fixed block size
    },
    "flashinfer_mla": {
        "block_size": 64,  # FlashInfer MLA only supports 32 or 64
    },
}


def _get_backend_config(backend: str) -> dict:
    """
    Get backend configuration using naming conventions.

    All MLA backends follow the pattern:
    - Module: vllm.v1.attention.backends.mla.{backend}
    - Impl: {Name}Impl
    - Metadata: {Name}Metadata (or MLACommonMetadata)
    - DecodeMetadata: {Name}DecodeMetadata (or MLACommonDecodeMetadata)
    - MetadataBuilder: {Name}MetadataBuilder
    """
    if backend not in _BACKEND_NAME_MAP:
        raise ValueError(f"Unknown backend: {backend}")

    name = _BACKEND_NAME_MAP[backend]
    props = _BACKEND_PROPERTIES.get(backend, {})

    # Check if backend uses common metadata (FlashInfer, CUTLASS)
    uses_common = backend in ("flashinfer_mla", "cutlass_mla")

    return {
        "module": f"vllm.v1.attention.backends.mla.{backend}",
        "impl_class": f"{name}Impl",
        "metadata_class": "MLACommonMetadata" if uses_common else f"{name}Metadata",
        "decode_metadata_class": "MLACommonDecodeMetadata"
        if uses_common
        else f"{name}DecodeMetadata",
        "builder_class": f"{name}MetadataBuilder",
        "query_format": props.get("query_format", "tuple"),
        "block_size": props.get("block_size", None),
    }
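
# Example (derived from the tables above): _get_backend_config("flashmla")
# resolves to module "vllm.v1.attention.backends.mla.flashmla", impl_class
# "FlashMLAImpl", builder_class "FlashMLAMetadataBuilder", query_format
# "concat", and block_size 64.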


# ============================================================================
# Metadata Building Helpers
# ============================================================================


def _build_attention_metadata(
    requests: list,
    block_size: int,
    device: torch.device,
    builder_instance,
) -> tuple:
    """
    Build attention metadata from batch requests.

    Args:
        requests: List of BatchRequest objects
        block_size: KV cache block size
        device: Target device
        builder_instance: Metadata builder instance

    Returns:
        Tuple of (metadata, kv_cache_num_blocks)
    """
    q_lens = [r.q_len for r in requests]
    kv_lens = [r.kv_len for r in requests]
    total_q = sum(q_lens)
    max_kv = max(kv_lens)

    # Build query start locations
    q_start_cpu = torch.tensor(
        [0] + [sum(q_lens[: i + 1]) for i in range(len(q_lens))],
        dtype=torch.int32,
    )
    q_start_gpu = q_start_cpu.to(device)

    # Build sequence lengths
    seq_lens_cpu = torch.tensor(kv_lens, dtype=torch.int32)
    seq_lens_gpu = seq_lens_cpu.to(device)

    # Build num_computed_tokens (context length for each request)
    context_lens = [kv_len - q_len for q_len, kv_len in zip(q_lens, kv_lens)]
    num_computed_tokens_cpu = torch.tensor(context_lens, dtype=torch.int32)

    # Build block table
    num_blocks_per_req = [(kv + block_size - 1) // block_size for kv in kv_lens]
    max_num_blocks = max(num_blocks_per_req)

    block_table_cpu = np.zeros((len(requests), max_num_blocks), dtype=np.int32)
    current_block = 0
    for i, num_blocks in enumerate(num_blocks_per_req):
        for j in range(num_blocks):
            block_table_cpu[i, j] = current_block
            current_block += 1

    block_table_gpu = torch.from_numpy(block_table_cpu).to(device)

    # Build slot mapping
    slot_mapping_list = []
    for i, (q_len, kv_len, num_blocks) in enumerate(
        zip(q_lens, kv_lens, num_blocks_per_req)
    ):
        context_len = kv_len - q_len
        for j in range(q_len):
            token_kv_idx = context_len + j
            block_idx = token_kv_idx // block_size
            offset_in_block = token_kv_idx % block_size
            global_block_id = block_table_cpu[i, block_idx]
            slot_id = global_block_id * block_size + offset_in_block
            slot_mapping_list.append(slot_id)
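    # Worked example (illustrative): with block_size=128, context_len=100 and
    # j=0, token_kv_idx=100 lands in block_idx=0 at offset 100, so the slot is
    # block_table_cpu[i, 0] * 128 + 100.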

    slot_mapping = torch.tensor(slot_mapping_list, dtype=torch.int64, device=device)

    # Create CommonAttentionMetadata
    from vllm.v1.attention.backends.utils import CommonAttentionMetadata

    common_attn_metadata = CommonAttentionMetadata(
        num_reqs=len(requests),
        max_query_len=max(q_lens),
        max_seq_len=max_kv,
        num_actual_tokens=total_q,
        query_start_loc=q_start_gpu,
        query_start_loc_cpu=q_start_cpu,
        seq_lens=seq_lens_gpu,
        _seq_lens_cpu=seq_lens_cpu,
        _num_computed_tokens_cpu=num_computed_tokens_cpu,
        slot_mapping=slot_mapping,
        block_table_tensor=block_table_gpu,
        dcp_local_seq_lens=None,
    )

    # Use the production build() method
    metadata = builder_instance.build(
        common_prefix_len=0,
        common_attn_metadata=common_attn_metadata,
        fast_build=False,
    )

    return metadata, current_block


def _create_input_tensors(
    total_q: int,
    mla_dims: dict,
    query_format: str,
    device: torch.device,
    dtype: torch.dtype,
):
    """
    Create input tensors for both decode and prefill modes.

    MLA requires different tensor formats for decode vs prefill:
    - Decode: Uses kv_lora_rank (512) dimension
    - Prefill: Uses qk_nope_head_dim (128) to stay under FlashAttention's 256 limit

    Args:
        total_q: Total number of query tokens
        mla_dims: MLA dimension configuration
        query_format: Either "tuple" or "concat"
        device: Target device
        dtype: Tensor dtype

    Returns:
        Tuple of (decode_inputs, prefill_inputs)
        - decode_inputs: Query tensor(s) for decode mode
        - prefill_inputs: Dict with 'q', 'k_c_normed', 'k_pe', 'k_scale' for prefill
    """
    if query_format == "tuple":
        # Decode mode format: (q_nope, q_pe) where q_nope has kv_lora_rank dim
        q_nope_decode = torch.randn(
            total_q,
            mla_dims["num_q_heads"],
            mla_dims["kv_lora_rank"],
            device=device,
            dtype=dtype,
        )
        q_pe = torch.randn(
            total_q,
            mla_dims["num_q_heads"],
            mla_dims["qk_rope_head_dim"],
            device=device,
            dtype=dtype,
        )
        decode_inputs = (q_nope_decode, q_pe)

        # For prefill, we need q with qk_nope_head_dim instead of kv_lora_rank
        q_nope_prefill = torch.randn(
            total_q,
            mla_dims["num_q_heads"],
            mla_dims["qk_nope_head_dim"],
            device=device,
            dtype=dtype,
        )
        prefill_q = torch.cat([q_nope_prefill, q_pe], dim=-1)
    else:  # concat
        decode_inputs = torch.randn(
            total_q,
            mla_dims["num_q_heads"],
            mla_dims["kv_lora_rank"] + mla_dims["qk_rope_head_dim"],
            device=device,
            dtype=dtype,
        )
        # For prefill with concat format
        prefill_q = torch.randn(
            total_q,
            mla_dims["num_q_heads"],
            mla_dims["qk_nope_head_dim"] + mla_dims["qk_rope_head_dim"],
            device=device,
            dtype=dtype,
        )

    # Create additional inputs needed for prefill forward
    k_c_normed = torch.randn(
        total_q,
        mla_dims["kv_lora_rank"],
        device=device,
        dtype=dtype,
    )
    k_pe = torch.randn(
        total_q,
        1,  # Single head for MLA
        mla_dims["qk_rope_head_dim"],
        device=device,
        dtype=dtype,
    )
    k_scale = torch.ones(1, device=device, dtype=torch.float32)

    output = torch.zeros(
        total_q,
        mla_dims["num_q_heads"] * mla_dims["v_head_dim"],
        device=device,
        dtype=dtype,
    )

    prefill_inputs = {
        "q": prefill_q,
        "k_c_normed": k_c_normed,
        "k_pe": k_pe,
        "k_scale": k_scale,
        "output": output,
    }

    return decode_inputs, prefill_inputs


# ============================================================================
# Backend Initialization
# ============================================================================


def _create_backend_impl(
    backend_cfg: dict,
    mla_dims: dict,
    vllm_config: VllmConfig,
    device: torch.device,
):
    """
    Create backend implementation instance.

    Args:
        backend_cfg: Backend configuration dict
        mla_dims: MLA dimension configuration
        vllm_config: VllmConfig instance
        device: Target device

    Returns:
        Tuple of (impl, layer, builder_instance)
    """
    # Import backend classes
    backend_module = importlib.import_module(backend_cfg["module"])
    impl_class = getattr(backend_module, backend_cfg["impl_class"])

    # Calculate scale
    scale = 1.0 / np.sqrt(mla_dims["qk_nope_head_dim"] + mla_dims["qk_rope_head_dim"])

    # Create mock kv_b_proj layer for prefill mode
    mock_kv_b_proj = MockKVBProj(
        num_heads=mla_dims["num_q_heads"],
        qk_nope_head_dim=mla_dims["qk_nope_head_dim"],
        v_head_dim=mla_dims["v_head_dim"],
    )

    # Create impl
    impl = impl_class(
        num_heads=mla_dims["num_q_heads"],
        head_size=mla_dims["head_dim"],
        scale=scale,
        num_kv_heads=mla_dims["num_kv_heads"],
        alibi_slopes=None,
        sliding_window=None,
        kv_cache_dtype="auto",
        logits_soft_cap=None,
        attn_type="decoder",
        kv_sharing_target_layer_name=None,
        q_lora_rank=None,
        kv_lora_rank=mla_dims["kv_lora_rank"],
        qk_nope_head_dim=mla_dims["qk_nope_head_dim"],
        qk_rope_head_dim=mla_dims["qk_rope_head_dim"],
        qk_head_dim=mla_dims["qk_nope_head_dim"] + mla_dims["qk_rope_head_dim"],
        v_head_dim=mla_dims["v_head_dim"],
        kv_b_proj=mock_kv_b_proj,
    )

    # Initialize DCP attributes
    if not hasattr(impl, "dcp_world_size") or impl.dcp_world_size in (None, -1):
        impl.dcp_world_size = 1
        impl.dcp_rank = 0

    # Create KV cache spec for MockLayer
    from vllm.v1.kv_cache_interface import FullAttentionSpec

    kv_cache_spec = FullAttentionSpec(
        block_size=backend_cfg["block_size"] or vllm_config.cache_config.block_size,
        num_kv_heads=1,  # MLA uses 1 KV head
        head_size=576,  # MLA head dim
        dtype=torch.bfloat16,
    )

    # Create mock layer
    layer = MockLayer(device, impl=impl, kv_cache_spec=kv_cache_spec)

    # Create builder instance if needed
    builder_instance = None
    if backend_cfg["builder_class"]:
        builder_class = getattr(backend_module, backend_cfg["builder_class"])

        # Populate static_forward_context so builder can find the layer
        # MockLayer inherits from AttentionLayerBase, so isinstance checks pass
        vllm_config.compilation_config.static_forward_context = {"placeholder": layer}

        builder_instance = builder_class(
            kv_cache_spec=kv_cache_spec,
            layer_names=["placeholder"],
            vllm_config=vllm_config,
            device=device,
        )

    return impl, layer, builder_instance


# ============================================================================
# Config Helpers
# ============================================================================


def _extract_mla_dims_from_config(config) -> dict | None:
    """
    Extract MLA dimensions from BenchmarkConfig if all required fields are present.

    Args:
        config: BenchmarkConfig instance

    Returns:
        Dict with MLA dimensions if all fields are provided, None otherwise
    """
    # Check if all MLA-specific fields are provided
    if all(
        [
            config.kv_lora_rank is not None,
            config.qk_nope_head_dim is not None,
            config.qk_rope_head_dim is not None,
            config.v_head_dim is not None,
        ]
    ):
        return {
            "kv_lora_rank": config.kv_lora_rank,
            "qk_nope_head_dim": config.qk_nope_head_dim,
            "qk_rope_head_dim": config.qk_rope_head_dim,
            "v_head_dim": config.v_head_dim,
            "num_q_heads": config.num_q_heads,
            "num_kv_heads": config.num_kv_heads,
            "head_dim": config.head_dim,
        }
    # Fallback: if MLA fields not fully specified, try to construct from basic fields
    elif config.head_dim == 576:
        # This looks like a DeepSeek MLA config, use standard dimensions with custom
        # head count
        return {
            "kv_lora_rank": 512,
            "qk_nope_head_dim": 128,
            "qk_rope_head_dim": 64,
            "v_head_dim": 128,
            "num_q_heads": config.num_q_heads,
            "num_kv_heads": config.num_kv_heads,
            "head_dim": config.head_dim,
        }
    return None
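
# Note (explanatory): a config with head_dim=576 but without the explicit MLA
# fields is treated as DeepSeek-shaped and mapped to kv_lora_rank=512,
# qk_nope_head_dim=128, qk_rope_head_dim=64, v_head_dim=128.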


# ============================================================================
# Benchmark Execution
# ============================================================================


def _run_single_benchmark(
    config,
    impl,
    layer,
    builder_instance,
    backend_cfg: dict,
    mla_dims: dict,
    device: torch.device,
) -> BenchmarkResult:
    """
    Run a single benchmark iteration.

    Args:
        config: BenchmarkConfig instance
        impl: Backend implementation instance
        layer: MockLayer instance
        builder_instance: Metadata builder instance
        backend_cfg: Backend configuration dict
        mla_dims: MLA dimension configuration
        device: Target device

    Returns:
        BenchmarkResult with timing statistics
    """
    # Parse batch spec
    requests = parse_batch_spec(config.batch_spec)
    q_lens = [r.q_len for r in requests]
    total_q = sum(q_lens)

    # Determine block size
    block_size = backend_cfg["block_size"] or config.block_size

    # Build metadata
    metadata, num_blocks = _build_attention_metadata(
        requests, block_size, device, builder_instance
    )

    # Create KV cache
    kv_cache = torch.zeros(
        num_blocks,
        block_size,
        mla_dims["kv_lora_rank"] + mla_dims["qk_rope_head_dim"],
        device=device,
        dtype=torch.bfloat16,
    )

    # Create input tensors for both decode and prefill modes
    decode_inputs, prefill_inputs = _create_input_tensors(
        total_q,
        mla_dims,
        backend_cfg["query_format"],
        device,
        torch.bfloat16,
    )

    # Determine which forward method to use based on metadata
    if metadata.decode is not None:
        forward_fn = lambda: impl._forward_decode(
            decode_inputs, kv_cache, metadata, layer
        )
    elif metadata.prefill is not None:
        forward_fn = lambda: impl._forward_prefill(
            prefill_inputs["q"],
            prefill_inputs["k_c_normed"],
            prefill_inputs["k_pe"],
            kv_cache,
            metadata,
            prefill_inputs["k_scale"],
            prefill_inputs["output"],
        )
    else:
        raise RuntimeError("Metadata has neither decode nor prefill metadata")

    # Warmup
    for _ in range(config.warmup_iters):
        forward_fn()
    torch.cuda.synchronize()

    # Benchmark
    times = []
    for _ in range(config.repeats):
        start = torch.cuda.Event(enable_timing=True)
        end = torch.cuda.Event(enable_timing=True)

        start.record()
        for _ in range(config.num_layers):
            forward_fn()
        end.record()

        torch.cuda.synchronize()
        elapsed_ms = start.elapsed_time(end)
        times.append(elapsed_ms / 1000.0 / config.num_layers)
|
||||||
|
mean_time = float(np.mean(times))
|
||||||
|
return BenchmarkResult(
|
||||||
|
config=config,
|
||||||
|
mean_time=mean_time,
|
||||||
|
std_time=float(np.std(times)),
|
||||||
|
min_time=float(np.min(times)),
|
||||||
|
max_time=float(np.max(times)),
|
||||||
|
throughput_tokens_per_sec=total_q / mean_time if mean_time > 0 else 0,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def _run_mla_benchmark_batched(
|
||||||
|
backend: str,
|
||||||
|
configs_with_params: list[tuple], # [(config, threshold, num_splits), ...]
|
||||||
|
) -> list[BenchmarkResult]:
|
||||||
|
"""
|
||||||
|
Unified batched MLA benchmark runner for all backends.
|
||||||
|
|
||||||
|
Works for: flashattn_mla, flashmla, flashinfer_mla, cutlass_mla
|
||||||
|
|
||||||
|
This function reuses backend initialization across multiple benchmarks
|
||||||
|
to avoid setup/teardown overhead.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
backend: Backend name
|
||||||
|
configs_with_params: List of (config, threshold, num_splits) tuples
|
||||||
|
- threshold: reorder_batch_threshold (FlashAttn/FlashMLA only)
|
||||||
|
- num_splits: num_kv_splits (CUTLASS only)
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
List of BenchmarkResult objects
|
||||||
|
"""
|
||||||
|
if not configs_with_params:
|
||||||
|
return []
|
||||||
|
|
||||||
|
backend_cfg = _get_backend_config(backend)
|
||||||
|
device = torch.device(configs_with_params[0][0].device)
|
||||||
|
torch.cuda.set_device(device)
|
||||||
|
|
||||||
|
# Determine block size
|
||||||
|
config_block_size = configs_with_params[0][0].block_size
|
||||||
|
block_size = backend_cfg["block_size"] or config_block_size
|
||||||
|
|
||||||
|
# Extract MLA dimensions from the first config
|
||||||
|
first_config = configs_with_params[0][0]
|
||||||
|
mla_dims = _extract_mla_dims_from_config(first_config)
|
||||||
|
|
||||||
|
# If config didn't provide MLA dims, fall back to default model
|
||||||
|
if mla_dims is None:
|
||||||
|
mla_dims = setup_mla_dims("deepseek-v3")
|
||||||
|
|
||||||
|
# Create and set vLLM config for MLA (reused across all benchmarks)
|
||||||
|
vllm_config = create_minimal_vllm_config(
|
||||||
|
model_name="deepseek-v3", # Used only for model path
|
||||||
|
block_size=block_size,
|
||||||
|
mla_dims=mla_dims, # Use custom dims from config or default
|
||||||
|
)
|
||||||
|
|
||||||
|
results = []
|
||||||
|
|
||||||
|
with set_current_vllm_config(vllm_config):
|
||||||
|
# Create backend impl, layer, and builder (reused across benchmarks)
|
||||||
|
impl, layer, builder_instance = _create_backend_impl(
|
||||||
|
backend_cfg, mla_dims, vllm_config, device
|
||||||
|
)
|
||||||
|
|
||||||
|
# Run each benchmark with the shared impl
|
||||||
|
for config, threshold, num_splits in configs_with_params:
|
||||||
|
# Set threshold for this benchmark (FlashAttn/FlashMLA only)
|
||||||
|
original_threshold = None
|
||||||
|
if threshold is not None and builder_instance:
|
||||||
|
original_threshold = builder_instance.reorder_batch_threshold
|
||||||
|
builder_instance.reorder_batch_threshold = threshold
|
||||||
|
|
||||||
|
# Set num_splits for CUTLASS
|
||||||
|
original_num_splits = None
|
||||||
|
if num_splits is not None and hasattr(impl, "_num_kv_splits"):
|
||||||
|
original_num_splits = impl._num_kv_splits
|
||||||
|
impl._num_kv_splits = num_splits
|
||||||
|
|
||||||
|
try:
|
||||||
|
result = _run_single_benchmark(
|
||||||
|
config,
|
||||||
|
impl,
|
||||||
|
layer,
|
||||||
|
builder_instance,
|
||||||
|
backend_cfg,
|
||||||
|
mla_dims,
|
||||||
|
device,
|
||||||
|
)
|
||||||
|
results.append(result)
|
||||||
|
|
||||||
|
finally:
|
||||||
|
# Restore original threshold
|
||||||
|
if original_threshold is not None:
|
||||||
|
builder_instance.reorder_batch_threshold = original_threshold
|
||||||
|
|
||||||
|
# Restore original num_splits
|
||||||
|
if original_num_splits is not None:
|
||||||
|
impl._num_kv_splits = original_num_splits
|
||||||
|
|
||||||
|
return results
|
||||||
|
|
||||||
|
|
||||||
|
# ============================================================================
|
||||||
|
# Public API
|
||||||
|
# ============================================================================
|
||||||
|
|
||||||
|
|
||||||
|
def run_mla_benchmark(
|
||||||
|
backend: str,
|
||||||
|
config,
|
||||||
|
reorder_batch_threshold: int | None = None,
|
||||||
|
num_kv_splits: int | None = None,
|
||||||
|
) -> BenchmarkResult | list[BenchmarkResult]:
|
||||||
|
"""
|
||||||
|
Unified MLA benchmark runner for all backends.
|
||||||
|
|
||||||
|
Works for: flashattn_mla, flashmla, flashinfer_mla, cutlass_mla
|
||||||
|
|
||||||
|
Always uses batched execution internally for optimal performance.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
backend: Backend name (flashattn_mla, flashmla, flashinfer_mla, cutlass_mla)
|
||||||
|
config: BenchmarkConfig or list of (BenchmarkConfig, param) tuples
|
||||||
|
reorder_batch_threshold: Threshold override for FlashAttn/FlashMLA
|
||||||
|
(single config mode only)
|
||||||
|
num_kv_splits: Number of KV splits for CUTLASS (single config mode only)
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
BenchmarkResult (single mode) or list of BenchmarkResult (batched mode)
|
||||||
|
"""
|
||||||
|
# Normalize to batched mode: (config, threshold, num_splits)
|
||||||
|
if isinstance(config, list):
|
||||||
|
# Already in batched format
|
||||||
|
if len(config) > 0 and isinstance(config[0], tuple):
|
||||||
|
# Format: [(cfg, param), ...] where param is threshold or num_splits
|
||||||
|
if backend in ("flashattn_mla", "flashmla"):
|
||||||
|
configs_with_params = [(cfg, param, None) for cfg, param in config]
|
||||||
|
else: # cutlass_mla or flashinfer_mla
|
||||||
|
configs_with_params = [(cfg, None, param) for cfg, param in config]
|
||||||
|
else:
|
||||||
|
# Format: [cfg, ...] - just configs
|
||||||
|
configs_with_params = [(cfg, None, None) for cfg in config]
|
||||||
|
return_single = False
|
||||||
|
else:
|
||||||
|
# Single config: convert to batched format
|
||||||
|
configs_with_params = [(config, reorder_batch_threshold, num_kv_splits)]
|
||||||
|
return_single = True
|
||||||
|
|
||||||
|
# Use unified batched execution
|
||||||
|
results = _run_mla_benchmark_batched(backend, configs_with_params)
|
||||||
|
|
||||||
|
# Return single result or list based on input
|
||||||
|
return results[0] if return_single else results
|
||||||
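A minimal usage sketch for the public entry point above (illustrative only:
the BenchmarkConfig fields shown are assumptions inferred from how the runner
reads them, and the batch-spec string is hypothetical):

    cfg = BenchmarkConfig(
        backend="flashmla",
        batch_spec="32x(1,1024)",  # hypothetical: 32 single-token decode requests
        block_size=64,
        device="cuda:0",
    )
    result = run_mla_benchmark("flashmla", cfg, reorder_batch_threshold=1)
    print(result.mean_time, result.throughput_tokens_per_sec)

Passing a list of (config, param) tuples instead returns a list of results
while reusing one backend impl, layer, and metadata builder across all of them.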
481
benchmarks/attention_benchmarks/runner.py
Normal file
@@ -0,0 +1,481 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

"""
Standard attention benchmark runner - shared utilities for non-MLA benchmarks.

This module provides helpers for running standard attention backends
(FlashAttention, Triton, FlashInfer) with real vLLM integration.
"""

import types

import numpy as np
import torch
from batch_spec import parse_batch_spec, reorder_for_flashinfer
from common import BenchmarkConfig, BenchmarkResult, MockLayer, get_attention_scale

from vllm.config import (
    CacheConfig,
    CompilationConfig,
    DeviceConfig,
    LoadConfig,
    ModelConfig,
    ParallelConfig,
    SchedulerConfig,
    VllmConfig,
)
from vllm.v1.attention.backends.utils import CommonAttentionMetadata
from vllm.v1.kv_cache_interface import FullAttentionSpec

# ============================================================================
# Backend Configuration
# ============================================================================


_BACKEND_CONFIG = {
    "flash": {
        "module": "vllm.v1.attention.backends.flash_attn",
        "backend_class": "FlashAttentionBackend",
        "dtype": torch.float16,
        "cache_layout": "standard",
        # ^ [2, num_blocks, block_size, num_kv_heads, head_dim]
    },
    "triton": {
        "module": "vllm.v1.attention.backends.triton_attn",
        "backend_class": "TritonAttentionBackend",
        "dtype": torch.float32,
        "cache_layout": "standard",
    },
    "flashinfer": {
        "module": "vllm.v1.attention.backends.flashinfer",
        "backend_class": "FlashInferBackend",
        "dtype": torch.float16,
        "cache_layout": "flashinfer",
        # ^ [num_blocks, 2, block_size, num_kv_heads, head_dim]
    },
}


def _get_backend_config(backend: str) -> dict:
    if backend not in _BACKEND_CONFIG:
        raise ValueError(
            f"Unknown backend: {backend}. "
            f"Available: {', '.join(_BACKEND_CONFIG.keys())}"
        )
    return _BACKEND_CONFIG[backend]


# ============================================================================
# Metadata Building Helpers
# ============================================================================


def _build_common_attn_metadata(
    q_lens: list[int],
    kv_lens: list[int],
    block_size: int,
    device: torch.device,
) -> CommonAttentionMetadata:
    """Build CommonAttentionMetadata from query/kv lengths."""
    batch_size = len(q_lens)
    total_tokens = sum(q_lens)

    query_start_loc = torch.zeros(batch_size + 1, dtype=torch.int32, device=device)
    query_start_loc[1:] = torch.tensor(q_lens, dtype=torch.int32, device=device).cumsum(
        0
    )
    query_start_loc_cpu = query_start_loc.cpu()

    seq_lens = torch.tensor(kv_lens, dtype=torch.int32, device=device)
    seq_lens_cpu = seq_lens.cpu()
    max_seq_len = int(seq_lens_cpu.max())

    context_lens = [kv - q for kv, q in zip(kv_lens, q_lens)]
    num_computed_tokens_cpu = torch.tensor(context_lens, dtype=torch.int32)

    max_blocks = (max(kv_lens) + block_size - 1) // block_size
    num_blocks = batch_size * max_blocks
    block_table_tensor = torch.arange(
        num_blocks, dtype=torch.int32, device=device
    ).view(batch_size, max_blocks)
    slot_mapping = torch.arange(total_tokens, dtype=torch.int64, device=device)

    max_query_len = max(q_lens)

    return CommonAttentionMetadata(
        query_start_loc=query_start_loc,
        query_start_loc_cpu=query_start_loc_cpu,
        seq_lens=seq_lens,
        seq_lens_cpu=seq_lens_cpu,
        num_computed_tokens_cpu=num_computed_tokens_cpu,
        num_reqs=batch_size,
        num_actual_tokens=total_tokens,
        max_query_len=max_query_len,
        max_seq_len=max_seq_len,
        block_table_tensor=block_table_tensor,
        slot_mapping=slot_mapping,
        causal=True,
    )


def _create_vllm_config(
    config: BenchmarkConfig,
    dtype: torch.dtype,
    max_num_blocks: int,
) -> VllmConfig:
    """Create a VllmConfig for benchmarking with mock model methods."""
    model_config = ModelConfig(
        model="meta-llama/Meta-Llama-3-8B",
        tokenizer="meta-llama/Meta-Llama-3-8B",
        trust_remote_code=False,
        dtype=dtype,
        seed=0,
        max_model_len=1024,
    )

    cache_config = CacheConfig(
        block_size=config.block_size,
        cache_dtype="auto",
        swap_space=0,
    )
    cache_config.num_gpu_blocks = max_num_blocks
    cache_config.num_cpu_blocks = 0

    parallel_config = ParallelConfig(tensor_parallel_size=1)
    scheduler_config = SchedulerConfig(
        max_num_seqs=256,
        max_num_batched_tokens=8192,
        max_model_len=8192,
        is_encoder_decoder=False,
        enable_chunked_prefill=True,
    )
    device_config = DeviceConfig()
    load_config = LoadConfig()
    compilation_config = CompilationConfig()

    # Add mock methods for benchmark config values
    model_config.get_num_layers = types.MethodType(
        lambda self: config.num_layers, model_config
    )
    model_config.get_sliding_window_for_layer = types.MethodType(
        lambda self, i: None, model_config
    )
    model_config.get_logits_soft_cap_for_layer = types.MethodType(
        lambda self, i: 0.0, model_config
    )
    model_config.get_sm_scale_for_layer = types.MethodType(
        lambda self, i: 1.0 / config.head_dim**0.5, model_config
    )
    model_config.get_num_attention_heads = types.MethodType(
        lambda self, parallel_config=None: config.num_q_heads, model_config
    )
    model_config.get_num_kv_heads = types.MethodType(
        lambda self, parallel_config=None: config.num_kv_heads, model_config
    )
    model_config.get_head_size = types.MethodType(
        lambda self: config.head_dim, model_config
    )
    model_config.get_sliding_window = types.MethodType(lambda self: None, model_config)

    return VllmConfig(
        model_config=model_config,
        cache_config=cache_config,
        parallel_config=parallel_config,
        scheduler_config=scheduler_config,
        device_config=device_config,
        load_config=load_config,
        compilation_config=compilation_config,
    )


# ============================================================================
# Backend Initialization
# ============================================================================


def _create_backend_impl(
    backend_cfg: dict,
    config: BenchmarkConfig,
    device: torch.device,
):
    """Create backend implementation instance."""
    import importlib

    backend_module = importlib.import_module(backend_cfg["module"])
    backend_class = getattr(backend_module, backend_cfg["backend_class"])

    scale = get_attention_scale(config.head_dim)
    dtype = backend_cfg["dtype"]

    impl = backend_class.get_impl_cls()(
        num_heads=config.num_q_heads,
        head_size=config.head_dim,
        scale=scale,
        num_kv_heads=config.num_kv_heads,
        alibi_slopes=None,
        sliding_window=None,
        kv_cache_dtype="auto",
    )

    kv_cache_spec = FullAttentionSpec(
        block_size=config.block_size,
        num_kv_heads=config.num_kv_heads,
        head_size=config.head_dim,
        dtype=dtype,
    )

    layer = MockLayer(device, kv_cache_spec=kv_cache_spec)

    return backend_class, impl, layer, dtype


def _create_metadata_builder(
    backend_class,
    kv_cache_spec: FullAttentionSpec,
    vllm_config: VllmConfig,
    device: torch.device,
):
    """Create metadata builder instance."""
    return backend_class.get_builder_cls()(
        kv_cache_spec=kv_cache_spec,
        layer_names=["layer_0"],
        vllm_config=vllm_config,
        device=device,
    )


# ============================================================================
# Tensor Creation Helpers
# ============================================================================


def _create_input_tensors(
    config: BenchmarkConfig,
    total_q: int,
    device: torch.device,
    dtype: torch.dtype,
) -> tuple:
    """Create Q, K, V input tensors for all layers."""
    q_list = [
        torch.randn(
            total_q, config.num_q_heads, config.head_dim, device=device, dtype=dtype
        )
        for _ in range(config.num_layers)
    ]
    k_list = [
        torch.randn(
            total_q, config.num_kv_heads, config.head_dim, device=device, dtype=dtype
        )
        for _ in range(config.num_layers)
    ]
    v_list = [
        torch.randn(
            total_q, config.num_kv_heads, config.head_dim, device=device, dtype=dtype
        )
        for _ in range(config.num_layers)
    ]
    return q_list, k_list, v_list


def _create_kv_cache(
    config: BenchmarkConfig,
    max_num_blocks: int,
    cache_layout: str,
    device: torch.device,
    dtype: torch.dtype,
) -> list:
    """Create KV cache tensors for all layers."""
    if cache_layout == "flashinfer":
        # FlashInfer layout: [num_blocks, 2, block_size, num_kv_heads, head_dim]
        cache_list = [
            torch.zeros(
                max_num_blocks,
                2,
                config.block_size,
                config.num_kv_heads,
                config.head_dim,
                device=device,
                dtype=dtype,
            )
            for _ in range(config.num_layers)
        ]
    else:
        # Standard layout: [2, num_blocks, block_size, num_kv_heads, head_dim]
        cache_list = [
            torch.zeros(
                2,
                max_num_blocks,
                config.block_size,
                config.num_kv_heads,
                config.head_dim,
                device=device,
                dtype=dtype,
            )
            for _ in range(config.num_layers)
        ]
    return cache_list


# ============================================================================
# Benchmark Execution
# ============================================================================


def _run_single_benchmark(
    config: BenchmarkConfig,
    impl,
    layer,
    q_list: list,
    k_list: list,
    v_list: list,
    cache_list: list,
    attn_metadata,
    device: torch.device,
    dtype: torch.dtype,
) -> tuple:
    """Run single benchmark iteration with warmup and timing loop."""
    total_q = q_list[0].shape[0]
    out = torch.empty(
        total_q, config.num_q_heads, config.head_dim, device=device, dtype=dtype
    )

    # Warmup
    for _ in range(config.warmup_iters):
        for i in range(config.num_layers):
            impl.forward(
                layer,
                q_list[i],
                k_list[i],
                v_list[i],
                cache_list[i],
                attn_metadata,
                output=out,
            )
    torch.cuda.synchronize()

    # Benchmark
    times = []
    for _ in range(config.repeats):
        start = torch.cuda.Event(enable_timing=True)
        end = torch.cuda.Event(enable_timing=True)

        start.record()
        for i in range(config.num_layers):
            impl.forward(
                layer,
                q_list[i],
                k_list[i],
                v_list[i],
                cache_list[i],
                attn_metadata,
                output=out,
            )
        end.record()

        torch.cuda.synchronize()
        elapsed_ms = start.elapsed_time(end)
        times.append(elapsed_ms / 1000.0 / config.num_layers)  # seconds per layer

    mem_stats = {}
    if config.profile_memory:
        mem_stats = {
            "allocated_mb": torch.cuda.memory_allocated(device) / 1024**2,
            "reserved_mb": torch.cuda.memory_reserved(device) / 1024**2,
        }

    return times, mem_stats


# ============================================================================
# Public API
# ============================================================================


def run_attention_benchmark(config: BenchmarkConfig) -> BenchmarkResult:
    """
    Run standard attention benchmark with real kernels.

    Supports: flash, triton, flashinfer

    Args:
        config: Benchmark configuration

    Returns:
        BenchmarkResult with timing and memory statistics
    """
    device = torch.device(config.device)
    torch.cuda.set_device(device)

    backend_cfg = _get_backend_config(config.backend)

    requests = parse_batch_spec(config.batch_spec)

    if config.backend == "flashinfer":
        requests = reorder_for_flashinfer(requests)

    q_lens = [r.q_len for r in requests]
    kv_lens = [r.kv_len for r in requests]
    total_q = sum(q_lens)
    max_kv = max(kv_lens)

    max_num_blocks = (max_kv + config.block_size - 1) // config.block_size

    backend_class, impl, layer, dtype = _create_backend_impl(
        backend_cfg, config, device
    )

    common_metadata = _build_common_attn_metadata(
        q_lens, kv_lens, config.block_size, device
    )

    kv_cache_spec = FullAttentionSpec(
        block_size=config.block_size,
        num_kv_heads=config.num_kv_heads,
        head_size=config.head_dim,
        dtype=dtype,
    )

    vllm_config = _create_vllm_config(config, dtype, max_num_blocks)

    builder = _create_metadata_builder(
        backend_class, kv_cache_spec, vllm_config, device
    )

    attn_metadata = builder.build(
        common_prefix_len=0,
        common_attn_metadata=common_metadata,
    )

    q_list, k_list, v_list = _create_input_tensors(config, total_q, device, dtype)

    cache_list = _create_kv_cache(
        config, max_num_blocks, backend_cfg["cache_layout"], device, dtype
    )

    times, mem_stats = _run_single_benchmark(
        config,
        impl,
        layer,
        q_list,
        k_list,
        v_list,
        cache_list,
        attn_metadata,
        device,
        dtype,
    )

    mean_time = np.mean(times)
    throughput = total_q / mean_time if mean_time > 0 else 0

    return BenchmarkResult(
        config=config,
        mean_time=mean_time,
        std_time=np.std(times),
        min_time=np.min(times),
        max_time=np.max(times),
        throughput_tokens_per_sec=throughput,
        memory_allocated_mb=mem_stats.get("allocated_mb"),
        memory_reserved_mb=mem_stats.get("reserved_mb"),
    )
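As with the MLA runner, a short illustrative sketch of the public API above
(field names assumed from the code; the batch-spec string is hypothetical):

    cfg = BenchmarkConfig(
        backend="flashinfer",
        batch_spec="8x(512,512)",  # hypothetical: 8 prefill requests
        block_size=16,
        device="cuda:0",
        profile_memory=True,
    )
    result = run_attention_benchmark(cfg)
    print(result.mean_time, result.memory_allocated_mb)

Note that for the flashinfer backend the parsed requests are first passed
through reorder_for_flashinfer(), which rearranges them into the request
ordering that backend expects before metadata is built.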
@@ -1,244 +0,0 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.

from packaging import version

from vllm.model_executor.layers.quantization.utils.bitblas_utils import (
    MINIMUM_BITBLAS_VERSION,
)

try:
    import bitblas

    if version.parse(bitblas.__version__) < version.parse(MINIMUM_BITBLAS_VERSION):
        raise ImportError(
            "bitblas version is wrong. Please "
            f"install bitblas>={MINIMUM_BITBLAS_VERSION}"
        )
except ImportError as e:
    bitblas_import_exception = e
    raise ValueError(
        "Trying to use the bitblas backend, but could not import "
        f"with the following error: {bitblas_import_exception}. "
        "Please install bitblas through the following command: "
        f"`pip install bitblas>={MINIMUM_BITBLAS_VERSION}`"
    ) from bitblas_import_exception

from bitblas import Matmul, MatmulConfig, auto_detect_nvidia_target

from vllm.utils.argparse_utils import FlexibleArgumentParser

parser = FlexibleArgumentParser(
    description="Benchmark BitBLAS int4 on a specific target."
)

# Add arguments to the parser
parser.add_argument(
    "--target",
    type=str,
    default=auto_detect_nvidia_target(),
    help="Specify the target device for benchmarking.",
)
parser.add_argument(
    "--group_size", type=int, default=None, help="Group size for grouped quantization."
)
parser.add_argument(
    "--A_dtype",
    type=str,
    default="float16",
    choices=["float16", "float32", "float64", "int32", "int8"],
    help="Data type of activation A.",
)
parser.add_argument(
    "--W_dtype",
    type=str,
    default="int4",
    choices=[
        "float16",
        "float32",
        "float64",
        "int32",
        "int8",
        "int4",
        "int2",
        "int1",
        "nf4",
        "fp4_e2m1",
    ],
    help="Data type of weight W.",
)
parser.add_argument(
    "--accum_dtype",
    type=str,
    default="float16",
    choices=["float16", "int32"],
    help="Data type for accumulation.",
)
parser.add_argument(
    "--out_dtype",
    type=str,
    default="float16",
    choices=["float16", "float32", "int32", "int8"],
    help="Data type for output.",
)
parser.add_argument(
    "--layout",
    type=str,
    default="nt",
    choices=["nt", "nn"],
    help="Matrix layout, 'nt' for non-transpose A and transpose W.",
)
parser.add_argument(
    "--with_bias", action="store_true", help="Include bias in the benchmark."
)
parser.add_argument(
    "--with_scaling",
    action="store_true",
    help="Include scaling factor in the quantization.",
)
parser.add_argument(
    "--with_zeros", action="store_true", help="Include zeros in the quantization."
)
parser.add_argument(
    "--zeros_mode",
    type=str,
    default=None,
    choices=["original", "rescale", "quantized"],
    help="Specify the mode for calculating zeros.",
)

# Parse the arguments
args = parser.parse_args()

# Assign arguments to variables
target = args.target
A_dtype = args.A_dtype
W_dtype = args.W_dtype
accum_dtype = args.accum_dtype
out_dtype = args.out_dtype
layout = args.layout
with_bias = args.with_bias
group_size = args.group_size
with_scaling = args.with_scaling
with_zeros = args.with_zeros
zeros_mode = args.zeros_mode

# Define a list of shared arguments that repeat in every config
shared_args = [
    A_dtype,
    W_dtype,
    out_dtype,
    accum_dtype,
    layout,
    with_bias,
    group_size,
    with_scaling,
    with_zeros,
    zeros_mode,
]

# Define just the (M, K, N) shapes in a more compact list
shapes = [
    # square test
    (1, 16384, 16384),
    # BLOOM-176B
    (1, 43008, 14336),
    (1, 14336, 14336),
    (1, 57344, 14336),
    (1, 14336, 57344),
    # OPT-65B
    (1, 9216, 9216),
    (1, 36864, 9216),
    (1, 9216, 36864),
    (1, 22016, 8192),
    # LLAMA-70B/65B
    (1, 8192, 22016),
    (1, 8192, 8192),
    (1, 28672, 8192),
    (1, 8192, 28672),
    # square test
    (16384, 16384, 16384),
    # BLOOM-176B
    (8192, 43008, 14336),
    (8192, 14336, 14336),
    (8192, 57344, 14336),
    (8192, 14336, 57344),
    # OPT-65B
    (8192, 9216, 9216),
    (8192, 36864, 9216),
    (8192, 9216, 36864),
    (8192, 22016, 8192),
    # LLAMA-70B/65B
    (8192, 8192, 22016),
    (8192, 8192, 8192),
    (8192, 28672, 8192),
    (8192, 8192, 28672),
]

# Build test shapes with all the shared arguments
test_shapes = [(MatmulConfig, Matmul, (*shape, *shared_args)) for shape in shapes]

benchmark_sets = []
benchmark_sets.extend(test_shapes)

benchmark_results = {}
for config_class, operator, input_args in benchmark_sets:
    config = config_class(*input_args)
    matmul = operator(config, target=target, enable_tuning=True)
    kernel_latency = matmul.profile_latency()

    print("Time cost is: {:.3f} ms".format(kernel_latency))

    profile_config = {
        f"{operator.__name__}-{'-'.join([str(i) for i in input_args])}": {
            "BitBLAS_top20_latency": kernel_latency,
        }
    }

    benchmark_results.update(profile_config)

# Define headers for the table
headers = [
    "PrimFunc",
    "Input Arguments",
    "BitBLAS Top20 Latency",
]

# Calculate column widths for pretty printing
col_widths = [0, 0, 0]
for config_key, values in benchmark_results.items():
    args_split = config_key.split("-")
    func_name = args_split[0]
    input_args_str = "-".join(args_split[1:])
    col_widths[0] = max(col_widths[0], len(func_name) + 2, len(headers[0]) + 2)
    col_widths[1] = max(col_widths[1], len(input_args_str) + 2, len(headers[1]) + 2)
    col_widths[2] = max(
        col_widths[2],
        len(f"{values['BitBLAS_top20_latency']:.3f} ms") + 2,
        len(headers[2]) + 2,
    )
    # break only if you want to measure widths from a single example;
    # otherwise, let it loop over all items.

# Print header
for i, header in enumerate(headers):
    headers[i] = header.ljust(col_widths[i])
print("".join(headers))
print("-" * sum(col_widths))

# Print rows
for config_key, values in benchmark_results.items():
    args_split = config_key.split("-")
    func_name = args_split[0]
    input_args_str = "-".join(args_split[1:])
    row = [
        func_name,
        input_args_str,
        f"{values['BitBLAS_top20_latency']:.3f} ms",
    ]
    row_str = "".join(
        [str(cell).ljust(col_widths[idx]) for idx, cell in enumerate(row)]
    )
    print(row_str)
@@ -197,7 +197,7 @@ def bench_run(
     )

     kernel = mk.FusedMoEModularKernel(
-        MoEPrepareAndFinalizeNoEP(defer_input_quant=True),
+        MoEPrepareAndFinalizeNoEP(),
         CutlassExpertsFp4(
             make_dummy_moe_config(),
             quant_config=quant_config,
@@ -242,7 +242,7 @@ def bench_run(
     )

     kernel = mk.FusedMoEModularKernel(
-        MoEPrepareAndFinalizeNoEP(defer_input_quant=True),
+        MoEPrepareAndFinalizeNoEP(),
         CutlassExpertsFp4(
             make_dummy_moe_config(),
             quant_config=quant_config,
@@ -6,12 +6,6 @@ import torch.utils.benchmark as benchmark
 from benchmark_shapes import WEIGHT_SHAPES

 from vllm import _custom_ops as ops
-from vllm.model_executor.layers.quantization.gptq_marlin_24 import (
-    GPTQ_MARLIN_24_MAX_PARALLEL,
-    GPTQ_MARLIN_24_MIN_THREAD_N,
-    GPTQ_MARLIN_24_SUPPORTED_GROUP_SIZES,
-    GPTQ_MARLIN_24_SUPPORTED_QUANT_TYPES,
-)
 from vllm.model_executor.layers.quantization.utils.allspark_utils import (
     ALLSPARK_AMPERE_M_CUBLAS_THRESHOLD,
     ALLSPARK_SUPPORTED_QUANT_TYPES,
@@ -34,9 +28,6 @@ from vllm.model_executor.layers.quantization.utils.marlin_utils_test import (
     awq_marlin_quantize,
     marlin_quantize,
 )
-from vllm.model_executor.layers.quantization.utils.marlin_utils_test_24 import (
-    marlin_24_quantize,
-)
 from vllm.model_executor.layers.quantization.utils.quant_utils import (
     gptq_pack,
     gptq_quantize_weights,
@@ -78,14 +69,7 @@ def bench_run(
     if size_k % group_size != 0:
         return

-    marlin_24_supported = (
-        quant_type in GPTQ_MARLIN_24_SUPPORTED_QUANT_TYPES
-        and group_size in GPTQ_MARLIN_24_SUPPORTED_GROUP_SIZES
-    )
-    repack_supported = (
-        quant_type in GPTQ_MARLIN_24_SUPPORTED_QUANT_TYPES
-        and group_size in MARLIN_SUPPORTED_GROUP_SIZES
-    )
+    repack_supported = group_size in MARLIN_SUPPORTED_GROUP_SIZES
     allspark_supported = (
         quant_type in ALLSPARK_SUPPORTED_QUANT_TYPES
         and group_size == -1
@@ -126,14 +110,6 @@ def bench_run(
         marlin_sort_indices,
     )

-    def gen_marlin_24_params():
-        marlin_24_w_ref = marlin_24_q_w_comp = marlin_24_meta = marlin_24_s = None
-        if marlin_24_supported:
-            (marlin_24_w_ref, marlin_24_q_w_comp, marlin_24_meta, marlin_24_s) = (
-                marlin_24_quantize(b, quant_type, group_size)
-            )
-        return (marlin_24_w_ref, marlin_24_q_w_comp, marlin_24_meta, marlin_24_s)
-
     def gen_repack_params():
         q_w_gptq = None
         repack_sort_indices = None
@@ -188,9 +164,6 @@ def bench_run(
         marlin_g_idx,
         marlin_sort_indices,
     ) = gen_marlin_params()
-    marlin_24_w_ref, marlin_24_q_w_comp, marlin_24_meta, marlin_24_s = (
-        gen_marlin_24_params()
-    )
     q_w_gptq, repack_sort_indices = gen_repack_params()
     qw_reorder, s_reorder, zp_reorder, sm_count, sm_version, CUBLAS_M_THRESHOLD = (
         gen_allspark_params()
@@ -200,9 +173,6 @@ def bench_run(
     marlin_workspace = MarlinWorkspace(
         size_n, GPTQ_MARLIN_MIN_THREAD_N, GPTQ_MARLIN_MAX_PARALLEL
     )
-    marlin_24_workspace = MarlinWorkspace(
-        size_n, GPTQ_MARLIN_24_MIN_THREAD_N, GPTQ_MARLIN_24_MAX_PARALLEL
-    )

     globals = {
         # Gen params
@@ -222,12 +192,6 @@ def bench_run(
         "marlin_sort_indices": marlin_sort_indices,
         "marlin_workspace": marlin_workspace,
         "is_k_full": is_k_full,
-        # Marlin_24 params
-        "marlin_24_w_ref": marlin_24_w_ref,
-        "marlin_24_q_w_comp": marlin_24_q_w_comp,
-        "marlin_24_meta": marlin_24_meta,
-        "marlin_24_s": marlin_24_s,
-        "marlin_24_workspace": marlin_24_workspace,
         # GPTQ params
         "q_w_gptq": q_w_gptq,
         "repack_sort_indices": repack_sort_indices,
@@ -240,7 +204,6 @@ def bench_run(
         "CUBLAS_M_THRESHOLD": CUBLAS_M_THRESHOLD,
         # Kernels
         "marlin_gemm": ops.marlin_gemm,
-        "gptq_marlin_24_gemm": ops.gptq_marlin_24_gemm,
         "gptq_marlin_repack": ops.gptq_marlin_repack,
         "allspark_w8a16_gemm": ops.allspark_w8a16_gemm,
     }
@@ -281,17 +244,6 @@ def bench_run(
             ).blocked_autorange(min_run_time=min_run_time)
         )

-    if marlin_24_supported:
-        results.append(
-            benchmark.Timer(
-                stmt="output = gptq_marlin_24_gemm(a, marlin_24_q_w_comp, marlin_24_meta, marlin_24_s, marlin_24_workspace.scratch, quant_type, size_m, size_n, size_k)",  # noqa: E501
-                globals=globals,
-                label=label,
-                sub_label=sub_label,
-                description="gptq_marlin_24_gemm",
-            ).blocked_autorange(min_run_time=min_run_time)
-        )
-
     if repack_supported:
         results.append(
             benchmark.Timer(
@@ -10,8 +10,6 @@ from transformers import AutoConfig

 from vllm.model_executor.layers.fused_moe import fused_topk
 from vllm.model_executor.layers.fused_moe.moe_permute_unpermute import (
-    _moe_permute,
-    _moe_unpermute_and_reduce,
     moe_permute,
     moe_unpermute,
 )
@@ -41,7 +39,6 @@ def benchmark_permute(
     use_fp8_w8a8: bool,
     use_int8_w8a16: bool,
     num_iters: int = 100,
-    use_customized_permute: bool = False,
 ) -> float:
     # init_dtype = torch.float16 if use_fp8_w8a8 else dtype
     hidden_states = torch.randn(num_tokens, hidden_size, dtype=dtype)
@@ -64,14 +61,7 @@ def benchmark_permute(
         input_gating.copy_(gating_output[i])

     def run():
-        if use_customized_permute:
-            (
-                permuted_hidden_states,
-                a1q_scale,
-                first_token_off,
-                inv_perm_idx,
-                m_indices,
-            ) = moe_permute(
+        moe_permute(
             qhidden_states,
             a1q_scale=None,
             topk_ids=topk_ids,
@@ -79,14 +69,6 @@ def benchmark_permute(
             expert_map=None,
             align_block_size=align_block_size,
         )
-        else:
-            (
-                permuted_hidden_states,
-                a1q_scale,
-                sorted_token_ids,
-                expert_ids,
-                inv_perm,
-            ) = _moe_permute(qhidden_states, None, topk_ids, num_experts, None, 16)

     # JIT compilation & warmup
     run()
@@ -131,11 +113,9 @@ def benchmark_unpermute(
     use_fp8_w8a8: bool,
     use_int8_w8a16: bool,
     num_iters: int = 100,
-    use_customized_permute: bool = False,
 ) -> float:
     # init_dtype = torch.float16 if use_fp8_w8a8 else dtype
     hidden_states = torch.randn(num_tokens, hidden_size, dtype=dtype)
-    output_hidden_states = torch.empty_like(hidden_states)
     if use_fp8_w8a8:
         align_block_size = 128  # deepgemm needs 128 m aligned block
         qhidden_states, scale = _fp8_quantize(hidden_states, None, None)
@@ -150,13 +130,12 @@ def benchmark_unpermute(
     )

     def prepare():
-        if use_customized_permute:
         (
             permuted_hidden_states,
-            a1q_scale,
+            _,
             first_token_off,
             inv_perm_idx,
-            m_indices,
+            _,
         ) = moe_permute(
             qhidden_states,
             a1q_scale=None,
@@ -170,35 +149,10 @@ def benchmark_unpermute(
             permuted_hidden_states.to(dtype),
             first_token_off,
             inv_perm_idx,
-            m_indices,
-        )
-        else:
-            (
-                permuted_qhidden_states,
-                a1q_scale,
-                sorted_token_ids,
-                expert_ids,
-                inv_perm,
-            ) = _moe_permute(
-                qhidden_states, None, topk_ids, num_experts, None, block_m=16
-            )
-            # convert to fp16/bf16 as gemm output
-            return (
-                permuted_qhidden_states.to(dtype),
-                a1q_scale,
-                sorted_token_ids,
-                expert_ids,
-                inv_perm,
-            )
         )

     def run(input: tuple):
-        if use_customized_permute:
-            (
-                permuted_hidden_states,
-                first_token_off,
-                inv_perm_idx,
-                m_indices,
-            ) = input
+        (permuted_hidden_states, first_token_off, inv_perm_idx) = input
         output = torch.empty_like(hidden_states)
         moe_unpermute(
             output,
@@ -207,21 +161,6 @@ def benchmark_unpermute(
             inv_perm_idx,
             first_token_off,
         )
-        else:
-            (
-                permuted_hidden_states,
-                a1q_scale,
-                sorted_token_ids,
-                expert_ids,
-                inv_perm,
-            ) = input
-            _moe_unpermute_and_reduce(
-                output_hidden_states,
-                permuted_hidden_states,
-                inv_perm,
-                topk_weights,
-                True,
-            )

     # JIT compilation & warmup
     input = prepare()
@@ -276,8 +215,7 @@ class BenchmarkWorker:
         dtype: torch.dtype,
         use_fp8_w8a8: bool,
         use_int8_w8a16: bool,
-        use_customized_permute: bool = False,
-    ) -> tuple[dict[str, int], float]:
+    ) -> tuple[float, float]:
         set_random_seed(self.seed)

         permute_time = benchmark_permute(
@@ -289,7 +227,6 @@ class BenchmarkWorker:
             use_fp8_w8a8,
             use_int8_w8a16,
             num_iters=100,
-            use_customized_permute=use_customized_permute,
         )
         unpermute_time = benchmark_unpermute(
             num_tokens,
@@ -300,7 +237,6 @@ class BenchmarkWorker:
             use_fp8_w8a8,
             use_int8_w8a16,
             num_iters=100,
-            use_customized_permute=use_customized_permute,
         )
         return permute_time, unpermute_time

@@ -347,7 +283,6 @@ def main(args: argparse.Namespace):
     dtype = torch.float16 if current_platform.is_rocm() else config.dtype
     use_fp8_w8a8 = args.dtype == "fp8_w8a8"
     use_int8_w8a16 = args.dtype == "int8_w8a16"
-    use_customized_permute = args.use_customized_permute

     if args.batch_size is None:
         batch_sizes = [
@@ -399,7 +334,6 @@ def main(args: argparse.Namespace):
                 dtype,
                 use_fp8_w8a8,
                 use_int8_w8a16,
-                use_customized_permute,
             )
             for batch_size in batch_sizes
         ],
@@ -419,7 +353,6 @@ if __name__ == "__main__":
     parser.add_argument(
         "--dtype", type=str, choices=["auto", "fp8_w8a8", "int8_w8a16"], default="auto"
     )
-    parser.add_argument("--use-customized-permute", action="store_true")
     parser.add_argument("--seed", type=int, default=0)
     parser.add_argument("--batch-size", type=int, required=False)
     parser.add_argument("--trust-remote-code", action="store_true")
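For context, a self-contained sketch of what a MoE token permute/unpermute
round trip does (simplified and illustrative only; the real moe_permute /
moe_unpermute kernels being benchmarked above also handle quantization
scales, expert maps, and block alignment):

    import torch

    def permute(tokens: torch.Tensor, topk_ids: torch.Tensor):
        # Sort the (token, expert) pairs by expert id so each expert sees a
        # contiguous slab of rows.
        flat_experts = topk_ids.flatten()
        order = torch.argsort(flat_experts, stable=True)
        token_idx = order // topk_ids.shape[1]
        inv_perm = torch.argsort(order)  # undoes the sort later
        return tokens[token_idx], inv_perm

    def unpermute(expert_out: torch.Tensor, inv_perm: torch.Tensor,
                  topk_weights: torch.Tensor):
        # Undo the sort, then weight and reduce the top-k copies per token.
        out = expert_out[inv_perm].view(*topk_weights.shape, -1)
        return (out * topk_weights.unsqueeze(-1)).sum(dim=1)

The diff above removes the older `_moe_permute`/`_moe_unpermute_and_reduce`
code path, so the benchmark now times only the fused implementation of this
gather/scatter pattern.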
@@ -22,8 +22,8 @@ from vllm.utils.argparse_utils import FlexibleArgumentParser

 mp.set_start_method("spawn", force=True)

-assert current_platform.is_cuda(), (
-    "Only support tune w8a8 block fp8 kernel on CUDA device."
+assert current_platform.is_cuda() or current_platform.is_rocm(), (
+    "Only support tune w8a8 block fp8 kernel on CUDA/ROCm device."
 )

 DTYPE_MAP = {
@@ -24,6 +24,12 @@
 typedef __hip_bfloat16 __nv_bfloat16;
 #endif

+#if defined(__gfx942__)
+constexpr float kFp8ScaleDivisor = 224.f;
+#else
+constexpr float kFp8ScaleDivisor = 448.f;
+#endif
+
 void swap_blocks(torch::Tensor& src, torch::Tensor& dst,
                  int64_t block_size_in_bytes,
                  const torch::Tensor& block_mapping) {
@@ -401,8 +407,7 @@ __global__ void concat_and_cache_ds_mla_kernel(
   }

   // Compute the scale for the tile
-  float tile_scale = max_abs / 448.f;
-  tile_scale = fmaxf(tile_scale, FLT_MIN);
+  float tile_scale = fmaxf(max_abs / kFp8ScaleDivisor, FLT_MIN);

   // The first lane of each half-warp writes the scale to kv_cache
   if ((lane_idx == 0) || (lane_idx == 16)) {
@@ -471,11 +476,8 @@ __global__ void indexer_k_quant_and_cache_kernel(
 #endif
   }

-#if defined(__gfx942__)
-  float scale = fmaxf(amax, 1e-4) / 224.0f;
-#else
-  float scale = fmaxf(amax, 1e-4) / 448.0f;
-#endif
+  float scale = fmaxf(amax, 1e-4) / kFp8ScaleDivisor;
   if (use_ue8m0) {
     scale = exp2f(ceilf(log2f(scale)));
   }
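The kFp8ScaleDivisor refactor above centralizes the per-tile FP8 scale rule:
the divisor is the largest magnitude assumed representable in the target
platform's FP8 format (448 in the default path, 224 on gfx942). A rough
Python model of the rule, illustrative only:

    import torch

    def fp8_tile_scale(tile: torch.Tensor, fp8_max: float = 448.0) -> float:
        # Map the tile's max |x| onto the format's full range, clamped away
        # from zero so later division by the scale stays finite (FLT_MIN in
        # the kernel above).
        amax = tile.abs().max().item()
        return max(amax / fp8_max, torch.finfo(torch.float32).tiny)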
@@ -116,7 +116,7 @@ class Dequantizer4b {
       scalar_vec_t output_vec_0(wb_0);
       scalar_vec_t output_vec_1(wb_1);

-      // AMX needs to interlave K elements to pack as 32 bits
+      // AMX needs to interleave K elements to pack as 32 bits
       if constexpr (isa == ISA::AMX) {
         vec_op::interleave_save(output_vec_0, output_vec_1, curr_weight);
       } else {
@@ -360,13 +360,14 @@ void onednn_scaled_mm(
     const std::optional<torch::Tensor>& azp,      // [M] or [1]
     const std::optional<torch::Tensor>& azp_adj,  // [M] or [1]
     const std::optional<torch::Tensor>& bias,     // [N]
-    int64_t handler) {
+    const torch::Tensor& handler_tensor) {
   CPU_KERNEL_GUARD_IN(onednn_scaled_mm)
   TORCH_CHECK(a.dim() == 2);
   TORCH_CHECK(a.is_contiguous());
   TORCH_CHECK(c.is_contiguous());
   W8A8MatMulPrimitiveHandler* ptr =
-      reinterpret_cast<W8A8MatMulPrimitiveHandler*>(handler);
+      reinterpret_cast<W8A8MatMulPrimitiveHandler*>(
+          handler_tensor.item<int64_t>());
   const int32_t* azp_ptr = nullptr;
   if (azp.has_value()) {
     azp_ptr = azp->data_ptr<int32_t>();
@@ -519,13 +520,14 @@ int64_t create_onednn_mm_handler(const torch::Tensor& b,

 void onednn_mm(torch::Tensor& c,        // [M, OC], row-major
                const torch::Tensor& a,  // [M, IC], row-major
-               const std::optional<torch::Tensor>& bias, int64_t handler) {
+               const std::optional<torch::Tensor>& bias,
+               const torch::Tensor& handler_tensor) {
   CPU_KERNEL_GUARD_IN(onednn_mm)
   TORCH_CHECK(a.dim() == 2);
   TORCH_CHECK(a.stride(-1) == 1);
   TORCH_CHECK(c.stride(-1) == 1);
   MatMulPrimitiveHandler* ptr =
-      reinterpret_cast<MatMulPrimitiveHandler*>(handler);
+      reinterpret_cast<MatMulPrimitiveHandler*>(handler_tensor.item<int64_t>());

   // ACL matmuls expect contiguous source tensors
 #ifdef VLLM_USE_ACL
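The change above moves the oneDNN handler from a raw int64 schema argument to
a tensor that carries the pointer value. A sketch of the Python-side call
pattern this implies (op namespace and argument names are assumptions based
on the schemas registered later in this diff, illustrative only):

    import torch

    # create_onednn_mm_handler still returns a plain int64 pointer value...
    handler = torch.ops._C.create_onednn_mm_handler(b, primitive_cache_size)
    # ...which the caller now wraps in a 0-dim int64 tensor before each call.
    handler_tensor = torch.tensor(handler, dtype=torch.int64)
    torch.ops._C.onednn_mm(c, a, None, handler_tensor)

On the C++ side the pointer is recovered with handler_tensor.item<int64_t>(),
as shown in the hunks above.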
@@ -237,10 +237,10 @@ struct ThreadSHMContext {
 class SHMManager {
  public:
   explicit SHMManager(const std::string& name, const int rank,
-                      const int group_size)
+                      const int group_size, const int thread_num)
       : _rank(rank),
         _group_size(group_size),
-        _thread_num(omp_get_max_threads()),
+        _thread_num(thread_num),
         _shm_names({""}),
         _shared_mem_ptrs({nullptr}),
         _shm_ctx(nullptr) {
@@ -282,11 +282,11 @@ class SHMManager {
   }

   static int64_t create_singleton_instance(const std::string& name,
-                                           const int group_size,
-                                           const int rank) {
+                                           const int group_size, const int rank,
+                                           const int thread_num) {
     std::lock_guard<std::mutex> guard(SingletonInstancesLock);
     SingletonInstances.emplace_back(
-        std::make_unique<SHMManager>(name, rank, group_size));
+        std::make_unique<SHMManager>(name, rank, group_size, thread_num));
     return static_cast<int64_t>(SingletonInstances.size() - 1);
   }

@@ -854,8 +854,9 @@ std::vector<torch::Tensor> shm_recv_tensor_list(int64_t handle, int64_t src) {
 }

 int64_t init_shm_manager(const std::string& name, const int64_t group_size,
-                         const int64_t rank) {
-  return SHMManager::create_singleton_instance(name, group_size, rank);
+                         const int64_t rank, const int64_t thread_num) {
+  return SHMManager::create_singleton_instance(name, group_size, rank,
+                                               thread_num);
 }

 std::string join_shm_manager(int64_t handle, const std::string& name) {
@@ -19,13 +19,14 @@ void onednn_scaled_mm(torch::Tensor& c, const torch::Tensor& a,
                       const std::optional<torch::Tensor>& azp,
                       const std::optional<torch::Tensor>& azp_adj,
                       const std::optional<torch::Tensor>& bias,
-                      int64_t handler);
+                      const torch::Tensor& handler_tensor);

 int64_t create_onednn_mm_handler(const torch::Tensor& b,
                                  int64_t primitive_cache_size);

 void onednn_mm(torch::Tensor& c, const torch::Tensor& a,
-               const std::optional<torch::Tensor>& bias, int64_t handler);
+               const std::optional<torch::Tensor>& bias,
+               const torch::Tensor& handler_tensor);

 bool is_onednn_acl_supported();

@@ -34,7 +35,7 @@ void mla_decode_kvcache(torch::Tensor& out, torch::Tensor& query,
                         torch::Tensor& block_tables, torch::Tensor& seq_lens);

 int64_t init_shm_manager(const std::string& name, const int64_t group_size,
-                         const int64_t rank);
+                         const int64_t rank, const int64_t thread_num);

 std::string join_shm_manager(int64_t handle, const std::string& name);

@@ -196,7 +197,7 @@ TORCH_LIBRARY_EXPAND(TORCH_EXTENSION_NAME, ops) {
   // oneDNN GEMM
   ops.def(
       "onednn_mm(Tensor! c, Tensor a, Tensor? bias, "
-      "int handler) -> ()");
+      "Tensor handler_tensor) -> ()");
   ops.impl("onednn_mm", torch::kCPU, &onednn_mm);

   // Check if oneDNN was built with ACL backend
@@ -212,7 +213,7 @@ TORCH_LIBRARY_EXPAND(TORCH_EXTENSION_NAME, ops) {
   // oneDNN scaled_mm for W8A8 with static per-tensor activation quantization
   ops.def(
       "onednn_scaled_mm(Tensor! c, Tensor a, Tensor a_scales, Tensor? azp, "
-      "Tensor? azp_adj, Tensor? bias, int handler) -> ()");
+      "Tensor? azp_adj, Tensor? bias, Tensor handler_tensor) -> ()");
   ops.impl("onednn_scaled_mm", torch::kCPU, &onednn_scaled_mm);

   // Compute int8 quantized tensor for given scaling factor.
@@ -231,7 +232,9 @@ TORCH_LIBRARY_EXPAND(TORCH_EXTENSION_NAME, ops) {

   // SHM CCL
 #if defined(__AVX512F__) || (defined(__aarch64__) && !defined(__APPLE__))
-  ops.def("init_shm_manager(str name, int group_size, int rank) -> int",
-          &init_shm_manager);
+  ops.def(
+      "init_shm_manager(str name, int group_size, int rank, int thread_num) -> "
+      "int",
+      &init_shm_manager);
   ops.def("join_shm_manager(int handle, str name) -> str", &join_shm_manager);
   ops.def("shm_allreduce(int handle, Tensor! data) -> ()");

@@ -291,7 +294,7 @@ TORCH_LIBRARY_EXPAND(TORCH_EXTENSION_NAME, ops) {
       "value_cache, Tensor(a3!) output, Tensor query_start_loc, Tensor "
       "seq_lens, float scale, bool causal, Tensor? alibi_slopes, SymInt "
       "sliding_window_left, SymInt sliding_window_right, Tensor block_table, "
-      "float softcap, Tensor sheduler_metadata, Tensor? s_aux) -> ()",
+      "float softcap, Tensor scheduler_metadata, Tensor? s_aux) -> ()",
       &cpu_attention_with_kv_cache);

   // placeholders
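The binding changes above swap the plain `int handler` for a `Tensor handler_tensor`; the C++ side recovers the pointer with `handler_tensor.item<int64_t>()`, as shown at the top of this diff. A hedged sketch of the resulting call pattern (shapes, the `_C` namespace, and the cache-size value are illustrative assumptions, not verified vLLM usage):

```python
import torch

b = torch.randn(512, 256)  # weight; the exact required layout is up to the handler
handler = torch.ops._C.create_onednn_mm_handler(b, 128)  # 128: primitive cache size
# New in this release: the opaque handle travels as an int64 CPU tensor
# rather than a bare Python int.
handler_tensor = torch.tensor(handler, dtype=torch.int64)

a = torch.randn(32, 512)
c = torch.empty(32, 256)
torch.ops._C.onednn_mm(c, a, None, handler_tensor)  # c = a @ b, bias omitted
```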
@@ -73,25 +73,40 @@ void moe_permute(
       get_ptr<int64_t>(expert_first_token_offset), n_token, n_expert,
       n_local_expert, topk, sorter, get_ptr<int>(sort_workspace), stream);

+  // DeepGEMM: use getMIndices kernel to compute
+  // 1) align_expert_first_token_offset (aligned prefix offsets)
+  // 2) m_indices (expert id for each aligned row)
+  // eg. expert0: 3, expert1: 5, expert2: 2 tokens respectively
+  // expert_first_token_offset = [0, 3, 8, 10], align_block_size = 4
+  // expert0: 3->4, expert1: 5->8, expert2: 2->4
+  // align_expert_first_token_offset = [0, 4, 12, 16]
+  // so m_indices = [0,0,0,0, 1,1,1,1,1,1,1,1, 2,2,2,2]
+  torch::Tensor align_expert_first_token_offset;
+  const int64_t* aligned_expert_first_token_offset_ptr = nullptr;
+  if (align_block_size.has_value()) {
+    align_expert_first_token_offset =
+        torch::zeros_like(expert_first_token_offset);
+    getMIndices(get_ptr<int64_t>(expert_first_token_offset),
+                get_ptr<int64_t>(align_expert_first_token_offset),
+                get_ptr<int>(m_indices), n_local_expert, align_block_size_value,
+                stream);
+    aligned_expert_first_token_offset_ptr =
+        get_ptr<int64_t>(align_expert_first_token_offset);
+  }
+
   // dispatch expandInputRowsKernelLauncher
   MOE_DISPATCH(input.scalar_type(), [&] {
     expandInputRowsKernelLauncher<scalar_t>(
         get_ptr<scalar_t>(input), get_ptr<scalar_t>(permuted_input),
         get_ptr<int>(permuted_experts_id), get_ptr<int>(sorted_row_idx),
         get_ptr<int>(inv_permuted_idx), get_ptr<int>(permuted_idx),
-        get_ptr<int64_t>(expert_first_token_offset), n_token, valid_num_ptr,
-        n_hidden, topk, n_local_expert, align_block_size_value, stream);
+        get_ptr<int64_t>(expert_first_token_offset),
+        aligned_expert_first_token_offset_ptr, n_token, valid_num_ptr, n_hidden,
+        topk, n_local_expert, align_block_size_value, stream);
   });

-  // get m_indices and update expert_first_token_offset with align block
   // this is only required for DeepGemm and not required for CUTLASS group gemm
   if (align_block_size.has_value()) {
-    auto align_expert_first_token_offset =
-        torch::zeros_like(expert_first_token_offset);
-    getMIndices(get_ptr<int64_t>(expert_first_token_offset),
-                get_ptr<int64_t>(align_expert_first_token_offset),
-                get_ptr<int>(m_indices), n_local_expert, align_block_size_value,
-                stream);
     expert_first_token_offset.copy_(align_expert_first_token_offset);
   }
 }
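For readers following the offset arithmetic, here is a NumPy rendering of what `getMIndices` is documented to produce, using the worked example from the comment in the diff above (an illustrative sketch, not the CUDA implementation):

```python
import numpy as np

expert_first_token_offset = np.array([0, 3, 8, 10], dtype=np.int64)
align_block_size = 4

tokens_per_expert = np.diff(expert_first_token_offset)  # [3, 5, 2]
# Round each expert's token count up to a multiple of align_block_size.
aligned = (
    (tokens_per_expert + align_block_size - 1) // align_block_size
) * align_block_size                                     # [4, 8, 4]
align_expert_first_token_offset = np.concatenate(([0], np.cumsum(aligned)))
m_indices = np.repeat(np.arange(len(aligned)), aligned)

print(align_expert_first_token_offset)  # [ 0  4 12 16]
print(m_indices)  # [0 0 0 0 1 1 1 1 1 1 1 1 2 2 2 2]
```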
@@ -60,7 +60,8 @@ void expandInputRowsKernelLauncher(
     T const* unpermuted_input, T* permuted_output, int* sorted_experts,
     int const* expanded_dest_row_to_expanded_source_row,
     int* expanded_source_row_to_expanded_dest_row, int* permuted_idx,
-    int64_t* expert_first_token_offset, int64_t const num_rows,
+    int64_t const* expert_first_token_offset,
+    int64_t const* aligned_expert_first_token_offset, int64_t const num_rows,
     int64_t const* num_valid_tokens_ptr, int64_t const cols, int const k,
     int num_local_experts, const int& align_block_size, cudaStream_t stream);
@@ -5,7 +5,8 @@ __global__ void expandInputRowsKernel(
     T const* unpermuted_input, T* permuted_output, int* sorted_experts,
     int const* expanded_dest_row_to_expanded_source_row,
     int* expanded_source_row_to_expanded_dest_row, int* permuted_idx,
-    int64_t* expert_first_token_offset, int64_t const num_rows,
+    int64_t const* expert_first_token_offset,
+    int64_t const* aligned_expert_first_token_offset, int64_t const num_rows,
     int64_t const* num_dest_rows, int64_t const cols, int64_t k,
     int num_local_experts, int align_block_size) {
   // Reverse permutation map.
@@ -18,35 +19,22 @@ __global__ void expandInputRowsKernel(
       expanded_dest_row_to_expanded_source_row[expanded_dest_row];
   int expert_id = sorted_experts[expanded_dest_row];

-  extern __shared__ int64_t smem_expert_first_token_offset[];
   if constexpr (ALIGN_BLOCK_SIZE) {
-    // load g2s
-    for (int idx = threadIdx.x; idx < num_local_experts + 1;
-         idx += blockDim.x) {
-      smem_expert_first_token_offset[idx] =
-          __ldg(expert_first_token_offset + idx);
-    }
-    __syncthreads();
-    int lane_idx = threadIdx.x & 31;
-
-    if (lane_idx == 0) {
-      // set token_offset_in_expert = 0 if this expert is not local expert
-      int token_offset_in_expert =
-          expert_id >= num_local_experts
-              ? 0
-              : expanded_dest_row - smem_expert_first_token_offset[expert_id];
-      int64_t accumulate_align_offset = 0;
-#pragma unroll 1
-      for (int eidx = 1; eidx <= min(expert_id, num_local_experts); eidx++) {
-        auto n_token_in_expert = smem_expert_first_token_offset[eidx] -
-                                 smem_expert_first_token_offset[eidx - 1];
-        accumulate_align_offset += (n_token_in_expert + align_block_size - 1) /
-                                   align_block_size * align_block_size;
-      }
-      expanded_dest_row = accumulate_align_offset + token_offset_in_expert;
-    }
-    // lane0 shuffle broadcast align_expanded_dest_row
-    expanded_dest_row = __shfl_sync(0xffffffff, expanded_dest_row, 0);
+    // convert (unaligned) expanded_dest_row -> aligned expanded_dest_row.
+    // aligned_expert_first_token_offset[e] provides the aligned prefix start
+    // for expert e. For non-local experts we map to the end (total aligned M).
+    int64_t aligned_base = 0;
+    int64_t token_offset_in_expert = 0;
+    if (expert_id >= num_local_experts) {
+      aligned_base =
+          __ldg(aligned_expert_first_token_offset + num_local_experts);
+      token_offset_in_expert = 0;
+    } else {
+      aligned_base = __ldg(aligned_expert_first_token_offset + expert_id);
+      token_offset_in_expert =
+          expanded_dest_row - __ldg(expert_first_token_offset + expert_id);
+    }
+    expanded_dest_row = aligned_base + token_offset_in_expert;
   }

   if (threadIdx.x == 0) {
@@ -88,7 +76,8 @@ void expandInputRowsKernelLauncher(
     T const* unpermuted_input, T* permuted_output, int* sorted_experts,
     int const* expanded_dest_row_to_expanded_source_row,
     int* expanded_source_row_to_expanded_dest_row, int* permuted_idx,
-    int64_t* expert_first_token_offset, int64_t const num_rows,
+    int64_t const* expert_first_token_offset,
+    int64_t const* aligned_expert_first_token_offset, int64_t const num_rows,
     int64_t const* num_valid_tokens_ptr, int64_t const cols, int const k,
     int num_local_experts, const int& align_block_size, cudaStream_t stream) {
   int64_t const blocks = num_rows * k;
@@ -104,14 +93,12 @@ void expandInputRowsKernelLauncher(
   bool is_align_block_size = align_block_size != -1;
   auto func = func_map[is_check_skip][is_align_block_size];

-  int64_t smem_size = sizeof(int64_t) * (num_local_experts + 1);
-
-  func<<<blocks, threads, smem_size, stream>>>(
+  func<<<blocks, threads, 0, stream>>>(
       unpermuted_input, permuted_output, sorted_experts,
       expanded_dest_row_to_expanded_source_row,
       expanded_source_row_to_expanded_dest_row, permuted_idx,
-      expert_first_token_offset, num_rows, num_valid_tokens_ptr, cols, k,
-      num_local_experts, align_block_size);
+      expert_first_token_offset, aligned_expert_first_token_offset, num_rows,
+      num_valid_tokens_ptr, cols, k, num_local_experts, align_block_size);
 }

 template <class T, class U>
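The kernel rewrite above drops the per-block shared-memory prefix scan: with `aligned_expert_first_token_offset` precomputed by `getMIndices`, each row's aligned destination is just a base-plus-offset lookup. A NumPy sketch of that mapping, reusing the offsets from the previous example (illustrative, not the CUDA code):

```python
import numpy as np

expert_first_token_offset = np.array([0, 3, 8, 10])
aligned_expert_first_token_offset = np.array([0, 4, 12, 16])
num_local_experts = 3

def aligned_dest_row(expanded_dest_row: int, expert_id: int) -> int:
    if expert_id >= num_local_experts:
        # Non-local experts map to the end (total aligned M), offset 0.
        return int(aligned_expert_first_token_offset[num_local_experts])
    token_offset_in_expert = (
        expanded_dest_row - expert_first_token_offset[expert_id])
    return int(aligned_expert_first_token_offset[expert_id]
               + token_offset_in_expert)

print(aligned_dest_row(3, 1))  # expert 1's first token: row 3 -> 4
print(aligned_dest_row(9, 2))  # expert 2's second token: row 9 -> 13
```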
@@ -288,8 +288,8 @@ def generate_sch_sig(schedule_config: ScheduleConfig) -> str:
     )
     cluster_shape = (
         f"{schedule_config.cluster_shape_mnk[0]}"
-        + f"x{schedule_config.cluster_shape_mnk[1]}"
-        + f"x{schedule_config.cluster_shape_mnk[2]}"
+        f"x{schedule_config.cluster_shape_mnk[1]}"
+        f"x{schedule_config.cluster_shape_mnk[2]}"
     )
     kernel_schedule = VLLMKernelScheduleTag[schedule_config.kernel_schedule].split(
         "::"
@@ -301,7 +301,7 @@ def generate_sch_sig(schedule_config: ScheduleConfig) -> str:

     return (
         f"{tile_shape}_{cluster_shape}_{kernel_schedule}"
-        + f"_{epilogue_schedule}_{tile_scheduler}"
+        f"_{epilogue_schedule}_{tile_scheduler}"
     )
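The change above is purely stylistic: adjacent string literals, f-strings included, are concatenated at compile time, so the `+` operators were redundant. A one-liner check (values illustrative):

```python
mnk = (128, 128, 64)  # e.g. a cluster_shape_mnk triple
sig = f"{mnk[0]}" f"x{mnk[1]}" f"x{mnk[2]}"
assert sig == f"{mnk[0]}" + f"x{mnk[1]}" + f"x{mnk[2]}" == "128x128x64"
```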
@@ -1,203 +0,0 @@
-Contains code from https://github.com/IST-DASLab/Sparse-Marlin/
[... the remaining deleted lines are the unmodified Apache License, Version 2.0
text (January 2004, http://www.apache.org/licenses/), Sections 1-9 plus the
standard appendix boilerplate ...]
@@ -1,51 +0,0 @@
-/*
- * Copyright (C) 2024 Roberto Lopez Castro (roberto.lopez.castro@udc.es). All
- * Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-namespace marlin_24 {
-
-constexpr int ceildiv(int a, int b) { return (a + b - 1) / b; }
-
-// Instances of `Vec` are used to organize groups of >>registers<<, as needed
-// for instance as inputs to tensor core operations. Consequently, all
-// corresponding index accesses must be compile-time constants, which is why we
-// extensively use `#pragma unroll` throughout the kernel code to guarantee
-// this.
-template <typename T, int n>
-struct Vec {
-  T elems[n];
-  __device__ T& operator[](int i) { return elems[i]; }
-};
-
-template <int M_, int N_, int K_>
-struct ShapeBase {
-  static constexpr int M = M_, N = N_, K = K_;
-};
-
-using I4 = Vec<int, 4>;
-
-// Matrix fragments for tensor core instructions; their precise layout is
-// documented here:
-// https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#matrix-fragments-for-mma-m16n8k16-with-floating-point-type
-using FragA = Vec<half2, 4>;
-using FragB = Vec<half2, 2>;
-using FragM = Vec<uint, 1>;
-using FragC = Vec<float, 4>;
-using FragS = Vec<half2, 1>;  // quantization scales
-
-}  // namespace marlin_24
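The deleted header's `ceildiv` is the standard round-up-division idiom, the same rounding the MoE alignment code earlier in this compare applies per expert. Its Python equivalent, for reference (a trivial illustrative sketch):

```python
def ceildiv(a: int, b: int) -> int:
    # Round-up integer division, mirroring the deleted C++ helper.
    return (a + b - 1) // b

assert ceildiv(10, 4) == 3  # 10 tokens in blocks of 4 -> 3 blocks
assert ceildiv(8, 4) == 2
```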
@@ -1,136 +0,0 @@
-/*
- * Copyright (C) 2024 Roberto Lopez Castro (roberto.lopez.castro@udc.es). All
- * Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-#include "base.h"
-
-namespace marlin_24 {
-// Predicated asynchronous global->shared copy; used for inputs A where we apply
-// predication to handle batchsizes that are not multiples of 16.
-__device__ inline void cp_async4_pred_zfill(void* smem_ptr,
-                                            const void* glob_ptr,
-                                            bool pred = true,
-                                            const bool zfill = false) {
-  const int BYTES = 16;
-  int src_in_bytes = (zfill ? 0 : BYTES);
-  uint32_t smem = static_cast<uint32_t>(__cvta_generic_to_shared(smem_ptr));
-  asm volatile(
-      "{\n"
-      "   .reg .pred p;\n"
-      "   setp.ne.b32 p, %0, 0;\n"
-      "   @p cp.async.cg.shared.global [%1], [%2], %3;\n"
-      "}\n" ::"r"((int)pred),
-      "r"(smem), "l"(glob_ptr), "n"(BYTES), "r"(src_in_bytes));
-}
-
-__device__ inline void cp_async4_pred(void* smem_ptr, const void* glob_ptr,
-                                      bool pred = true) {
-  const int BYTES = 16;
-  uint32_t smem = static_cast<uint32_t>(__cvta_generic_to_shared(smem_ptr));
-  asm volatile(
-      "{\n"
-      "   .reg .pred p;\n"
-      "   setp.ne.b32 p, %0, 0;\n"
-      "   @p cp.async.cg.shared.global [%1], [%2], %3;\n"
-      "}\n" ::"r"((int)pred),
-      "r"(smem), "l"(glob_ptr), "n"(BYTES));
-}
-
-// Asynchronous global->shared copy
-__device__ inline void cp_async4(void* smem_ptr, const void* glob_ptr) {
-  const int BYTES = 16;
-  uint32_t smem = static_cast<uint32_t>(__cvta_generic_to_shared(smem_ptr));
-  asm volatile(
-      "{\n"
-      "   cp.async.cg.shared.global [%0], [%1], %2;\n"
-      "}\n" ::"r"(smem),
-      "l"(glob_ptr), "n"(BYTES));
-}
-
-// Async copy fence.
-__device__ inline void cp_async_fence() {
-  asm volatile("cp.async.commit_group;\n" ::);
-}
-
-// Wait until at most `n` async copy stages are still pending.
-template <int n>
-__device__ inline void cp_async_wait() {
-  asm volatile("cp.async.wait_group %0;\n" ::"n"(n));
-}
-
-// Instruction for loading a full 16x16 matrix fragment of operand A from shared
-// memory, directly in tensor core layout.
-__device__ inline void ldsm4(FragA& frag_a, const void* smem_ptr) {
-  uint32_t* a = reinterpret_cast<uint32_t*>(&frag_a);
-  uint32_t smem = static_cast<uint32_t>(__cvta_generic_to_shared(smem_ptr));
-  asm volatile("ldmatrix.sync.aligned.m8n8.x4.shared.b16 {%0,%1,%2,%3}, [%4];\n"
-               : "=r"(a[0]), "=r"(a[1]), "=r"(a[2]), "=r"(a[3])
-               : "r"(smem));
-}
-
-__device__ inline void ldsm4_m(FragM& frag_m, const void* smem_ptr) {
-  uint32_t* a = reinterpret_cast<uint32_t*>(&frag_m);
-  uint32_t smem = static_cast<uint32_t>(__cvta_generic_to_shared(smem_ptr));
-  asm volatile("ldmatrix.sync.aligned.m8n8.x2.shared.b16 {%0,%1}, [%2];\n"
-               : "=r"(a[0]), "=r"(a[1])
-               : "r"(smem));
-}
-
-// Instruction for loading a full 16x16 matrix fragment of operand A from shared
-// memory, directly in tensor core layout.
-__device__ inline void ldsm4_t(FragA& frag_a, const void* smem_ptr) {
-  uint32_t* a = reinterpret_cast<uint32_t*>(&frag_a);
-  uint32_t smem = static_cast<uint32_t>(__cvta_generic_to_shared(smem_ptr));
-  asm volatile(
-      "ldmatrix.sync.aligned.m8n8.x4.trans.shared.b16 {%0,%1,%2,%3}, [%4];\n"
-      : "=r"(a[0]), "=r"(a[1]), "=r"(a[2]), "=r"(a[3])
-      : "r"(smem));
-}
-
-// Wait until barrier reaches `count`, then lock for current threadblock.
-__device__ inline void barrier_acquire(int* lock, int count) {
-  if (threadIdx.x == 0) {
-    int state = -1;
-    do
-      // Guarantee that subsequent writes by this threadblock will be visible
-      // globally.
-      asm volatile("ld.global.acquire.gpu.b32 %0, [%1];\n"
-                   : "=r"(state)
-                   : "l"(lock));
-    while (state != count);
-  }
-  __syncthreads();
-}
-
-// Release barrier and increment visitation count.
-__device__ inline void barrier_release(int* lock, bool reset = false) {
-  __syncthreads();
-  if (threadIdx.x == 0) {
-    if (reset) {
-      lock[0] = 0;
-      return;
-    }
-    int val = 1;
-    // Make sure that all writes since acquiring this barrier are visible
-    // globally, while releasing the barrier.
-    asm volatile("fence.acq_rel.gpu;\n");
-    asm volatile("red.relaxed.gpu.global.add.s32 [%0], %1;\n"
-                 :
-                 : "l"(lock), "r"(val));
-  }
-}
-}  // namespace marlin_24
@@ -1,191 +0,0 @@
-/*
- * Copyright (C) 2024 Roberto Lopez Castro (roberto.lopez.castro@udc.es). All
- * Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-#include "base.h"
-#include <cudaTypedefs.h>
-
-namespace marlin_24 {
-
-// On CUDA earlier than 12.5, the ordered_metadata version of this instruction
-// is not supported. On later versions of CUDA the version without ordered
-// metadata results in the following warning:
-// | Advisory: Modifier ‘.sp::ordered_metadata’ should be used on instruction
-// | ‘mma’ instead of modifier ‘.sp’ as it is expected to have substantially
-// | reduced performance on some future architectures
-#if defined CUDA_VERSION && CUDA_VERSION >= 12050
-  #define MMA_SP_INST \
-    "mma.sp::ordered_metadata.sync.aligned.m16n8k32.row.col.f32.f16.f16.f32 "
-#else
-  #define MMA_SP_INST "mma.sp.sync.aligned.m16n8k32.row.col.f32.f16.f16.f32 "
-#endif
-
-// m16n8k32 sparse tensor core mma instruction with fp16 inputs and fp32
-// output/accumulation.
-__device__ inline void mma_sp(const FragB& a_frag0, const FragB& a_frag1,
-                              const FragA& frag_b, FragC& frag_c, FragM& frag_m,
-                              const int psel) {
-  const uint32_t* a0 = reinterpret_cast<const uint32_t*>(&a_frag0);
-  const uint32_t* a1 = reinterpret_cast<const uint32_t*>(&a_frag1);
-  const uint32_t* b = reinterpret_cast<const uint32_t*>(&frag_b);
-  const uint32_t* e = reinterpret_cast<const uint32_t*>(&frag_m);
-
-  float* c = reinterpret_cast<float*>(&frag_c);
-  if (psel == 0) {
-    asm volatile(MMA_SP_INST
-                 "{%0, %1, %2, %3}, {%4, %5, %6, %7}, {%8, %9, %10,%11}, "
-                 "{%12,%13,%14,%15}, %16, 0x0;\n"
-                 : "=f"(c[0]), "=f"(c[1]), "=f"(c[2]), "=f"(c[3])
-                 : "r"(a0[0]), "r"(a1[0]), "r"(a0[1]), "r"(a1[1]), "r"(b[0]),
-                   "r"(b[2]), "r"(b[4]), "r"(b[6]), "f"(c[0]), "f"(c[1]),
-                   "f"(c[2]), "f"(c[3]), "r"(e[0]));
-    asm volatile(MMA_SP_INST
-                 "{%0, %1, %2, %3}, {%4, %5, %6, %7}, {%8, %9, %10,%11}, "
-                 "{%12,%13,%14,%15}, %16, 0x0;\n"
-                 : "=f"(c[4]), "=f"(c[5]), "=f"(c[6]), "=f"(c[7])
-                 : "r"(a0[0]), "r"(a1[0]), "r"(a0[1]), "r"(a1[1]), "r"(b[1]),
-                   "r"(b[3]), "r"(b[5]), "r"(b[7]), "f"(c[4]), "f"(c[5]),
-                   "f"(c[6]), "f"(c[7]), "r"(e[0]));
-  } else {
-    asm volatile(MMA_SP_INST
-                 "{%0, %1, %2, %3}, {%4, %5, %6, %7}, {%8, %9, %10,%11}, "
-                 "{%12,%13,%14,%15}, %16, 0x1;\n"
-                 : "=f"(c[0]), "=f"(c[1]), "=f"(c[2]), "=f"(c[3])
-                 : "r"(a0[0]), "r"(a1[0]), "r"(a0[1]), "r"(a1[1]), "r"(b[0]),
-                   "r"(b[2]), "r"(b[4]), "r"(b[6]), "f"(c[0]), "f"(c[1]),
-                   "f"(c[2]), "f"(c[3]), "r"(e[0]));
-    asm volatile(MMA_SP_INST
-                 "{%0, %1, %2, %3}, {%4, %5, %6, %7}, {%8, %9, %10,%11}, "
-                 "{%12,%13,%14,%15}, %16, 0x1;\n"
-                 : "=f"(c[4]), "=f"(c[5]), "=f"(c[6]), "=f"(c[7])
-                 : "r"(a0[0]), "r"(a1[0]), "r"(a0[1]), "r"(a1[1]), "r"(b[1]),
-                   "r"(b[3]), "r"(b[5]), "r"(b[7]), "f"(c[4]), "f"(c[5]),
-                   "f"(c[6]), "f"(c[7]), "r"(e[0]));
-  }
-}
-
-// Lookup-table based 3-input logical operation; explicitly used for
-// dequantization as the compiler does not seem to automatically recognize it in
-// all cases.
-template <int lut>
-__device__ inline int lop3(int a, int b, int c) {
-  int res;
-  asm volatile("lop3.b32 %0, %1, %2, %3, %4;\n"
-               : "=r"(res)
-               : "r"(a), "r"(b), "r"(c), "n"(lut));
-  return res;
-}
-
-__device__ __forceinline__ uint2 to_half4(float c0, float c1, float c2,
-                                          float c3) {
-  uint2 r;
-  asm("{\n\t"
-      ".reg .f16 a, b, c, d; \n\t"
-      "cvt.rn.f16.f32 a, %2; \n\t"
-      "cvt.rn.f16.f32 b, %3; \n\t"
-      "cvt.rn.f16.f32 c, %4; \n\t"
-      "cvt.rn.f16.f32 d, %5; \n\t"
-      "mov.b32 %0, {a, b}; \n\t"
-      "mov.b32 %1, {c, d}; \n\t"
-      "}"
-      : "=r"(r.x), "=r"(r.y)
-      : "f"(c0), "f"(c1), "f"(c2), "f"(c3));
-  return r;
-}
-
-// Constructs destination register by taking bytes from 2 sources (based on
-// mask)
-template <int start_byte, int mask>
-__device__ inline uint32_t prmt(uint32_t a) {
-  uint32_t res;
-  asm volatile("prmt.b32 %0, %1, %2, %3;\n"
-               : "=r"(res)
-               : "r"(a), "n"(start_byte), "n"(mask));
-  return res;
-}
-
-// Efficiently dequantize an int32 value into a full B-fragment of 4 fp16
-// values. We mostly follow the strategy in the link below, with some small
-// changes:
-// https://github.com/NVIDIA/FasterTransformer/blob/main/src/fastertransformer/cutlass_extensions/include/cutlass_extensions/interleaved_numeric_conversion.h
-__device__ inline FragB dequant_4bit(int q) {
-  const int LO = 0x000f000f;
-  const int HI = 0x00f000f0;
-  const int EX = 0x64006400;
-  // Guarantee that the `(a & b) | c` operations are LOP3s.
-  int lo = lop3<(0xf0 & 0xcc) | 0xaa>(q, LO, EX);
-  int hi = lop3<(0xf0 & 0xcc) | 0xaa>(q, HI, EX);
-  // We want signed int4 outputs, hence we fuse the `-8` symmetric zero point
-  // directly into `SUB` and `ADD`.
-  const int SUB = 0x64086408;
-  const int MUL = 0x2c002c00;
-  const int ADD = 0xd480d480;
-
-  FragB frag_b;
-  frag_b[0] = __hsub2(*reinterpret_cast<half2*>(&lo),
-                      *reinterpret_cast<const half2*>(&SUB));
-  frag_b[1] = __hfma2(*reinterpret_cast<half2*>(&hi),
-                      *reinterpret_cast<const half2*>(&MUL),
-                      *reinterpret_cast<const half2*>(&ADD));
-  return frag_b;
-}
-
-// Efficiently dequantize an int32 value into a full B-fragment of 4 fp16
-// values. We mostly follow the strategy in the link below, with some small
-// changes:
-// https://github.com/NVIDIA/FasterTransformer/blob/main/src/fastertransformer/cutlass_extensions/include/cutlass_extensions/interleaved_numeric_conversion.h
-__device__ inline FragB dequant_8bit(int q) {
-  static constexpr uint32_t mask_for_elt_01 = 0x5250;
-  static constexpr uint32_t mask_for_elt_23 = 0x5351;
-  static constexpr uint32_t start_byte_for_fp16 = 0x64646464;
-
-  uint32_t lo = prmt<start_byte_for_fp16, mask_for_elt_01>(q);
-  uint32_t hi = prmt<start_byte_for_fp16, mask_for_elt_23>(q);
-
-  static constexpr uint32_t I8s_TO_F16s_MAGIC_NUM = 0x64806480;
-
-  FragB frag_b;
-  frag_b[0] = __hsub2(*reinterpret_cast<half2*>(&lo),
-                      *reinterpret_cast<const half2*>(&I8s_TO_F16s_MAGIC_NUM));
-  frag_b[1] = __hsub2(*reinterpret_cast<half2*>(&hi),
-                      *reinterpret_cast<const half2*>(&I8s_TO_F16s_MAGIC_NUM));
-  return frag_b;
-}
-
-// Multiply dequantized values by the corresponding quantization scale; used
-// only for grouped quantization.
-__device__ inline void scale(FragB& frag_b, FragS& frag_s, int i) {
-  half2 s = __half2half2(reinterpret_cast<__half*>(&frag_s)[i]);
-  frag_b[0] = __hmul2(frag_b[0], s);
-  frag_b[1] = __hmul2(frag_b[1], s);
-}
-
-__device__ inline void scale_floats(float* c0, float* c1, float* c2, float* c3,
-                                    FragS& s0, float* c4, float* c5, float* c6,
-                                    float* c7, FragS& s1) {
-  *c0 = __fmul_rn(*c0, __half2float(s0[0].x));
-  *c1 = __fmul_rn(*c1, __half2float(s0[0].y));
-  *c2 = __fmul_rn(*c2, __half2float(s0[1].x));
-  *c3 = __fmul_rn(*c3, __half2float(s0[1].y));
-
-  *c4 = __fmul_rn(*c4, __half2float(s1[0].x));
-  *c5 = __fmul_rn(*c5, __half2float(s1[0].y));
-  *c6 = __fmul_rn(*c6, __half2float(s1[1].x));
-  *c7 = __fmul_rn(*c7, __half2float(s1[1].y));
-}
-
-}  // namespace marlin_24
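Before its removal, `dequant_4bit` above relied on a well-known bit trick: OR-ing each 4-bit value into the mantissa of the fp16 pattern `0x6400` (1024.0) makes the lane read as `1024 + q`, so subtracting `0x6408` (1032.0) yields `q - 8` with the symmetric zero point fused in. A NumPy check of the LO-lane path only (illustrative; the real kernel also handles the HI lanes and the interleaved layout):

```python
import numpy as np

def fp16_from_bits(x: int) -> np.ndarray:
    # Reinterpret a 32-bit pattern as two little-endian fp16 lanes.
    return np.frombuffer(np.uint32(x).tobytes(), dtype=np.float16)

q = (5 << 0) | (3 << 16)             # two example nibbles: 5 and 3
lo = (q & 0x000F000F) | 0x64006400   # each lane becomes fp16 1024 + nibble
vals = fp16_from_bits(lo) - fp16_from_bits(0x64086408)  # subtract 1032.0
print(vals)  # [-3. -5.]  i.e. 5 - 8 and 3 - 8
```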
(File diff suppressed because it is too large.)
@@ -259,14 +259,6 @@ TORCH_LIBRARY_EXPAND(TORCH_EXTENSION_NAME, ops) {
   // custom types:
   // https://docs.google.com/document/d/18fBMPuOJ0fY5ZQ6YyrHUppw9FA332CpNtgB6SOIgyuA

-  // Marlin_24 (Sparse) Optimized Quantized GEMM for GPTQ.
-  ops.def(
-      "gptq_marlin_24_gemm(Tensor a, Tensor b_q_weight, Tensor b_meta, "
-      "Tensor b_scales, Tensor workspace, "
-      "int b_q_type, "
-      "SymInt size_m, SymInt size_n, SymInt size_k) -> Tensor");
-  // conditionally compiled so impl in source file
-
   // Machete (Dense) Optimized Mixed Precision GEMM for Hopper.
   ops.def(
       "machete_supported_schedules("
@@ -135,7 +135,7 @@ ENV UV_LINK_MODE=copy
 RUN gcc --version

 # Ensure CUDA compatibility library is loaded
-RUN echo "/usr/local/cuda-$(echo "$CUDA_VERSION" | cut -d. -f1,2)/compat/" > /etc/ld.so.conf.d/00-cuda-compat.conf && ldconfig
+RUN echo "/usr/local/cuda-$(echo "$CUDA_VERSION" | cut -d. -f1,2)/compat/" > /etc/ld.so.conf.d/cuda-compat.conf && ldconfig

 # ============================================================
 # SLOW-CHANGING DEPENDENCIES BELOW
@@ -565,7 +565,7 @@ ENV UV_INDEX_STRATEGY="unsafe-best-match"
 ENV UV_LINK_MODE=copy

 # Ensure CUDA compatibility library is loaded
-RUN echo "/usr/local/cuda-$(echo "$CUDA_VERSION" | cut -d. -f1,2)/compat/" > /etc/ld.so.conf.d/00-cuda-compat.conf && ldconfig
+RUN echo "/usr/local/cuda-$(echo "$CUDA_VERSION" | cut -d. -f1,2)/compat/" > /etc/ld.so.conf.d/cuda-compat.conf && ldconfig

 # ============================================================
 # SLOW-CHANGING DEPENDENCIES BELOW
@@ -25,7 +25,7 @@
 ######################### COMMON BASE IMAGE #########################
 FROM ubuntu:22.04 AS base-common

-WORKDIR /workspace/
+WORKDIR /vllm-workspace

 ARG PYTHON_VERSION=3.12
 ARG PIP_EXTRA_INDEX_URL="https://download.pytorch.org/whl/cpu"
@@ -109,7 +109,7 @@ ENV VLLM_CPU_AVX512VNNI=${VLLM_CPU_AVX512VNNI}
 ARG VLLM_CPU_AMXBF16=1
 ENV VLLM_CPU_AMXBF16=${VLLM_CPU_AMXBF16}

-WORKDIR /workspace/vllm
+WORKDIR /vllm-workspace

 # Copy build requirements
 COPY requirements/cpu-build.txt requirements/build.txt
@@ -123,13 +123,13 @@ RUN if [ "$GIT_REPO_CHECK" != 0 ]; then bash tools/check_repo.sh ; fi

 RUN --mount=type=cache,target=/root/.cache/uv \
     --mount=type=cache,target=/root/.cache/ccache \
-    --mount=type=cache,target=/workspace/vllm/.deps,sharing=locked \
+    --mount=type=cache,target=/vllm-workspace/.deps,sharing=locked \
     VLLM_TARGET_DEVICE=cpu python3 setup.py bdist_wheel --dist-dir=dist --py-limited-api=cp38

 ######################### TEST DEPS #########################
 FROM base AS vllm-test-deps

-WORKDIR /workspace/vllm
+WORKDIR /vllm-workspace

 # Copy test requirements
 COPY requirements/test.in requirements/cpu-test.in
@@ -157,7 +157,7 @@ RUN --mount=type=cache,target=/root/.cache/uv \
 ######################### DEV IMAGE #########################
 FROM vllm-build AS vllm-dev

-WORKDIR /workspace/vllm
+WORKDIR /vllm-workspace

 RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
     --mount=type=cache,target=/var/lib/apt,sharing=locked \
@@ -174,7 +174,7 @@ RUN --mount=type=cache,target=/root/.cache/uv \
     --mount=type=bind,source=.git,target=.git \
     VLLM_TARGET_DEVICE=cpu python3 setup.py develop

-COPY --from=vllm-test-deps /workspace/vllm/requirements/cpu-test.txt requirements/test.txt
+COPY --from=vllm-test-deps /vllm-workspace/requirements/cpu-test.txt requirements/test.txt

 RUN --mount=type=cache,target=/root/.cache/uv \
     uv pip install -r requirements/dev.txt && \
@@ -185,10 +185,10 @@ ENTRYPOINT ["bash"]
 ######################### TEST IMAGE #########################
 FROM vllm-test-deps AS vllm-test

-WORKDIR /workspace/
+WORKDIR /vllm-workspace

 RUN --mount=type=cache,target=/root/.cache/uv \
-    --mount=type=bind,from=vllm-build,src=/workspace/vllm/dist,target=dist \
+    --mount=type=bind,from=vllm-build,src=/vllm-workspace/dist,target=dist \
     uv pip install dist/*.whl

 ADD ./tests/ ./tests/
@@ -197,9 +197,6 @@ ADD ./benchmarks/ ./benchmarks/
 ADD ./vllm/collect_env.py .
 ADD ./.buildkite/ ./.buildkite/

-# Create symlink for vllm-workspace to maintain CI compatibility
-RUN ln -sf /workspace /vllm-workspace
-
 # install development dependencies (for testing)
 RUN --mount=type=cache,target=/root/.cache/uv \
     uv pip install -e tests/vllm_test_utils
@@ -207,11 +204,11 @@ RUN --mount=type=cache,target=/root/.cache/uv \
 ######################### RELEASE IMAGE #########################
 FROM base AS vllm-openai

-WORKDIR /workspace/
+WORKDIR /vllm-workspace

 RUN --mount=type=cache,target=/root/.cache/uv \
     --mount=type=cache,target=/root/.cache/ccache \
-    --mount=type=bind,from=vllm-build,src=/workspace/vllm/dist,target=dist \
+    --mount=type=bind,from=vllm-build,src=/vllm-workspace/dist,target=dist \
     uv pip install dist/*.whl

 # Add labels to document build configuration
@@ -227,7 +227,7 @@ RUN if [ "$GIT_REPO_CHECK" != "0" ]; then \
 # This ensures setuptools_scm sees clean repo state for version detection
 RUN --mount=type=bind,source=.git,target=vllm/.git \
     cd vllm \
-    && pip install setuptools_scm \
+    && pip install setuptools_scm regex \
     && VLLM_VERSION=$(python3 -c "import setuptools_scm; print(setuptools_scm.get_version())") \
     && echo "Detected vLLM version: ${VLLM_VERSION}" \
     && echo "${VLLM_VERSION}" > /tmp/vllm_version.txt
@@ -342,6 +342,19 @@ RUN mkdir src && mv vllm src/vllm
 FROM base AS final

 RUN python3 -m pip install --upgrade pip && rm -rf /var/lib/apt/lists/*

+# Clean up sccache from release image (not needed at runtime)
+# This removes the binary and wrappers that may have been installed during build
+RUN rm -f /usr/bin/sccache || true \
+    && rm -rf /opt/sccache-wrappers || true
+
+# Unset sccache environment variables for the release image
+# This prevents S3 bucket config from leaking into production images
+ENV SCCACHE_BUCKET=
+ENV SCCACHE_REGION=
+ENV SCCACHE_S3_NO_CREDENTIALS=
+ENV SCCACHE_IDLE_TIMEOUT=
+
 # Error related to odd state for numpy 1.20.3 where there is no METADATA etc, but an extra LICENSES_bundled.txt.
 # Manually remove it so that later steps of numpy upgrade can continue
 RUN case "$(which python3)" in \
@@ -47,6 +47,10 @@ You can tune the performance by adjusting `max_num_batched_tokens`:
 - For optimal throughput, we recommend setting `max_num_batched_tokens > 8192` especially for smaller models on large GPUs.
 - If `max_num_batched_tokens` is the same as `max_model_len`, that's almost the equivalent to the V0 default scheduling policy (except that it still prioritizes decodes).

+!!! warning
+    When chunked prefill is disabled, `max_num_batched_tokens` must be at least `max_model_len`:
+    if `max_num_batched_tokens < max_model_len`, vLLM may crash at server start-up.
+
 ```python
 from vllm import LLM

@@ -286,3 +290,9 @@ Based on the configuration, the content of the multi-modal caches on `P0` and `P
 K: Stores the hashes of multi-modal items
 V: Stores the processed tensor data of multi-modal items

+## Attention Backend Selection
+
+vLLM supports multiple attention backends optimized for different hardware and use cases. The backend is automatically selected based on your GPU architecture, model type, and configuration, but you can also manually specify one for optimal performance.
+
+For detailed information on available backends, their feature support, and how to configure them, see the [Attention Backend Feature Support](../design/attention_backends.md) documentation.
@@ -29,7 +29,7 @@ The initialization code should look like this:
 ```python
 from torch import nn
 from vllm.config import VllmConfig
-from vllm.attention.layer import Attention
+from vllm.model_executor.layers.attention import Attention

 class MyAttention(nn.Module):
     def __init__(self, vllm_config: VllmConfig, prefix: str):
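To make the updated import concrete, a minimal sketch of such a layer follows; the head counts, scaling, and constructor arguments are illustrative assumptions, not a verbatim signature:

```python
from torch import nn
from vllm.config import VllmConfig
from vllm.model_executor.layers.attention import Attention


class MyAttention(nn.Module):
    def __init__(self, vllm_config: VllmConfig, prefix: str):
        super().__init__()
        hf_config = vllm_config.model_config.hf_config
        head_dim = hf_config.hidden_size // hf_config.num_attention_heads
        # The Attention layer handles KV caching and backend dispatch;
        # the keyword arguments here are assumptions for illustration.
        self.attn = Attention(
            num_heads=hf_config.num_attention_heads,
            head_size=head_dim,
            scale=head_dim**-0.5,
            prefix=f"{prefix}.attn",
        )
```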
@@ -71,7 +71,7 @@ class MyModel(nn.Module):
 ```python
 def forward(
     self,
-    input_ids: torch.Tensor,
+    input_ids: torch.Tensor | None,
     positions: torch.Tensor,
     intermediate_tensors: IntermediateTensors | None = None,
    inputs_embeds: torch.Tensor | None = None,
@@ -43,9 +43,54 @@ Further update the model as follows:
     )
 ```

-- Implement [embed_multimodal][vllm.model_executor.models.interfaces.SupportsMultiModal.embed_multimodal] that returns the embeddings from running the multimodal inputs through the multimodal tokenizer of the model. Below we provide a boilerplate of a typical implementation pattern, but feel free to adjust it to your own needs.
+- Remove the embedding part from the [forward][torch.nn.Module.forward] method:
+    - Move the multi-modal embedding to [embed_multimodal][vllm.model_executor.models.interfaces.SupportsMultiModal.embed_multimodal].
+    - The text embedding and embedding merge are handled automatically by a default implementation of [embed_input_ids][vllm.model_executor.models.interfaces.SupportsMultiModal.embed_input_ids]. It does not need to be overridden in most cases.

-??? code
+    ```diff
+      def forward(
+          self,
+          input_ids: torch.Tensor | None,
+    -     pixel_values: torch.Tensor,
+          positions: torch.Tensor,
+          intermediate_tensors: IntermediateTensors | None = None,
+          inputs_embeds: torch.Tensor | None = None,
+      ) -> torch.Tensor:
+    -     if inputs_embeds is None:
+    -         inputs_embeds = self.get_input_embeddings()(input_ids)
+    -
+    -     if pixel_values is not None:
+    -         image_features = self.get_image_features(
+    -             pixel_values=pixel_values,
+    -         )
+    -         special_image_mask = self.get_placeholder_mask(
+    -             input_ids,
+    -             inputs_embeds=inputs_embeds,
+    -             image_features=image_features,
+    -         )
+    -         inputs_embeds = inputs_embeds.masked_scatter(
+    -             special_image_mask,
+    -             image_features,
+    -         )
+
+          hidden_states = self.language_model(
+              input_ids,
+              positions,
+              intermediate_tensors,
+              inputs_embeds=inputs_embeds,
+          )
+          ...
+
+    + def embed_multimodal(
+    +     self,
+    +     pixel_values: torch.Tensor,
+    + ) -> MultiModalEmbeddings | None:
+    +     return self.get_image_features(
+    +         pixel_values=pixel_values,
+    +     )
+    ```
+
+Below we provide a boilerplate of a typical implementation pattern of [embed_multimodal][vllm.model_executor.models.interfaces.SupportsMultiModal.embed_multimodal], but feel free to adjust it to your own needs.

 ```python
 def _process_image_input(self, image_input: YourModelImageInputs) -> torch.Tensor:
212 docs/design/attention_backends.md Normal file

@@ -0,0 +1,212 @@
# Attention Backend Feature Support

This document is auto-generated by `tools/pre_commit/generate_attention_backend_docs.py`.
It shows the feature support for each registered attention backend
based on the checks in `AttentionBackend.validate_configuration()`.

**Do not edit this file manually.** Run the following command to
regenerate it:

```bash
python tools/pre_commit/generate_attention_backend_docs.py
```

## Setting the Attention Backend

### Command Line

There are two ways to specify the backend from the command line:

**Option 1: Using `--attention-backend` (simple)**

```bash
vllm serve <model> --attention-backend FLASH_ATTN
```

**Option 2: Using `--attention-config.backend` / `-ac.backend` (structured config)**

```bash
# Dot notation
vllm serve <model> --attention-config.backend FLASH_ATTN
vllm serve <model> -ac.backend FLASH_ATTN

# JSON format
vllm serve <model> --attention-config '{"backend": "FLASH_ATTN"}'
vllm serve <model> -ac '{"backend": "FLASH_ATTN"}'
```

> **Note:** `--attention-backend` and `--attention-config.backend` are mutually
> exclusive. Use one or the other, not both.

### Python API

Use `AttentionConfig` with the `LLM` class:

```python
from vllm import LLM
from vllm.config import AttentionConfig
from vllm.v1.attention.backends.registry import AttentionBackendEnum

# Method 1: Using AttentionConfig with enum
llm = LLM(
    model="Qwen/Qwen3-0.6B",
    attention_config=AttentionConfig(backend=AttentionBackendEnum.FLASH_ATTN),
)

# Method 2: Using attention_backend parameter with string
llm = LLM(
    model="Qwen/Qwen3-0.6B",
    attention_backend="FLASH_ATTN",
)
```

## Backend Selection Behavior

### Manual Selection

When you explicitly set a backend via `--attention-backend` or `AttentionConfig`:

1. The backend is **validated** against your configuration (model dtype, head
   size, compute capability, etc.)
2. If the backend **doesn't support** your configuration, an error is raised
   with the specific reason
3. If valid, the backend is used

Example error when selecting an incompatible backend:

```text
ValueError: Selected backend FLASHMLA is not valid for this configuration.
Reason: ['compute capability not supported']
```

### Automatic Selection

When no backend is specified (the default):

1. vLLM iterates through backends in **priority order** (see tables below)
2. Each backend is validated against your configuration
3. The **first compatible backend** is selected
4. If no backend is compatible, an error is raised listing all backends and
   their incompatibility reasons

## Backend Priority (CUDA)

When no backend is explicitly selected, vLLM chooses the first
compatible backend from these priority-ordered lists.

Priority is **1 = highest** (tried first).

### Standard Attention (MHA, MQA, GQA)

**Blackwell (SM 10.x):**

| Priority | Backend |
|----------|---------|
| 1 | `FLASHINFER` |
| 2 | `FLASH_ATTN` |
| 3 | `TRITON_ATTN` |
| 4 | `FLEX_ATTENTION` |

**Ampere/Hopper (SM 8.x-9.x):**

| Priority | Backend |
|----------|---------|
| 1 | `FLASH_ATTN` |
| 2 | `FLASHINFER` |
| 3 | `TRITON_ATTN` |
| 4 | `FLEX_ATTENTION` |

### MLA Attention (DeepSeek-style)

**Blackwell (SM 10.x):**

| Priority | Backend |
|----------|---------|
| 1 | `FLASHINFER_MLA` |
| 2 | `CUTLASS_MLA` |
| 3 | `FLASH_ATTN_MLA` |
| 4 | `FLASHMLA` |
| 5 | `TRITON_MLA` |
| 6 | `FLASHMLA_SPARSE` |

**Ampere/Hopper (SM 8.x-9.x):**

| Priority | Backend |
|----------|---------|
| 1 | `FLASH_ATTN_MLA` |
| 2 | `FLASHMLA` |
| 3 | `FLASHINFER_MLA` |
| 4 | `TRITON_MLA` |
| 5 | `FLASHMLA_SPARSE` |

> **Note:** ROCm and CPU platforms have their own selection logic. See the platform-specific documentation for details.

## Legend

| Column | Description |
|--------|-------------|
| **Dtypes** | Supported model data types (fp16, bf16, fp32) |
| **KV Dtypes** | Supported KV cache data types (`auto`, `fp8`, `fp8_e4m3`, etc.) |
| **Block Sizes** | Supported KV cache block sizes (%N means multiples of N) |
| **Head Sizes** | Supported attention head sizes |
| **Sink** | Attention sink support (for StreamingLLM) |
| **Sparse** | Sparse attention support (MLA only) |
| **MM Prefix** | Multimodal prefix full attention support |
| **Attention Types** | Supported attention patterns (Decoder, Encoder, Enc-Dec) |
| **Compute Cap.** | Required CUDA compute capability (N/A for non-CUDA backends) |

**Symbols:** ✅ = Supported, ❌ = Not supported

## Standard Attention (MHA, MQA, GQA) Backends

| Backend | Version | Dtypes | KV Dtypes | Block Sizes | Head Sizes | Sink | MM Prefix | Attention Types | Compute Cap. |
|---------|---------|--------|-----------|-------------|------------|------|-----------|-----------------|--------------|
| `CPU_ATTN` | | fp16, bf16, fp32 | `auto` | Any | 32, 64, 80, 96, 112, 128, 160, 192, 224, 256 | ❌ | ❌ | All | N/A |
| `FLASHINFER` | Native† | fp16, bf16 | `auto`, `bfloat16`, `fp8`, `fp8_e4m3`, `fp8_e5m2` | 16, 32, 64 | 64, 128, 256 | ❌ | ❌ | Decoder | 7.x-9.x |
| `FLASHINFER` | TRTLLM† | fp16, bf16 | `auto`, `bfloat16`, `fp8`, `fp8_e4m3`, `fp8_e5m2` | 16, 32, 64 | 64, 128, 256 | ✅ | ❌ | Decoder | 10.x |
| `FLASH_ATTN` | FA2* | fp16, bf16 | `auto`, `bfloat16` | %16 | Any | ❌ | ❌ | All | ≥8.0 |
| `FLASH_ATTN` | FA3* | fp16, bf16 | `auto`, `bfloat16`, `fp8`, `fp8_e4m3`, `fp8_e5m2` | %16 | Any | ✅ | ❌ | All | 9.x |
| `FLASH_ATTN_DIFFKV` | | fp16, bf16 | `auto` | Any | Any | ❌ | ❌ | Decoder | Any |
| `FLEX_ATTENTION` | | fp16, bf16, fp32 | `auto`, `bfloat16` | Any | Any | ❌ | ✅ | Decoder, Encoder Only | Any |
| `ROCM_AITER_FA` | | fp16, bf16 | `auto` | %16 | 64, 128, 256 | ❌ | ❌ | Decoder | N/A |
| `ROCM_AITER_UNIFIED_ATTN` | | fp16, bf16 | `auto` | Any | Any | ❌ | ❌ | Decoder | N/A |
| `ROCM_ATTN` | | fp16, bf16, fp32 | `auto` | 16, 32, 544 | 32, 64, 96, 128, 160, 192, 224, 256 | ❌ | ❌ | Decoder | N/A |
| `TREE_ATTN` | | fp16, bf16 | `auto` | %16 | 32, 64, 96, 128, 160, 192, 224, 256 | ❌ | ❌ | Decoder | Any |
| `TRITON_ATTN` | | fp16, bf16, fp32 | `auto`, `bfloat16`, `fp8`, `fp8_e4m3`, `fp8_e5m2` | %16 | Any | ✅ | ✅ | All | Any |

> **†** FlashInfer uses TRTLLM attention on Blackwell (SM100), which supports sinks. Disable via `--attention-config.use_trtllm_attention=0`.
>
> **\*** Specify the FlashAttention version via `--attention-config.flash_attn_version=2` or `3`. Default is FA3 on SM90, FA2 otherwise.

## MLA (Multi-head Latent Attention) Backends

MLA uses separate backends for prefill and decode phases.

### Prefill Backends

The prefill backend is selected at runtime based on hardware and
configuration.

| Backend | Description | Compute Cap. | Enable | Disable | Notes |
|---------|-------------|--------------|--------|---------|-------|
| TRT-LLM Ragged‡ | TensorRT-LLM ragged attention | 10.x | Default on SM100 | `-ac.use_trtllm_ragged_deepseek_prefill=0` | DeepSeek R1 dims only |
| FlashInfer | FlashInfer CUTLASS backend | 10.x | `-ac.disable_flashinfer_prefill=0` | `-ac.disable_flashinfer_prefill=1` | DeepSeek R1 dims only |
| cuDNN | cuDNN-based attention | 10.x | `-ac.use_cudnn_prefill=1` | `-ac.use_cudnn_prefill=0` | |
| FlashAttention | FlashAttention varlen (FA2/FA3) | Any | Default fallback | Use other backends | FA3 on SM90, FA2 otherwise |

> **‡** TRT-LLM Ragged is the default on Blackwell (SM100).
> On other GPUs, FlashAttention is used as the default.

### Decode Backends

| Backend | Dtypes | KV Dtypes | Block Sizes | Head Sizes | Sink | Sparse | MM Prefix | Attention Types | Compute Cap. |
|---------|--------|-----------|-------------|------------|------|--------|-----------|-----------------|--------------|
| `CUTLASS_MLA` | fp16, bf16 | `auto`, `bfloat16`, `fp8`, `fp8_e4m3` | 128 | Any | ❌ | ❌ | ❌ | Decoder | 10.x |
| `FLASHINFER_MLA` | fp16, bf16 | `auto`, `bfloat16`, `fp8`, `fp8_e4m3` | 32, 64 | Any | ❌ | ❌ | ❌ | Decoder | 10.x |
| `FLASHMLA` | fp16, bf16 | `auto`, `bfloat16`, `fp8`, `fp8_e4m3` | 64 | Any | ❌ | ❌ | ❌ | Decoder | 9.x-10.x |
| `FLASHMLA_SPARSE` | bf16 | `auto`, `bfloat16`, `fp8_ds_mla` | 64 | 576 | ❌ | ✅ | ❌ | Decoder | 9.x-10.x |
| `FLASH_ATTN_MLA` | fp16, bf16 | `auto`, `bfloat16` | %16 | Any | ❌ | ❌ | ❌ | Decoder | 9.x |
| `ROCM_AITER_MLA` | fp16, bf16 | `auto` | 1 | Any | ❌ | ❌ | ❌ | Decoder | N/A |
| `ROCM_AITER_MLA_SPARSE` | fp16, bf16 | `auto` | Any | 576 | ❌ | ❌ | ❌ | Decoder | N/A |
| `ROCM_AITER_TRITON_MLA` | fp16, bf16 | `auto` | Any | Any | ❌ | ❌ | ❌ | Decoder | N/A |
| `TRITON_MLA` | fp16, bf16 | `auto`, `bfloat16` | Any | Any | ❌ | ❌ | ❌ | Decoder | Any |
@@ -28,7 +28,7 @@ Furthermore, vLLM decides whether to enable or disable a `CustomOp` based on `co
 !!! note
     Note that `all` and `none` cannot coexist in `compilation_config.custom_ops`.

-By default, if `compilation_config.backend == "inductor"` and `compilation_config.mode != CompilationMode.NONE`, a `none` will be appended into `compilation_config.custom_ops`, otherwise a `all` will be appended. In other words, this means `CustomOp` will be disabled in some platforms (i.e., those use `inductor` as dafault backend for `torch.compile`) when running with torch compile mode. In this case, Inductor generates (fused) Triton kernels for those disabled custom ops.
+By default, if `compilation_config.backend == "inductor"` and `compilation_config.mode != CompilationMode.NONE`, a `none` will be appended to `compilation_config.custom_ops`; otherwise, an `all` will be appended. In other words, `CustomOp` will be disabled on some platforms (i.e., those that use `inductor` as the default backend for `torch.compile`) when running in torch compile mode. In this case, Inductor generates (fused) Triton kernels for those disabled custom ops.

 !!! note
     For multi-modal models, vLLM has enforced the enabling of some custom ops to use device-specific deep-optimized kernels for better performance in the ViT part, such as `MMEncoderAttention` and `ApplyRotaryEmb`. We can also pass an `enforce_enable=True` param to the `__init__()` method of the `CustomOp` to enforce enabling at the object level.
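Returning to the `custom_ops` knob described above, a sketch of overriding it from the offline API; the `"+rms_norm"` toggle is an illustrative op name layered on top of the `none` base, per the documented semantics:

```python
from vllm import LLM
from vllm.config import CompilationConfig

# Disable all custom ops except RMSNorm; Inductor generates fused
# Triton kernels for the disabled ones.
llm = LLM(
    model="Qwen/Qwen3-0.6B",
    compilation_config=CompilationConfig(custom_ops=["none", "+rms_norm"]),
)
```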
@@ -271,7 +271,7 @@ Taking `MMEncoderAttention` as an example:
 ??? code

     ```python
-    from vllm.attention.layers.mm_encoder_attention import MMEncoderAttention
+    from vllm.model_executor.layers.attention import MMEncoderAttention
     from vllm.model_executor.custom_op import CustomOp
@@ -39,10 +39,9 @@ For more details on the design, please see the following resources:

 ## Use tlparse

-Use [tlparse](https://github.com/meta-pytorch/tlparse) to acquire torch.compile logs. These logs show all stages of the compilation process,
-including the fused kernels that torch.compile produces.
-If you can, we recommend sending these or pieces of these along with any bug reports --
-they are very helpful.
+Use [tlparse](https://github.com/meta-pytorch/tlparse) to view torch.compile
+logs. These logs show all stages of the compilation process, including the fused
+kernels that torch.compile produces.

 Install tlparse:

@@ -50,11 +49,16 @@ Install tlparse:
 pip install tlparse
 ```

+To enable the torch.compile logs, you can set the envvar `TORCH_TRACE=<dir>`.
+During tracing, a file per rank will be created inside of that directory, with
+each file containing the artifacts during compilation. If you can, we recommend
+sending these log files along with bug reports -- they are very helpful.
+
 Usage (offline inference)

 ```sh
 TORCH_TRACE=~/trace_dir python my_script.py
-tlparse ~/trace_dir/<the_first_log_file>
+tlparse ~/trace_dir/<rank_0_log_file>
 ```

 Usage (serving)

@@ -62,10 +66,11 @@ Usage (serving)
 ```sh
 TORCH_TRACE=~/trace_dir vllm serve
 # ctrl-c out of the server
-tlparse ~/trace_dir/<the_first_log_file>
+tlparse ~/trace_dir/<rank_0_log_file>
 ```

-The `tlparse` command outputs some HTML files (perhaps into e.g. `./tl_out/index.html`).
+Given one of the log files, the `tlparse` command outputs some HTML files
+(perhaps into e.g. `./tl_out/index.html`).
 Open it to see the logs. It'll look something like the following:

 

@@ -206,7 +211,7 @@ LLM(model, compilation_config=CompilationConfig(
 These modes are stricter and reduce or eliminate the need of dynamic shapes guarding, which can help isolate issues:

 - `unbacked`: Uses unbacked symints which don't allow guards, making it easier to identify where guards are being incorrectly added
-- `backed_size_oblivious`: Uses a mode that is more strict about guarding.
+- `backed_size_oblivious`: Uses a mode that is stricter about guarding.

 For more details on dynamic shapes modes, see [Dynamic shapes and vLLM guard dropping](torch_compile.md#dynamic-shapes-and-vllm-guard-dropping).
@@ -10,7 +10,7 @@ receives a request for a LoRA adapter that hasn't been loaded yet, the resolver
 to locate and load the adapter from their configured storage locations. This enables:

 - **Dynamic LoRA Loading**: Load adapters on-demand without server restarts
-- **Multiple Storage Backends**: Support for filesystem, S3, and custom backends. The built-in `lora_filesystem_resolver` requires a local storage path, but custom resolvers can be implemented to fetch from any source.
+- **Multiple Storage Backends**: Support for filesystem, S3, and custom backends. The built-in `lora_filesystem_resolver` requires a local storage path, while the built-in `hf_hub_resolver` will pull LoRA adapters from the Hugging Face Hub and proceed in an identical manner. In general, custom resolvers can be implemented to fetch from any source.
 - **Automatic Discovery**: Seamless integration with existing LoRA workflows
 - **Scalable Deployment**: Centralized adapter management across multiple vLLM instances
@@ -36,8 +36,7 @@ th {
 | pplx | batched | fp8,int8 | G,A,T | Y | Y | [`PplxPrepareAndFinalize`][vllm.model_executor.layers.fused_moe.pplx_prepare_finalize.PplxPrepareAndFinalize] |
 | deepep_high_throughput | standard | fp8 | G(128),A,T<sup>2</sup> | Y | Y | [`DeepEPLLPrepareAndFinalize`][vllm.model_executor.layers.fused_moe.deepep_ll_prepare_finalize.DeepEPLLPrepareAndFinalize] |
 | deepep_low_latency | batched | fp8 | G(128),A,T<sup>3</sup> | Y | Y | [`DeepEPHTPrepareAndFinalize`][vllm.model_executor.layers.fused_moe.deepep_ht_prepare_finalize.DeepEPHTPrepareAndFinalize] |
-| flashinfer_all2allv | standard | nvfp4,fp8 | G,A,T | N | N | [`FlashInferAllToAllMoEPrepareAndFinalize`][vllm.model_executor.layers.fused_moe.flashinfer_cutlass_prepare_finalize.FlashInferAllToAllMoEPrepareAndFinalize] |
+| flashinfer_all2allv | standard | nvfp4,fp8 | G,A,T | N | N | [`FlashInferA2APrepareAndFinalize`][vllm.model_executor.layers.fused_moe.flashinfer_a2a_prepare_finalize.FlashInferA2APrepareAndFinalize] |
-| flashinfer<sup>4</sup> | standard | nvfp4,fp8 | G,A,T | N | N | [`FlashInferCutlassMoEPrepareAndFinalize`][vllm.model_executor.layers.fused_moe.flashinfer_cutlass_prepare_finalize.FlashInferCutlassMoEPrepareAndFinalize] |
 | MoEPrepareAndFinalizeNoEP<sup>5</sup> | standard | fp8,int8 | G,A,T | N | Y | [`MoEPrepareAndFinalizeNoEP`][vllm.model_executor.layers.fused_moe.prepare_finalize.MoEPrepareAndFinalizeNoEP] |
 | BatchedPrepareAndFinalize<sup>5</sup> | batched | fp8,int8 | G,A,T | N | Y | [`BatchedPrepareAndFinalize`][vllm.model_executor.layers.fused_moe.fused_batched_moe.BatchedPrepareAndFinalize] |
@@ -100,7 +100,7 @@ Every plugin has three parts:
 - `_enum`: This property is the device enumeration from [PlatformEnum][vllm.platforms.interface.PlatformEnum]. Usually, it should be `PlatformEnum.OOT`, which means the platform is out-of-tree.
 - `device_type`: This property should return the type of the device which pytorch uses. For example, `"cpu"`, `"cuda"`, etc.
 - `device_name`: This property is usually set the same as `device_type`. It's mainly used for logging purposes.
-- `check_and_update_config`: This function is called very early in the vLLM's initialization process. It's used for plugins to update the vllm configuration. For example, the block size, graph mode config, etc, can be updated in this function. The most important thing is that the **worker_cls** should be set in this function to let vLLM know which worker class to use for the worker process.
+- `check_and_update_config`: This function is called very early in vLLM's initialization process. It's used for plugins to update the vllm configuration. For example, the block size, graph mode config, etc., can be updated in this function. The most important thing is that the **worker_cls** should be set in this function to let vLLM know which worker class to use for the worker process.
 - `get_attn_backend_cls`: This function should return the attention backend class's fully qualified name.
 - `get_device_communicator_cls`: This function should return the device communicator class's fully qualified name.
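Tying the plugin properties above together, a minimal out-of-tree platform sketch; the class and module names (`MyPlatform`, `my_plugin.*`) are hypothetical, and the method set mirrors the bullets in this section rather than a verbatim upstream interface:

```python
# my_plugin/platform.py (hypothetical module layout)
from vllm.platforms.interface import Platform, PlatformEnum


class MyPlatform(Platform):
    _enum = PlatformEnum.OOT  # out-of-tree platform
    device_type = "cpu"       # the device type pytorch uses
    device_name = "cpu"       # mainly used for logging

    @classmethod
    def check_and_update_config(cls, vllm_config) -> None:
        # Called very early in initialization; the worker class must be
        # set here so vLLM knows which worker to spawn.
        vllm_config.parallel_config.worker_cls = "my_plugin.worker.MyWorker"

    @classmethod
    def get_attn_backend_cls(cls, *args, **kwargs) -> str:
        return "my_plugin.my_dummy_attention.MyDummyAttention"

    @classmethod
    def get_device_communicator_cls(cls) -> str:
        return "my_plugin.communicator.MyCommunicator"
```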
@@ -126,7 +126,7 @@ Every plugin has three parts:

 5. Implement the attention backend class `MyDummyAttention` in `my_dummy_attention.py`. The attention backend class should inherit from [AttentionBackend][vllm.v1.attention.backend.AttentionBackend]. It's used to calculate attention with your device. Take `vllm.v1.attention.backends` as an example; it contains many attention backend implementations.

-6. Implement custom ops for high performance. Most ops can be ran by pytorch native implementation, while the performance may not be good. In this case, you can implement specific custom ops for your plugins. Currently, there are kinds of custom ops vLLM supports:
+6. Implement custom ops for high performance. Most ops can be run by the pytorch native implementation, though the performance may not be good. In this case, you can implement specific custom ops for your plugins. Currently, these are the kinds of custom ops vLLM supports:

 - pytorch ops
   there are 3 kinds of pytorch ops:
@@ -36,12 +36,12 @@ th:not(:first-child) {
 }
 </style>

-| Feature | [CP](../configuration/optimization.md#chunked-prefill) | [APC](automatic_prefix_caching.md) | [LoRA](lora.md) | [SD](spec_decode.md) | CUDA graph | [pooling](../models/pooling_models.md) | <abbr title="Encoder-Decoder Models">enc-dec</abbr> | <abbr title="Logprobs">logP</abbr> | <abbr title="Prompt Logprobs">prmpt logP</abbr> | <abbr title="Async Output Processing">async output</abbr> | multi-step | <abbr title="Multimodal Inputs">mm</abbr> | best-of | beam-search | [prompt-embeds](prompt_embeds.md) |
+| Feature | [CP](../configuration/optimization.md#chunked-prefill) | [APC](automatic_prefix_caching.md) | [LoRA](lora.md) | [SD](spec_decode/README.md) | CUDA graph | [pooling](../models/pooling_models.md) | <abbr title="Encoder-Decoder Models">enc-dec</abbr> | <abbr title="Logprobs">logP</abbr> | <abbr title="Prompt Logprobs">prmpt logP</abbr> | <abbr title="Async Output Processing">async output</abbr> | multi-step | <abbr title="Multimodal Inputs">mm</abbr> | best-of | beam-search | [prompt-embeds](prompt_embeds.md) |
 |---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
 | [CP](../configuration/optimization.md#chunked-prefill) | ✅ | | | | | | | | | | | | | | |
 | [APC](automatic_prefix_caching.md) | ✅ | ✅ | | | | | | | | | | | | | |
 | [LoRA](lora.md) | ✅ | ✅ | ✅ | | | | | | | | | | | | |
-| [SD](spec_decode.md) | ✅ | ✅ | ❌ | ✅ | | | | | | | | | | | |
+| [SD](spec_decode/README.md) | ✅ | ✅ | ❌ | ✅ | | | | | | | | | | | |
 | CUDA graph | ✅ | ✅ | ✅ | ✅ | ✅ | | | | | | | | | | |
 | [pooling](../models/pooling_models.md) | 🟠\* | 🟠\* | ✅ | ❌ | ✅ | ✅ | | | | | | | | | |
 | <abbr title="Encoder-Decoder Models">enc-dec</abbr> | ❌ | [❌](https://github.com/vllm-project/vllm/issues/7366) | ❌ | [❌](https://github.com/vllm-project/vllm/issues/7366) | ✅ | ✅ | ✅ | | | | | | | | |

@@ -64,7 +64,7 @@ th:not(:first-child) {
 | [CP](../configuration/optimization.md#chunked-prefill) | [❌](https://github.com/vllm-project/vllm/issues/2729) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
 | [APC](automatic_prefix_caching.md) | [❌](https://github.com/vllm-project/vllm/issues/3687) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
 | [LoRA](lora.md) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
-| [SD](spec_decode.md) | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ | ✅ | ✅ |
+| [SD](spec_decode/README.md) | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ | ✅ | ✅ |
 | CUDA graph | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ | ✅ | [❌](https://github.com/vllm-project/vllm/issues/26970) |
 | [pooling](../models/pooling_models.md) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
 | <abbr title="Encoder-Decoder Models">enc-dec</abbr> | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ | ✅ |
@@ -159,10 +159,12 @@ Alternatively, you can use the LoRAResolver plugin to dynamically load LoRA adapters

 You can set up multiple LoRAResolver plugins if you want to load LoRA adapters from different sources. For example, you might have one resolver for local files and another for S3 storage. vLLM will load the first LoRA adapter that it finds.

-You can either install existing plugins or implement your own. By default, vLLM comes with a [resolver plugin to load LoRA adapters from a local directory.](https://github.com/vllm-project/vllm/tree/main/vllm/plugins/lora_resolvers)
-To enable this resolver, set `VLLM_ALLOW_RUNTIME_LORA_UPDATING` to True, set `VLLM_PLUGINS` to include `lora_filesystem_resolver`, and then set `VLLM_LORA_RESOLVER_CACHE_DIR` to a local directory. When vLLM receives a request using a LoRA adapter `foobar`,
-it will first look in the local directory for a directory `foobar`, and attempt to load the contents of that directory as a LoRA adapter. If successful, the request will complete as normal and
-that adapter will then be available for normal use on the server.
+You can either install existing plugins or implement your own. By default, vLLM comes with a [resolver plugin to load LoRA adapters from a local directory, as well as a resolver plugin to load LoRA adapters from repositories on Hugging Face Hub](https://github.com/vllm-project/vllm/tree/main/vllm/plugins/lora_resolvers).
+To enable either of these resolvers, you must set `VLLM_ALLOW_RUNTIME_LORA_UPDATING` to True.
+
+- To leverage a local directory, set `VLLM_PLUGINS` to include `lora_filesystem_resolver` and set `VLLM_LORA_RESOLVER_CACHE_DIR` to a local directory (see the example below). When vLLM receives a request using a LoRA adapter `foobar`,
+  it will first look in the local directory for a directory `foobar`, and attempt to load the contents of that directory as a LoRA adapter. If successful, the request will complete as normal and that adapter will then be available for normal use on the server.
+- To leverage repositories on Hugging Face Hub, set `VLLM_PLUGINS` to include `lora_hf_hub_resolver` and set `VLLM_LORA_RESOLVER_HF_REPO_LIST` to a comma-separated list of repository IDs on Hugging Face Hub. When vLLM receives a request for the LoRA adapter `my/repo/subpath`, it will download the adapter at the `subpath` of `my/repo` if it exists and contains an `adapter_config.json`, then build a request to the cached dir for the adapter, similar to the `lora_filesystem_resolver`. Please note that enabling remote downloads is insecure and not intended for use in production environments.
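For example, a shell setup for the filesystem resolver might look like the following; the adapter directory and served model name are illustrative:

```bash
# Enable runtime LoRA updating and the filesystem resolver.
export VLLM_ALLOW_RUNTIME_LORA_UPDATING=True
export VLLM_PLUGINS=lora_filesystem_resolver
export VLLM_LORA_RESOLVER_CACHE_DIR=/path/to/lora/adapters

# A request naming the adapter "foobar" will then resolve
# /path/to/lora/adapters/foobar as a LoRA adapter.
vllm serve meta-llama/Llama-3.1-8B-Instruct
```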

 Alternatively, follow these example steps to implement your own plugin:
@@ -184,15 +184,6 @@ Support use case: Prefill with 'HND' and decode with 'NHD' with experimental con
 --kv-transfer-config '{..., "enable_permute_local_kv":"True"}'
 ```

-### Cross layers blocks
-
-By default, this feature is disabled. On attention backends that support this feature, each logical block is contiguous in physical memory. This reduces the number of buffers that need to be transferred.
-To enable this feature:
-
-```bash
---kv-transfer-config '{..., "kv_connector_extra_config": {"enable_cross_layers_blocks": "True"}}'
-```
-
 ## Example Scripts/Code

 Refer to these example scripts in the vLLM repository:
@@ -2,11 +2,13 @@

 Quantization trades off model precision for smaller memory footprint, allowing large models to be run on a wider range of devices.

-Contents:
+!!! tip
+    To get started with quantization, see [LLM Compressor](llm_compressor.md), a library for optimizing models for deployment with vLLM that supports FP8, INT8, INT4, and other quantization formats.
+
+The following are the supported quantization formats for vLLM:

 - [AutoAWQ](auto_awq.md)
 - [BitsAndBytes](bnb.md)
-- [BitBLAS](bitblas.md)
 - [GGUF](gguf.md)
 - [GPTQModel](gptqmodel.md)
 - [Intel Neural Compressor](inc.md)

@@ -49,8 +51,6 @@ th:not(:first-child) {
 | Marlin (GPTQ/AWQ/FP8) | ❌ | ❌ | ✅︎ | ✅︎ | ✅︎ | ❌ | ❌ | ❌ |
 | INT8 (W8A8) | ❌ | ✅︎ | ✅︎ | ✅︎ | ✅︎ | ❌ | ❌ | ✅︎ |
 | FP8 (W8A8) | ❌ | ❌ | ❌ | ✅︎ | ✅︎ | ✅︎ | ❌ | ❌ |
-| BitBLAS | ✅︎ | ✅ | ✅︎ | ✅︎ | ✅︎ | ❌ | ❌ | ❌ |
-| BitBLAS (GPTQ) | ❌ | ❌ | ✅︎ | ✅︎ | ✅︎ | ❌ | ❌ | ❌ |
 | bitsandbytes | ✅︎ | ✅︎ | ✅︎ | ✅︎ | ✅︎ | ❌ | ❌ | ❌ |
 | DeepSpeedFP | ✅︎ | ✅︎ | ✅︎ | ✅︎ | ✅︎ | ❌ | ❌ | ❌ |
 | GGUF | ✅︎ | ✅︎ | ✅︎ | ✅︎ | ✅︎ | ✅︎ | ❌ | ❌ |
@@ -1,58 +0,0 @@
-# BitBLAS
-
-vLLM now supports [BitBLAS](https://github.com/microsoft/BitBLAS) for more efficient and flexible model inference. Compared to other quantization frameworks, BitBLAS provides more precision combinations.
-
-!!! note
-    Ensure your hardware supports the selected `dtype` (`torch.bfloat16` or `torch.float16`).
-    Most recent NVIDIA GPUs support `float16`, while `bfloat16` is more common on newer architectures like Ampere or Hopper.
-    For details see [supported hardware](README.md#supported-hardware).
-
-Below are the steps to utilize BitBLAS with vLLM.
-
-```bash
-pip install bitblas>=0.1.0
-```
-
-vLLM reads the model's config file and supports pre-quantized checkpoints.
-
-You can find pre-quantized models on:
-
-- [Hugging Face (BitBLAS)](https://huggingface.co/models?search=bitblas)
-- [Hugging Face (GPTQ)](https://huggingface.co/models?search=gptq)
-
-Usually, these repositories have a `quantize_config.json` file that includes a `quantization_config` section.
-
-## Read bitblas format checkpoint
-
-```python
-from vllm import LLM
-import torch
-
-# "hxbgsyxh/llama-13b-4bit-g-1-bitblas" is a pre-quantized checkpoint.
-model_id = "hxbgsyxh/llama-13b-4bit-g-1-bitblas"
-llm = LLM(
-    model=model_id,
-    dtype=torch.bfloat16,
-    trust_remote_code=True,
-    quantization="bitblas",
-)
-```
-
-## Read gptq format checkpoint
-
-??? code
-
-    ```python
-    from vllm import LLM
-    import torch
-
-    # "hxbgsyxh/llama-13b-4bit-g-1" is a pre-quantized checkpoint.
-    model_id = "hxbgsyxh/llama-13b-4bit-g-1"
-    llm = LLM(
-        model=model_id,
-        dtype=torch.float16,
-        trust_remote_code=True,
-        quantization="bitblas",
-        max_model_len=1024,
-    )
-    ```
31 docs/features/quantization/llm_compressor.md Normal file

@@ -0,0 +1,31 @@
# LLM Compressor

[LLM Compressor](https://docs.vllm.ai/projects/llm-compressor/en/latest/) is a library for optimizing models for deployment with vLLM.
It provides a comprehensive set of quantization algorithms, including support for techniques such as FP4, FP8, INT8, and INT4 quantization.

## Why use LLM Compressor?

Modern LLMs often contain billions of parameters stored in 16-bit or 32-bit floating point, requiring substantial GPU memory and limiting deployment options.
Quantization lowers memory requirements while maintaining inference output quality by reducing the precision of model weights and activations to smaller data types.

LLM Compressor provides the following benefits:

- **Reduced memory footprint**: Run larger models on smaller GPUs.
- **Lower inference costs**: Serve more concurrent users per GPU, directly reducing the cost per query in production deployments.
- **Faster inference**: Smaller data types mean less memory bandwidth consumed, which often translates to higher throughput, especially for memory-bound workloads.

LLM Compressor handles the complexity of quantization, calibration, and format conversion, producing models ready for immediate use with vLLM.

## Key features

- **Multiple Quantization Algorithms**: Support for AWQ, GPTQ, AutoRound, and Round-to-Nearest.
  Also includes support for QuIP and SpinQuant-style transforms as well as KV cache and attention quantization.
- **Multiple Quantization Methods**: Support for FP8, INT8, INT4, NVFP4, MXFP4, and mixed-precision quantization
- **One-Shot Quantization**: Quantize models quickly with minimal calibration data
- **vLLM Integration**: Seamlessly deploy quantized models with vLLM using the compressed-tensors format
- **Hugging Face Compatibility**: Works with models from the Hugging Face Hub

## Resources

- [LLM Compressor examples](https://github.com/vllm-project/llm-compressor/tree/main/examples)
- [GitHub Repository](https://github.com/vllm-project/llm-compressor)
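To illustrate the vLLM side of this workflow, a minimal sketch of loading a compressed-tensors checkpoint; the model ID is a hypothetical placeholder for one produced by LLM Compressor:

```python
from vllm import LLM, SamplingParams

# Hypothetical compressed-tensors checkpoint produced by LLM Compressor;
# vLLM detects the quantization format from the model config.
llm = LLM(model="my-org/Llama-3.1-8B-Instruct-FP8-dynamic")

outputs = llm.generate(
    ["What is quantization?"],
    SamplingParams(temperature=0.0, max_tokens=64),
)
print(outputs[0].outputs[0].text)
```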
@@ -11,6 +11,9 @@
 This document shows how to use [Speculative Decoding](https://x.com/karpathy/status/1697318534555336961) with vLLM.
 Speculative decoding is a technique which improves inter-token latency in memory-bound LLM inference.

+!!! tip
+    To train your own draft models for speculative decoding, see [Speculators](speculators.md), a library for training draft models that integrates seamlessly with vLLM.
+
 ## Speculating with a draft model

 The following code configures vLLM in an offline mode to use speculative decoding with a draft model, speculating 5 tokens at a time.

@@ -223,7 +226,7 @@ A variety of speculative models of this type are available on HF hub:
 ## Speculating using EAGLE based draft models

 The following code configures vLLM to use speculative decoding where proposals are generated by
-an [EAGLE (Extrapolation Algorithm for Greater Language-model Efficiency)](https://arxiv.org/pdf/2401.15077) based draft model. A more detailed example for offline mode, including how to extract request level acceptance rate, can be found [here](../../examples/offline_inference/spec_decode.py).
+an [EAGLE (Extrapolation Algorithm for Greater Language-model Efficiency)](https://arxiv.org/pdf/2401.15077) based draft model. A more detailed example for offline mode, including how to extract the request-level acceptance rate, can be found in [examples/offline_inference/spec_decode.py](../../../examples/offline_inference/spec_decode.py).

 ??? code

@@ -313,7 +316,7 @@ speculative decoding, breaking down the guarantees into three key areas:
 3. **vLLM Logprob Stability**
    \- vLLM does not currently guarantee stable token log probabilities (logprobs). This can result in different outputs for the
    same request across runs. For more details, see the FAQ section
-   titled *Can the output of a prompt vary across runs in vLLM?* in the [FAQs](../usage/faq.md).
+   titled *Can the output of a prompt vary across runs in vLLM?* in the [FAQs](../../usage/faq.md).

 While vLLM strives to ensure losslessness in speculative decoding, variations in generated outputs with and without speculative decoding
 can occur due to the following factors:

@@ -322,7 +325,7 @@ can occur due to the following factors:
 - **Batch Size and Numerical Stability**: Changes in batch size may cause variations in logprobs and output probabilities, potentially
   due to non-deterministic behavior in batched operations or numerical instability.

-For mitigation strategies, please refer to the FAQ entry *Can the output of a prompt vary across runs in vLLM?* in the [FAQs](../usage/faq.md).
+For mitigation strategies, please refer to the FAQ entry *Can the output of a prompt vary across runs in vLLM?* in the [FAQs](../../usage/faq.md).

 ## Resources for vLLM contributors
29 docs/features/spec_decode/speculators.md Normal file

@@ -0,0 +1,29 @@
# Speculators

[Speculators](https://docs.vllm.ai/projects/speculators/en/latest/) is a library for accelerating LLM inference through speculative decoding, providing efficient draft model training that integrates seamlessly with vLLM to reduce latency and improve throughput.

Speculators provides the following key features:

- **Offline training data generation using vLLM**: Enable the generation of hidden states using vLLM. Data samples are saved to disk and can be used for draft model training.
- **Draft model training support**: E2E training support of single and multi-layer draft models. Training is supported for both non-MoE and MoE models.
- **Standardized, extensible format**: Provides a Hugging Face-compatible format for defining speculative models, with tools to convert from external research repositories into a standard speculators format for easy adoption.
- **Seamless vLLM Integration**: Built for direct deployment into vLLM, enabling low-latency, production-grade inference with minimal overhead.

## Why use Speculators?

Large language models generate text one token at a time, which creates a fundamental bottleneck: each token requires a full forward pass through the model, leaving GPU compute underutilized while waiting for memory-bound operations.
Speculative decoding addresses this by using a smaller, faster "draft" model (often just a single transformer layer) to predict multiple tokens ahead, and then verifying tokens in parallel with the primary model.

Speculative decoding provides the following benefits:

- **Reduced latency**: Generates tokens 2-3 times faster for interactive applications such as chatbots and code assistants, where response time directly impacts user experience
- **Better GPU utilization**: Converts latency- and memory-bound decoding in the large model into compute-bound parallel token verification, improving hardware utilization.
- **No quality loss**: Speculative decoding does not approximate the target model. Accepted tokens are exactly those the target model would have produced under the same sampling configuration; rejected draft tokens are discarded and regenerated by the target model.
- **Cost efficiency**: Serve more requests per GPU by reducing the time each request occupies the hardware

Speculators is particularly valuable for latency-sensitive applications where users are waiting for responses in real time, such as conversational AI, interactive coding assistants, and streaming text generation.

## Resources

- [Speculators examples](https://github.com/vllm-project/speculators/tree/main/examples)
- [GitHub Repository](https://github.com/vllm-project/speculators)
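For reference, deploying a trained draft model follows the `speculative_config` pattern from the speculative decoding doc above; both model names here are illustrative:

```python
from vllm import LLM

# Target model plus a hypothetical speculators-format draft model,
# speculating 5 tokens per step.
llm = LLM(
    model="meta-llama/Llama-3.1-8B-Instruct",
    speculative_config={
        "model": "my-org/llama-3.1-8b-draft",
        "num_speculative_tokens": 5,
    },
)
```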
@@ -171,10 +171,6 @@ uv pip install dist/*.whl

 ## Building for your target CPU

-vLLM supports building Docker images for x86 CPU platforms with automatic instruction set detection.
-
-### Basic build command
-
 ```bash
 docker build -f docker/Dockerfile.cpu \
     --build-arg VLLM_CPU_DISABLE_AVX512=<false (default)|true> \

@@ -187,29 +183,21 @@ docker build -f docker/Dockerfile.cpu \
     --target vllm-openai .
 ```

-!!! note "Instruction set auto-detection"
-    By default, vLLM will auto-detect CPU instruction sets (AVX512, AVX2, etc.) from the build system's CPU flags. Build arguments like `VLLM_CPU_AVX2`, `VLLM_CPU_AVX512`, `VLLM_CPU_AVX512BF16`, `VLLM_CPU_AVX512VNNI`, and `VLLM_CPU_AMXBF16` are primarily used for **cross-compilation** or for building container images on systems that don't have the target platforms ISA:
+!!! note "Auto-detection by default"
+    By default, CPU instruction sets (AVX512, AVX2, etc.) are automatically detected from the build system's CPU flags. Build arguments like `VLLM_CPU_AVX2`, `VLLM_CPU_AVX512`, `VLLM_CPU_AVX512BF16`, `VLLM_CPU_AVX512VNNI`, and `VLLM_CPU_AMXBF16` are used for cross-compilation:

-    - Set `VLLM_CPU_{ISA}=true` to force-enable an instruction set (for cross-compilation to target platforms with that ISA)
-    - Set `VLLM_CPU_{ISA}=false` to rely on auto-detection
-    - When an ISA build arg is set to `true`, vLLM will build with that instruction set regardless of the build system's CPU capabilities
+    - `VLLM_CPU_{ISA}=true` - Force-enable the instruction set (build with the ISA regardless of build system capabilities)
+    - `VLLM_CPU_{ISA}=false` - Rely on auto-detection (default)

-### Build examples
+### Examples

-**Example 1: Auto-detection (native build)**
+**Auto-detection build (default)**

-Build on a machine with the same CPU as your target deployment:
-
 ```bash
-# Auto-detects all CPU features from the build system
-docker build -f docker/Dockerfile.cpu \
-    --tag vllm-cpu-env \
-    --target vllm-openai .
+docker build -f docker/Dockerfile.cpu --tag vllm-cpu-env --target vllm-openai .
 ```

-**Example 2: Cross-compilation for AVX512 deployment**
+**Cross-compile for AVX512**

-Build an AVX512 image on any x86_64 system (even without AVX512):
-
 ```bash
 docker build -f docker/Dockerfile.cpu \

@@ -220,9 +208,7 @@ docker build -f docker/Dockerfile.cpu \
     --target vllm-openai .
 ```

-**Example 3: Cross-compilation for AVX2 deployment**
+**Cross-compile for AVX2**

-Build an AVX2 image for older CPUs:
-
 ```bash
 docker build -f docker/Dockerfile.cpu \
@@ -4,6 +4,7 @@ import importlib.metadata
 import importlib.util
 import logging
 import sys
+import textwrap
 import traceback
 from argparse import SUPPRESS, Action, HelpFormatter
 from collections.abc import Iterable

@@ -152,21 +153,21 @@ class MarkdownFormatter(HelpFormatter):
         heading_md = f"{self._argument_heading_prefix} {option_strings}\n\n"
         self._markdown_output.append(heading_md)

-        if choices := action.choices:
-            choices = f"`{'`, `'.join(str(c) for c in choices)}`"
-            self._markdown_output.append(f"Possible choices: {choices}\n\n")
-        elif (metavar := action.metavar) and isinstance(metavar, (list, tuple)):
-            metavar = f"`{'`, `'.join(str(m) for m in metavar)}`"
-            self._markdown_output.append(f"Possible choices: {metavar}\n\n")
+        if action.choices or isinstance(action.metavar, (list, tuple)):
+            choices_iterable = action.choices or action.metavar
+            choices = f"`{'`, `'.join(str(c) for c in choices_iterable)}`"
+            self._markdown_output.append(f": Possible choices: {choices}\n\n")

         if action.help:
-            self._markdown_output.append(f"{action.help}\n\n")
+            help_dd = ":" + textwrap.indent(action.help, "    ")[1:]
+            self._markdown_output.append(f"{help_dd}\n\n")

-        if (default := action.default) != SUPPRESS:
+        # None usually means the default is determined at runtime
+        if (default := action.default) != SUPPRESS and default is not None:
             # Make empty string defaults visible
             if default == "":
                 default = '""'
-            self._markdown_output.append(f"Default: `{default}`\n\n")
+            self._markdown_output.append(f": Default: `{default}`\n\n")

     def format_help(self):
         """Return the formatted help as markdown."""
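The `":" + textwrap.indent(...)[1:]` construction above turns each help string into a Markdown definition-list body. A standalone sketch of the transformation (the sample text is illustrative; the indent width is assumed to be four spaces):

```python
import textwrap

help_text = "Maximum number of tokens.\nApplies per batch."
# Indent every line by four spaces, then swap the first indent character
# for ":" so the block parses as a definition-list description.
help_dd = ":" + textwrap.indent(help_text, "    ")[1:]
print(help_dd)
# :   Maximum number of tokens.
#     Applies per batch.
```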
@@ -327,7 +327,7 @@ curl -s http://localhost:8000/pooling -H "Content-Type: application/json" -d '{
 }'
 ```

-Due to limitations in the the output schema, the output consists of a list of
+Due to limitations in the output schema, the output consists of a list of
 token scores for each token for each input. This means that you'll have to call
 `/tokenize` as well to be able to pair tokens with scores.
 Refer to the tests in `tests/models/language/pooling/test_bge_m3.py` to see how

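For illustration, a minimal client-side sketch of that pairing, assuming a server at `localhost:8000` and the simplified response shapes noted in the comments:

```python
# Sketch: pair /pooling token scores with /tokenize token IDs.
# Assumptions: a vLLM server at localhost:8000 serving BAAI/bge-m3,
# and the (simplified) response shapes noted below.
import requests

BASE = "http://localhost:8000"
text = "What is BGE M3?"

tok = requests.post(f"{BASE}/tokenize", json={"prompt": text}).json()
ids = tok["tokens"]  # token IDs, not strings

pool = requests.post(f"{BASE}/pooling", json={"input": text}).json()
scores = pool["data"][0]["data"]  # assumed: one score per input token

for tid, score in zip(ids, scores):
    print(tid, score)
```

The `tokens` field holds token IDs; decode them with the model's tokenizer if you need human-readable token/score pairs.
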
@@ -674,6 +674,7 @@ These models primarily accept the [`LLM.generate`](./generative_models.md#llmgen
 | `GLM4VForCausalLM`<sup>^</sup> | GLM-4V | T + I | `zai-org/glm-4v-9b`, `zai-org/cogagent-9b-20241220`, etc. | ✅︎ | ✅︎ |
 | `Glm4vForConditionalGeneration` | GLM-4.1V-Thinking | T + I<sup>E+</sup> + V<sup>E+</sup> | `zai-org/GLM-4.1V-9B-Thinking`, etc. | ✅︎ | ✅︎ |
 | `Glm4vMoeForConditionalGeneration` | GLM-4.5V | T + I<sup>E+</sup> + V<sup>E+</sup> | `zai-org/GLM-4.5V`, etc. | ✅︎ | ✅︎ |
+| `GlmOcrForConditionalGeneration` | GLM-OCR | T + I<sup>E+</sup> | `zai-org/GLM-OCR`, etc. | ✅︎ | ✅︎ |
 | `GraniteSpeechForConditionalGeneration` | Granite Speech | T + A | `ibm-granite/granite-speech-3.3-8b` | ✅︎ | ✅︎ |
 | `H2OVLChatModel` | H2OVL | T + I<sup>E+</sup> | `h2oai/h2ovl-mississippi-800m`, `h2oai/h2ovl-mississippi-2b`, etc. | | ✅︎ |
 | `HunYuanVLForConditionalGeneration` | HunyuanOCR | T + I<sup>E+</sup> | `tencent/HunyuanOCR`, etc. | ✅︎ | ✅︎ |
@@ -686,6 +687,7 @@ These models primarily accept the [`LLM.generate`](./generative_models.md#llmgen
 | `KeyeForConditionalGeneration` | Keye-VL-8B-Preview | T + I<sup>E+</sup> + V<sup>E+</sup> | `Kwai-Keye/Keye-VL-8B-Preview` | ✅︎ | ✅︎ |
 | `KeyeVL1_5ForConditionalGeneration` | Keye-VL-1_5-8B | T + I<sup>E+</sup> + V<sup>E+</sup> | `Kwai-Keye/Keye-VL-1_5-8B` | ✅︎ | ✅︎ |
 | `KimiVLForConditionalGeneration` | Kimi-VL-A3B-Instruct, Kimi-VL-A3B-Thinking | T + I<sup>+</sup> | `moonshotai/Kimi-VL-A3B-Instruct`, `moonshotai/Kimi-VL-A3B-Thinking` | | ✅︎ |
+| `KimiK25ForConditionalGeneration` | Kimi-K2.5 | T + I<sup>+</sup> | `moonshotai/Kimi-K2.5` | | ✅︎ |
 | `LightOnOCRForConditionalGeneration` | LightOnOCR-1B | T + I<sup>+</sup> | `lightonai/LightOnOCR-1B`, etc | ✅︎ | ✅︎ |
 | `Lfm2VlForConditionalGeneration` | LFM2-VL | T + I<sup>+</sup> | `LiquidAI/LFM2-VL-450M`, `LiquidAI/LFM2-VL-3B`, `LiquidAI/LFM2-VL-8B-A1B`, etc. | ✅︎ | ✅︎ |
 | `Llama4ForConditionalGeneration` | Llama 4 | T + I<sup>+</sup> | `meta-llama/Llama-4-Scout-17B-16E-Instruct`, `meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8`, `meta-llama/Llama-4-Maverick-17B-128E-Instruct`, etc. | ✅︎ | ✅︎ |
@@ -718,6 +720,7 @@ These models primarily accept the [`LLM.generate`](./generative_models.md#llmgen
 | `Qwen3VLForConditionalGeneration` | Qwen3-VL | T + I<sup>E+</sup> + V<sup>E+</sup> | `Qwen/Qwen3-VL-4B-Instruct`, etc. | ✅︎ | ✅︎ |
 | `Qwen3VLMoeForConditionalGeneration` | Qwen3-VL-MOE | T + I<sup>E+</sup> + V<sup>E+</sup> | `Qwen/Qwen3-VL-30B-A3B-Instruct`, etc. | ✅︎ | ✅︎ |
 | `Qwen3OmniMoeThinkerForConditionalGeneration` | Qwen3-Omni | T + I<sup>E+</sup> + V<sup>E+</sup> + A<sup>+</sup> | `Qwen/Qwen3-Omni-30B-A3B-Instruct`, `Qwen/Qwen3-Omni-30B-A3B-Thinking` | ✅︎ | ✅︎ |
+| `Qwen3ASRForConditionalGeneration` | Qwen3-ASR | T + A<sup>+</sup> | `Qwen/Qwen3-ASR-1.7B` | ✅︎ | ✅︎ |
 | `RForConditionalGeneration` | R-VL-4B | T + I<sup>E+</sup> | `YannQi/R-4B` | | ✅︎ |
 | `SkyworkR1VChatModel` | Skywork-R1V-38B | T + I | `Skywork/Skywork-R1V-38B` | | ✅︎ |
 | `SmolVLMForConditionalGeneration` | SmolVLM2 | T + I | `SmolVLM2-2.2B-Instruct` | ✅︎ | |
@@ -767,6 +770,7 @@ Speech2Text models trained specifically for Automatic Speech Recognition.
 | `Gemma3nForConditionalGeneration` | Gemma3n | `google/gemma-3n-E2B-it`, `google/gemma-3n-E4B-it`, etc. | | |
 | `GlmAsrForConditionalGeneration` | GLM-ASR | `zai-org/GLM-ASR-Nano-2512` | ✅︎ | ✅︎ |
 | `GraniteSpeechForConditionalGeneration` | Granite Speech | `ibm-granite/granite-speech-3.3-2b`, `ibm-granite/granite-speech-3.3-8b`, etc. | ✅︎ | ✅︎ |
+| `Qwen3ASRForConditionalGeneration` | Qwen3-ASR | `Qwen/Qwen3-ASR-1.7B`, etc. | | ✅︎ |
 | `VoxtralForConditionalGeneration` | Voxtral (Mistral format) | `mistralai/Voxtral-Mini-3B-2507`, `mistralai/Voxtral-Small-24B-2507`, etc. | ✅︎ | ✅︎ |
 | `WhisperForConditionalGeneration` | Whisper | `openai/whisper-small`, `openai/whisper-large-v3-turbo`, etc. | | |

@@ -9,7 +9,7 @@ Context parallel mainly solves the problem of serving long context requests. As

 During prefill, for a long request with `T` new tokens, we need to compute query/key/value tensors for these new tokens. Say we have `N` GPUs, we can split the request into `N` chunks, and each GPU computes one chunk of the query/key/value tensors.

-Depending on the use case, there're two possible strategies:
+Depending on the use case, there are two possible strategies:

 1. Partial query, full key/value: If the request token length is moderately long (we can afford holding the full key/value tensors), and the goal is to accelerate the prefill (and amortize the computation time of the prefill across query tokens), then we can gather the key/value tensors from all GPUs and let each GPU compute the attention output corresponding to the query tokens of its chunk.
 2. Partial query, partial key/value: If the request token length is too long, we cannot afford holding the full key/value tensors anymore, then we can only compute one chunk of query/key/value tensors for each GPU, and use techniques like [ring-attention](http://arxiv.org/abs/2310.01889) to send/recv key/value tensors chunk by chunk.

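As a toy illustration of strategy 1, the NumPy sketch below splits `T` prompt tokens across `N` simulated ranks, gathers the full key/value tensors, and computes each rank's query chunk locally. It is a didactic sketch, not vLLM code:

```python
# Toy NumPy sketch of strategy 1 ("partial query, full key/value"):
# each of N ranks owns one chunk of the T prompt tokens, all ranks
# gather the full K/V, and each computes attention only for its own
# query chunk. Loop iterations stand in for ranks.
import numpy as np

T, N, d = 8, 2, 4  # prompt tokens, "GPUs", head dim
rng = np.random.default_rng(0)
x = rng.standard_normal((T, d))
Wq, Wk, Wv = rng.standard_normal((3, d, d))
chunks = np.array_split(np.arange(T), N)

# Every rank contributes its chunk's K/V; concatenation models all-gather.
k_full = np.concatenate([x[c] @ Wk for c in chunks])
v_full = np.concatenate([x[c] @ Wv for c in chunks])

outs = []
for c in chunks:  # one iteration per rank
    q = x[c] @ Wq                       # queries for this rank's chunk only
    scores = q @ k_full.T / np.sqrt(d)
    causal = np.arange(T)[None, :] > c[:, None]  # token i sees keys 0..i
    scores = np.where(causal, -np.inf, scores)
    w = np.exp(scores - scores.max(axis=-1, keepdims=True))
    outs.append((w / w.sum(axis=-1, keepdims=True)) @ v_full)

out = np.concatenate(outs)  # (T, d) attention output, as if on one GPU
```

Strategy 2 replaces the all-gather with a ring schedule: each rank holds only its own K/V chunk, passes it around the ring, and accumulates partial attention with running softmax statistics, as in ring attention.
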
@@ -1067,4 +1067,4 @@ Key capabilities:

 The following example shows how to deploy a large model like DeepSeek R1 with Ray Serve LLM: [examples/online_serving/ray_serve_deepseek.py](../../examples/online_serving/ray_serve_deepseek.py).

-Learn more about Ray Serve LLM with the official [Ray Serve LLM documentation](https://docs.ray.io/en/latest/serve/llm/serving-llms.html).
+Learn more about Ray Serve LLM with the official [Ray Serve LLM documentation](https://docs.ray.io/en/latest/serve/llm/index.html).

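For context, a condensed sketch of what such a deployment looks like, assuming Ray's `ray.serve.llm` API (`LLMConfig`, `build_openai_app`); the configuration values below are illustrative, not taken from the linked example:

```python
# Condensed Ray Serve LLM sketch (assumes ray.serve.llm's LLMConfig and
# build_openai_app; all values are illustrative).
from ray import serve
from ray.serve.llm import LLMConfig, build_openai_app

llm_config = LLMConfig(
    model_loading_config={
        "model_id": "deepseek-r1",                  # name exposed to clients
        "model_source": "deepseek-ai/DeepSeek-R1",  # model to load
    },
    deployment_config={
        "autoscaling_config": {"min_replicas": 1, "max_replicas": 1},
    },
    # Each replica runs one vLLM engine; size it to the model.
    engine_kwargs={"tensor_parallel_size": 8, "max_model_len": 16384},
)

# Serves an OpenAI-compatible endpoint (default: http://localhost:8000/v1).
serve.run(build_openai_app({"llm_configs": [llm_config]}))
```
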
@@ -117,6 +117,31 @@ def run_glmasr(question: str, audio_count: int) -> ModelRequestData:
     )


+# FunAudioChat
+def run_funaudiochat(question: str, audio_count: int) -> ModelRequestData:
+    # NOTE: FunAudioChat is not available on the HuggingFace Hub at the time of
+    # writing. Pass a local model path via `--model`.
+    model_name = "funaudiochat"
+
+    engine_args = EngineArgs(
+        model=model_name,
+        max_model_len=4096,
+        max_num_seqs=2,
+        limit_mm_per_prompt={"audio": audio_count},
+        enforce_eager=True,
+    )
+
+    audio_in_prompt = "".join(
+        ["<|audio_bos|><|AUDIO|><|audio_eos|>\n" for _ in range(audio_count)]
+    )
+    prompt = f"{audio_in_prompt}{question}"
+
+    return ModelRequestData(
+        engine_args=engine_args,
+        prompt=prompt,
+    )
+
+
 # Granite Speech
 def run_granite_speech(question: str, audio_count: int) -> ModelRequestData:
     # NOTE - the setting in this example are somewhat different from what is
@@ -305,6 +330,25 @@ def run_qwen2_5_omni(question: str, audio_count: int):
     )


+def run_qwen3_asr(question: str, audio_count: int) -> ModelRequestData:
+    model_name = "Qwen/Qwen3-ASR-1.7B"
+
+    audio_in_prompt = "<|audio_start|><|audio_pad|><|audio_end|>\n" * audio_count
+    prompt = f"<|im_start|>user\n{audio_in_prompt}<|im_end|>\n<|im_start|>assistant\n"
+
+    engine_args = EngineArgs(
+        model=model_name,
+        max_model_len=4096,
+        max_num_seqs=5,
+        limit_mm_per_prompt={"audio": audio_count},
+    )
+
+    return ModelRequestData(
+        engine_args=engine_args,
+        prompt=prompt,
+    )
+
+
 # Ultravox 0.5-1B
 def run_ultravox(question: str, audio_count: int) -> ModelRequestData:
     model_name = "fixie-ai/ultravox-v0_5-llama-3_2-1b"
@@ -410,12 +454,14 @@ model_example_map = {
     "audioflamingo3": run_audioflamingo3,
     "gemma3n": run_gemma3n,
     "glmasr": run_glmasr,
+    "funaudiochat": run_funaudiochat,
     "granite_speech": run_granite_speech,
     "midashenglm": run_midashenglm,
     "minicpmo": run_minicpmo,
     "phi4_mm": run_phi4mm,
     "qwen2_audio": run_qwen2_audio,
     "qwen2_5_omni": run_qwen2_5_omni,
+    "qwen3_asr": run_qwen3_asr,
     "ultravox": run_ultravox,
     "voxtral": run_voxtral,
     "whisper": run_whisper,
@@ -435,6 +481,12 @@ def parse_args():
         choices=model_example_map.keys(),
         help='Huggingface "model_type".',
     )
+    parser.add_argument(
+        "--model",
+        type=str,
+        default=None,
+        help="Model ID or local path override. Required for funaudiochat.",
+    )
     parser.add_argument(
         "--num-prompts", type=int, default=1, help="Number of prompts to run."
     )
@@ -467,6 +519,9 @@ def main(args):
     if model not in model_example_map:
         raise ValueError(f"Model type {model} is not supported.")

+    if model == "funaudiochat" and not args.model:
+        raise ValueError("--model is required when --model-type=funaudiochat")
+
     if args.tensor_parallel_size is not None and args.tensor_parallel_size < 1:
         raise ValueError(
             f"tensor_parallel_size must be a positive integer, "
@@ -477,6 +532,8 @@ def main(args):
     req_data = model_example_map[model](
         question_per_audio_count[audio_count], audio_count
     )
+    if model == "funaudiochat":
+        req_data.engine_args.model = args.model

     # Disable other modalities to save memory
     default_limits = {"image": 0, "video": 0, "audio": 0}

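Outside the example script, the new Qwen3-ASR path boils down to the following offline-inference sketch; the model ID and prompt format are taken from the diff above, and the audio asset choice is illustrative:

```python
# Offline-inference sketch for the new Qwen3-ASR path. Model ID and
# prompt format come from the diff above; "mary_had_lamb" is one of
# vLLM's bundled audio assets.
from vllm import LLM, SamplingParams
from vllm.assets.audio import AudioAsset

audio = AudioAsset("mary_had_lamb").audio_and_sample_rate

prompt = (
    "<|im_start|>user\n"
    "<|audio_start|><|audio_pad|><|audio_end|>\n"
    "<|im_end|>\n"
    "<|im_start|>assistant\n"
)

llm = LLM(
    model="Qwen/Qwen3-ASR-1.7B",
    max_model_len=4096,
    limit_mm_per_prompt={"audio": 1},
)
outputs = llm.generate(
    {"prompt": prompt, "multi_modal_data": {"audio": [audio]}},
    SamplingParams(temperature=0.0, max_tokens=256),
)
print(outputs[0].outputs[0].text)
```
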
@@ -26,7 +26,7 @@ from vllm import LLM, SamplingParams
 # A prompt containing a large markdown table. The table is randomly generated by GPT-4.
 LONG_PROMPT = (
     "You are a helpful assistant in recognizes the content of tables in markdown format. Here is a table as follows.\n# Table\n"
-    + """
+    """
 | ID | Name | Age | Occupation | Country | Email | Phone Number | Address |
 |-----|---------------|-----|---------------|---------------|------------------------|----------------|------------------------------|
 | 1 | John Doe | 29 | Engineer | USA | john.doe@example.com | 555-1234 | 123 Elm St, Springfield, IL |

@@ -2,7 +2,7 @@
 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
 """
 This example shows how to use vLLM for running offline inference
-with the correct prompt format on Qwen2.5-Omni (thinker only).
+with the correct prompt format on Qwen3-Omni (thinker only).
 """

 from typing import NamedTuple
@@ -112,23 +112,51 @@ def get_multi_audios_query() -> QueryResult:
     )


+def get_multi_images_query() -> QueryResult:
+    question = "What are the differences between these two images?"
+    prompt = (
+        f"<|im_start|>system\n{default_system}<|im_end|>\n"
+        "<|im_start|>user\n<|vision_start|><|image_pad|><|vision_end|>"
+        "<|vision_start|><|image_pad|><|vision_end|>"
+        f"{question}<|im_end|>\n"
+        f"<|im_start|>assistant\n"
+    )
+    return QueryResult(
+        inputs={
+            "prompt": prompt,
+            "multi_modal_data": {
+                "image": [
+                    convert_image_mode(ImageAsset("cherry_blossom").pil_image, "RGB"),
+                    convert_image_mode(ImageAsset("stop_sign").pil_image, "RGB"),
+                ],
+            },
+        },
+        limit_mm_per_prompt={
+            "image": 2,
+        },
+    )
+
+
 query_map = {
     "mixed_modalities": get_mixed_modalities_query,
     "use_audio_in_video": get_use_audio_in_video_query,
     "multi_audios": get_multi_audios_query,
+    "multi_images": get_multi_images_query,
 }


 def main(args):
-    model_name = "Qwen/Qwen3-Omni-30B-A3B-Instruct"
+    model_name = args.model
     query_result = query_map[args.query_type]()

     llm = LLM(
         model=model_name,
-        max_model_len=12800,
+        max_model_len=args.max_model_len,
         max_num_seqs=5,
         limit_mm_per_prompt=query_result.limit_mm_per_prompt,
         seed=args.seed,
+        tensor_parallel_size=args.tensor_parallel_size,
+        gpu_memory_utilization=args.gpu_memory_utilization,
     )

     # We set temperature to 0.2 so that outputs can be different
@@ -161,6 +189,31 @@ def parse_args():
         default=0,
         help="Set the seed when initializing `vllm.LLM`.",
     )
+    parser.add_argument(
+        "--model",
+        type=str,
+        default="Qwen/Qwen3-Omni-30B-A3B-Instruct",
+        help="Model name or path.",
+    )
+    parser.add_argument(
+        "--tensor-parallel-size",
+        "-tp",
+        type=int,
+        default=1,
+        help="Tensor parallel size for distributed inference.",
+    )
+    parser.add_argument(
+        "--gpu-memory-utilization",
+        type=float,
+        default=0.9,
+        help="GPU memory utilization (0.0 to 1.0).",
+    )
+    parser.add_argument(
+        "--max-model-len",
+        type=int,
+        default=12800,
+        help="Maximum model context length.",
+    )

     return parser.parse_args()

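Equivalently, the new `multi_images` query can be driven directly with the `LLM` API. This sketch mirrors the prompt format added above but drops the system turn for brevity; the model ID follows the example's new default:

```python
# Direct-API sketch of the new "multi_images" query. Mirrors the prompt
# format added above, minus the system turn; values are illustrative.
from vllm import LLM, SamplingParams
from vllm.assets.image import ImageAsset
from vllm.multimodal.image import convert_image_mode

question = "What are the differences between these two images?"
prompt = (
    "<|im_start|>user\n"
    "<|vision_start|><|image_pad|><|vision_end|>"
    "<|vision_start|><|image_pad|><|vision_end|>"
    f"{question}<|im_end|>\n"
    "<|im_start|>assistant\n"
)
images = [
    convert_image_mode(ImageAsset("cherry_blossom").pil_image, "RGB"),
    convert_image_mode(ImageAsset("stop_sign").pil_image, "RGB"),
]

llm = LLM(
    model="Qwen/Qwen3-Omni-30B-A3B-Instruct",
    max_model_len=12800,
    max_num_seqs=5,
    limit_mm_per_prompt={"image": 2},
)
outputs = llm.generate(
    {"prompt": prompt, "multi_modal_data": {"image": images}},
    SamplingParams(temperature=0.2, max_tokens=256),
)
print(outputs[0].outputs[0].text)
```
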
Some files were not shown because too many files have changed in this diff