Compare commits

3 Commits: v0.17.1 ... v0.8.5.pos

| Author | SHA1 | Date |
|---|---|---|
|  | 3015d5634e |  |
|  | edb5286ea5 |  |
|  | ba41cc90e8 |  |
@@ -1,20 +1,19 @@
 # SPDX-License-Identifier: Apache-2.0
-# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

 import os
 import sys
 import zipfile

-# Read the VLLM_MAX_SIZE_MB environment variable, defaulting to 500 MiB
-# Note that we have 800 MiB quota, please use it wisely.
-# See https://github.com/pypi/support/issues/6326 .
+# Read the VLLM_MAX_SIZE_MB environment variable, defaulting to 400 MiB
+# Note that we have 400 MiB quota, please use it wisely.
+# See https://github.com/pypi/support/issues/3792 .
 # Please also sync the value with the one in Dockerfile.
-VLLM_MAX_SIZE_MB = int(os.environ.get("VLLM_MAX_SIZE_MB", 500))
+VLLM_MAX_SIZE_MB = int(os.environ.get('VLLM_MAX_SIZE_MB', 400))


 def print_top_10_largest_files(zip_file):
     """Print the top 10 largest files in the given zip file."""
-    with zipfile.ZipFile(zip_file, "r") as z:
+    with zipfile.ZipFile(zip_file, 'r') as z:
         file_sizes = [(f, z.getinfo(f).file_size) for f in z.namelist()]
         file_sizes.sort(key=lambda x: x[1], reverse=True)
         for f, size in file_sizes[:10]:
@@ -29,18 +28,14 @@ def check_wheel_size(directory):
             wheel_path = os.path.join(root, file_name)
             wheel_size_mb = os.path.getsize(wheel_path) / (1024 * 1024)
             if wheel_size_mb > VLLM_MAX_SIZE_MB:
-                print(
-                    f"Not allowed: Wheel {wheel_path} is larger "
-                    f"({wheel_size_mb:.2f} MB) than the limit "
-                    f"({VLLM_MAX_SIZE_MB} MB)."
-                )
+                print(f"Not allowed: Wheel {wheel_path} is larger "
+                      f"({wheel_size_mb:.2f} MB) than the limit "
+                      f"({VLLM_MAX_SIZE_MB} MB).")
                 print_top_10_largest_files(wheel_path)
                 return 1
             else:
-                print(
-                    f"Wheel {wheel_path} is within the allowed size "
-                    f"({wheel_size_mb:.2f} MB)."
-                )
+                print(f"Wheel {wheel_path} is within the allowed size "
+                      f"({wheel_size_mb:.2f} MB).")
                 return 0

@@ -50,4 +45,4 @@ if __name__ == "__main__":
         sys.exit(1)

     directory = sys.argv[1]
     sys.exit(check_wheel_size(directory))
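Both sides of this diff keep the same entry point: the script takes a single directory argument and returns nonzero when any wheel in it exceeds the limit. A sketch of a local invocation (the `dist/` path is illustrative, not taken from the diff):

    VLLM_MAX_SIZE_MB=400 python .buildkite/check-wheel-size.py dist/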
@@ -1,25 +0,0 @@
-name: vllm_ci
-job_dirs:
-  - ".buildkite/image_build"
-  - ".buildkite/test_areas"
-  - ".buildkite/hardware_tests"
-run_all_patterns:
-  - "docker/Dockerfile"
-  - "CMakeLists.txt"
-  - "requirements/common.txt"
-  - "requirements/cuda.txt"
-  - "requirements/build.txt"
-  - "requirements/test.txt"
-  - "setup.py"
-  - "csrc/"
-  - "cmake/"
-run_all_exclude_patterns:
-  - "docker/Dockerfile."
-  - "csrc/cpu/"
-  - "csrc/rocm/"
-  - "cmake/hipify.py"
-  - "cmake/cpu_extension.cmake"
-registries: public.ecr.aws/q9t5s3a7
-repositories:
-  main: "vllm-ci-postmerge-repo"
-  premerge: "vllm-ci-test-repo"
26  .buildkite/generate_index.py  Normal file
@@ -0,0 +1,26 @@
+# SPDX-License-Identifier: Apache-2.0
+
+import argparse
+import os
+
+template = """<!DOCTYPE html>
+<html>
+<body>
+<h1>Links for vLLM</h1/>
+<a href="../{wheel_html_escaped}">{wheel}</a><br/>
+</body>
+</html>
+"""
+
+parser = argparse.ArgumentParser()
+parser.add_argument("--wheel", help="The wheel path.", required=True)
+args = parser.parse_args()
+
+filename = os.path.basename(args.wheel)
+
+with open("index.html", "w") as f:
+    print(f"Generated index.html for {args.wheel}")
+    # cloudfront requires escaping the '+' character
+    f.write(
+        template.format(wheel=filename,
+                        wheel_html_escaped=filename.replace("+", "%2B")))
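The added script writes an index.html into the current working directory and accepts only --wheel. A rough sketch of how CI might call it (the wheel filename below is illustrative, not taken from the diff):

    python .buildkite/generate_index.py --wheel vllm-0.8.5+cu118-cp38-abi3-manylinux1_x86_64.whl

The `+` in a local version tag is what triggers the `%2B` escaping mentioned in the CloudFront comment.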
@@ -1,30 +0,0 @@
-group: Hardware - AMD Build
-steps:
-  - label: "AMD: :docker: build image"
-    key: image-build-amd
-    depends_on: []
-    device: amd_cpu
-    no_plugin: true
-    commands:
-      - >
-        docker build
-        --build-arg max_jobs=16
-        --build-arg REMOTE_VLLM=1
-        --build-arg ARG_PYTORCH_ROCM_ARCH='gfx942;gfx950'
-        --build-arg VLLM_BRANCH=$BUILDKITE_COMMIT
-        --tag "rocm/vllm-ci:${BUILDKITE_COMMIT}"
-        -f docker/Dockerfile.rocm
-        --target test
-        --no-cache
-        --progress plain .
-      - docker push "rocm/vllm-ci:${BUILDKITE_COMMIT}"
-    env:
-      DOCKER_BUILDKIT: "1"
-    retry:
-      automatic:
-        - exit_status: -1  # Agent was lost
-          limit: 1
-        - exit_status: -10  # Agent was lost
-          limit: 1
-        - exit_status: 1  # Machine occasionally fail
-          limit: 1
@@ -1,10 +0,0 @@
-group: Hardware
-depends_on: ~
-steps:
-  - label: "Ascend NPU Test"
-    soft_fail: true
-    timeout_in_minutes: 20
-    no_plugin: true
-    device: ascend_npu
-    commands:
-      - bash .buildkite/scripts/hardware_ci/run-npu-test.sh
@@ -1,100 +0,0 @@
-group: CPU
-depends_on: []
-steps:
-  - label: CPU-Kernel Tests
-    depends_on: []
-    soft_fail: true
-    device: intel_cpu
-    no_plugin: true
-    source_file_dependencies:
-      - csrc/cpu/
-      - cmake/cpu_extension.cmake
-      - CMakeLists.txt
-      - vllm/_custom_ops.py
-      - tests/kernels/attention/test_cpu_attn.py
-      - tests/kernels/moe/test_cpu_fused_moe.py
-      - tests/kernels/test_onednn.py
-    commands:
-      - |
-        bash .buildkite/scripts/hardware_ci/run-cpu-test.sh 20m "
-        pytest -x -v -s tests/kernels/attention/test_cpu_attn.py
-        pytest -x -v -s tests/kernels/moe/test_cpu_fused_moe.py
-        pytest -x -v -s tests/kernels/test_onednn.py"
-
-  - label: CPU-Language Generation and Pooling Model Tests
-    depends_on: []
-    soft_fail: true
-    device: intel_cpu
-    no_plugin: true
-    source_file_dependencies:
-      - csrc/cpu/
-      - vllm/
-      - tests/models/language/generation/
-      - tests/models/language/pooling/
-    commands:
-      - |
-        bash .buildkite/scripts/hardware_ci/run-cpu-test.sh 30m "
-        pytest -x -v -s tests/models/language/generation -m cpu_model
-        pytest -x -v -s tests/models/language/pooling -m cpu_model"
-
-  - label: CPU-Quantization Model Tests
-    depends_on: []
-    soft_fail: true
-    device: intel_cpu
-    no_plugin: true
-    source_file_dependencies:
-      - csrc/cpu/
-      - vllm/model_executor/layers/quantization/cpu_wna16.py
-      - vllm/model_executor/layers/quantization/gptq_marlin.py
-      - vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py
-      - vllm/model_executor/layers/quantization/kernels/scaled_mm/cpu.py
-      - vllm/model_executor/layers/quantization/kernels/mixed_precision/cpu.py
-      - tests/quantization/test_compressed_tensors.py
-      - tests/quantization/test_cpu_wna16.py
-    commands:
-      - |
-        bash .buildkite/scripts/hardware_ci/run-cpu-test.sh 20m "
-        pytest -x -v -s tests/quantization/test_compressed_tensors.py::test_compressed_tensors_w8a8_logprobs
-        pytest -x -v -s tests/quantization/test_cpu_wna16.py"
-
-  - label: CPU-Distributed Tests
-    depends_on: []
-    soft_fail: true
-    device: intel_cpu
-    no_plugin: true
-    source_file_dependencies:
-      - csrc/cpu/shm.cpp
-      - vllm/v1/worker/cpu_worker.py
-      - vllm/v1/worker/gpu_worker.py
-      - vllm/v1/worker/cpu_model_runner.py
-      - vllm/v1/worker/gpu_model_runner.py
-      - vllm/platforms/cpu.py
-      - vllm/distributed/parallel_state.py
-      - vllm/distributed/device_communicators/cpu_communicator.py
-    commands:
-      - |
-        bash .buildkite/scripts/hardware_ci/run-cpu-test.sh 10m "
-        bash .buildkite/scripts/hardware_ci/run-cpu-distributed-smoke-test.sh"
-
-  - label: CPU-Multi-Modal Model Tests %N
-    depends_on: []
-    soft_fail: true
-    device: intel_cpu
-    no_plugin: true
-    source_file_dependencies:
-      # - vllm/
-      - vllm/model_executor/layers/rotary_embedding
-      - tests/models/multimodal/generation/
-    commands:
-      - |
-        bash .buildkite/scripts/hardware_ci/run-cpu-test.sh 45m "
-        pytest -x -v -s tests/models/multimodal/generation --ignore=tests/models/multimodal/generation/test_pixtral.py -m cpu_model --num-shards=$$BUILDKITE_PARALLEL_JOB_COUNT --shard-id=$$BUILDKITE_PARALLEL_JOB"
-    parallelism: 2
-
-  - label: "Arm CPU Test"
-    depends_on: []
-    soft_fail: true
-    device: arm_cpu
-    no_plugin: true
-    commands:
-      - bash .buildkite/scripts/hardware_ci/run-cpu-test-arm.sh
@@ -1,10 +0,0 @@
-group: Hardware
-steps:
-  - label: "GH200 Test"
-    soft_fail: true
-    device: gh200
-    no_plugin: true
-    optional: true
-    commands:
-      - nvidia-smi
-      - bash .buildkite/scripts/hardware_ci/run-gh200-test.sh
@@ -1,17 +0,0 @@
-group: Hardware
-depends_on: ~
-steps:
-  - label: "Intel HPU Test"
-    soft_fail: true
-    device: intel_hpu
-    no_plugin: true
-    commands:
-      - bash .buildkite/scripts/hardware_ci/run-hpu-test.sh
-
-  - label: "Intel GPU Test"
-    depends_on: []
-    soft_fail: true
-    device: intel_gpu
-    no_plugin: true
-    commands:
-      - bash .buildkite/scripts/hardware_ci/run-xpu-test.sh
@@ -1,255 +0,0 @@
-#!/bin/bash
-set -euo pipefail
-
-# replace invalid characters in Docker image tags and truncate to 128 chars
-clean_docker_tag() {
-    local input="$1"
-    echo "$input" | sed 's/[^a-zA-Z0-9._-]/_/g' | cut -c1-128
-}
-
-print_usage_and_exit() {
-    echo "Usage: $0 <registry> <repo> <commit> <branch> <image_tag> [<image_tag_latest>]"
-    exit 1
-}
-
-print_instance_info() {
-    echo ""
-    echo "=== Debug: Instance Information ==="
-    # Get IMDSv2 token
-    if TOKEN=$(curl -s -X PUT "http://169.254.169.254/latest/api/token" \
-        -H "X-aws-ec2-metadata-token-ttl-seconds: 21600" 2>/dev/null); then
-        AMI_ID=$(curl -s -H "X-aws-ec2-metadata-token: $TOKEN" \
-            http://169.254.169.254/latest/meta-data/ami-id 2>/dev/null || echo "unknown")
-        INSTANCE_TYPE=$(curl -s -H "X-aws-ec2-metadata-token: $TOKEN" \
-            http://169.254.169.254/latest/meta-data/instance-type 2>/dev/null || echo "unknown")
-        INSTANCE_ID=$(curl -s -H "X-aws-ec2-metadata-token: $TOKEN" \
-            http://169.254.169.254/latest/meta-data/instance-id 2>/dev/null || echo "unknown")
-        AZ=$(curl -s -H "X-aws-ec2-metadata-token: $TOKEN" \
-            http://169.254.169.254/latest/meta-data/placement/availability-zone 2>/dev/null || echo "unknown")
-        echo "AMI ID: ${AMI_ID}"
-        echo "Instance Type: ${INSTANCE_TYPE}"
-        echo "Instance ID: ${INSTANCE_ID}"
-        echo "AZ: ${AZ}"
-    else
-        echo "Not running on EC2 or IMDS not available"
-    fi
-    # Check for warm cache AMI (marker file baked into custom AMI)
-    if [[ -f /etc/vllm-ami-info ]]; then
-        echo "Cache: warm (custom vLLM AMI)"
-        cat /etc/vllm-ami-info
-    else
-        echo "Cache: cold (standard AMI)"
-    fi
-    echo "==================================="
-    echo ""
-}
-
-setup_buildx_builder() {
-    echo "--- :buildkite: Setting up buildx builder"
-    if [[ -S "${BUILDKIT_SOCKET}" ]]; then
-        # Custom AMI with standalone buildkitd - use remote driver for warm cache
-        echo "✅ Found local buildkitd socket at ${BUILDKIT_SOCKET}"
-        echo "Using remote driver to connect to buildkitd (warm cache available)"
-        if docker buildx inspect baked-vllm-builder >/dev/null 2>&1; then
-            echo "Using existing baked-vllm-builder"
-            docker buildx use baked-vllm-builder
-        else
-            echo "Creating baked-vllm-builder with remote driver"
-            docker buildx create \
-                --name baked-vllm-builder \
-                --driver remote \
-                --use \
-                "unix://${BUILDKIT_SOCKET}"
-        fi
-        docker buildx inspect --bootstrap
-    elif docker buildx inspect "${BUILDER_NAME}" >/dev/null 2>&1; then
-        # Existing builder available
-        echo "Using existing builder: ${BUILDER_NAME}"
-        docker buildx use "${BUILDER_NAME}"
-        docker buildx inspect --bootstrap
-    else
-        # No local buildkitd, no existing builder - create new docker-container builder
-        echo "No local buildkitd found, using docker-container driver"
-        docker buildx create --name "${BUILDER_NAME}" --driver docker-container --use
-        docker buildx inspect --bootstrap
-    fi
-
-    # builder info
-    echo "Active builder:"
-    docker buildx ls | grep -E '^\*|^NAME' || docker buildx ls
-}
-
-check_and_skip_if_image_exists() {
-    if [[ -n "${IMAGE_TAG:-}" ]]; then
-        echo "--- :mag: Checking if image exists"
-        if docker manifest inspect "${IMAGE_TAG}" >/dev/null 2>&1; then
-            echo "Image already exists: ${IMAGE_TAG}"
-            echo "Skipping build"
-            exit 0
-        fi
-        echo "Image not found, proceeding with build"
-    fi
-}
-
-ecr_login() {
-    aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin "$REGISTRY"
-    aws ecr get-login-password --region us-east-1 | docker login --username AWS --password-stdin 936637512419.dkr.ecr.us-east-1.amazonaws.com
-}
-
-prepare_cache_tags() {
-    # resolve and set: CACHE_TO, CACHE_FROM, CACHE_FROM_BASE_BRANCH, CACHE_FROM_MAIN
-    TEST_CACHE_ECR="936637512419.dkr.ecr.us-east-1.amazonaws.com/vllm-ci-test-cache"
-    MAIN_CACHE_ECR="936637512419.dkr.ecr.us-east-1.amazonaws.com/vllm-ci-postmerge-cache"
-
-    if [[ "$BUILDKITE_PULL_REQUEST" == "false" ]]; then
-        if [[ "$BUILDKITE_BRANCH" == "main" ]]; then
-            cache="${MAIN_CACHE_ECR}:latest"
-        else
-            clean_branch=$(clean_docker_tag "$BUILDKITE_BRANCH")
-            cache="${TEST_CACHE_ECR}:${clean_branch}"
-        fi
-        CACHE_TO="$cache"
-        CACHE_FROM="$cache"
-        CACHE_FROM_BASE_BRANCH="$cache"
-    else
-        CACHE_TO="${TEST_CACHE_ECR}:pr-${BUILDKITE_PULL_REQUEST}"
-        CACHE_FROM="${TEST_CACHE_ECR}:pr-${BUILDKITE_PULL_REQUEST}"
-        if [[ "$BUILDKITE_PULL_REQUEST_BASE_BRANCH" == "main" ]]; then
-            CACHE_FROM_BASE_BRANCH="${MAIN_CACHE_ECR}:latest"
-        else
-            clean_base=$(clean_docker_tag "$BUILDKITE_PULL_REQUEST_BASE_BRANCH")
-            CACHE_FROM_BASE_BRANCH="${TEST_CACHE_ECR}:${clean_base}"
-        fi
-    fi
-
-    CACHE_FROM_MAIN="${MAIN_CACHE_ECR}:latest"
-    export CACHE_TO CACHE_FROM CACHE_FROM_BASE_BRANCH CACHE_FROM_MAIN
-}
-
-resolve_parent_commit() {
-    if [[ -z "${PARENT_COMMIT:-}" ]]; then
-        PARENT_COMMIT=$(git rev-parse HEAD~1 2>/dev/null || echo "")
-        if [[ -n "${PARENT_COMMIT}" ]]; then
-            echo "Computed parent commit for cache fallback: ${PARENT_COMMIT}"
-            export PARENT_COMMIT
-        else
-            echo "Could not determine parent commit (may be first commit in repo)"
-        fi
-    else
-        echo "Using provided PARENT_COMMIT: ${PARENT_COMMIT}"
-    fi
-}
-
-print_bake_config() {
-    echo "--- :page_facing_up: Resolved bake configuration"
-    # Write to a temp directory to avoid polluting the repo root (which is the
-    # Docker build context). Files left in the repo root get COPY'd into the
-    # image and can cause duplicate artifact uploads from downstream steps.
-    local bake_tmp
-    bake_tmp="$(mktemp -d)"
-    BAKE_CONFIG_FILE="${bake_tmp}/bake-config-build-${BUILDKITE_BUILD_NUMBER:-local}.json"
-    docker buildx bake -f "${VLLM_BAKE_FILE_PATH}" -f "${CI_HCL_PATH}" --print "${TARGET}" | tee "${BAKE_CONFIG_FILE}" || true
-    echo "Saved bake config to ${BAKE_CONFIG_FILE}"
-    echo "--- :arrow_down: Uploading bake config to Buildkite"
-    (cd "$(dirname "${BAKE_CONFIG_FILE}")" && buildkite-agent artifact upload "$(basename "${BAKE_CONFIG_FILE}")")
-}
-
-#################################
-#          Main Script          #
-#################################
-print_instance_info
-
-if [[ $# -lt 5 ]]; then
-    print_usage_and_exit
-fi
-
-# input args
-REGISTRY=$1
-REPO=$2
-BUILDKITE_COMMIT=$3
-BRANCH=$4
-IMAGE_TAG=$5
-IMAGE_TAG_LATEST=${6:-}  # only used for main branch, optional
-
-# build config
-TARGET="test-ci"
-VLLM_BAKE_FILE_PATH="${VLLM_BAKE_FILE_PATH:-docker/docker-bake.hcl}"
-BUILDER_NAME="${BUILDER_NAME:-vllm-builder}"
-CI_HCL_URL="${CI_HCL_URL:-https://raw.githubusercontent.com/vllm-project/ci-infra/main/docker/ci.hcl}"
-CI_HCL_PATH="/tmp/ci.hcl"
-BUILDKIT_SOCKET="/run/buildkit/buildkitd.sock"
-
-prepare_cache_tags
-ecr_login
-
-# Environment info (for docs and human readers)
-# VLLM_CI_BRANCH - ci-infra branch to use (default: main)
-# VLLM_BAKE_FILE_PATH - Path to vLLM's bake file (default: docker/docker-bake.hcl)
-# BUILDER_NAME - Name for buildx builder (default: vllm-builder)
-#
-# Build configuration (exported as environment variables for bake):
-export BUILDKITE_COMMIT
-export PARENT_COMMIT
-export IMAGE_TAG
-export IMAGE_TAG_LATEST
-export CACHE_FROM
-export CACHE_FROM_BASE_BRANCH
-export CACHE_FROM_MAIN
-export CACHE_TO
-
-# print args
-echo "--- :mag: Arguments"
-echo "REGISTRY: ${REGISTRY}"
-echo "REPO: ${REPO}"
-echo "BUILDKITE_COMMIT: ${BUILDKITE_COMMIT}"
-echo "BRANCH: ${BRANCH}"
-echo "IMAGE_TAG: ${IMAGE_TAG}"
-echo "IMAGE_TAG_LATEST: ${IMAGE_TAG_LATEST}"
-
-# print build configuration
-echo "--- :mag: Build configuration"
-echo "TARGET: ${TARGET}"
-echo "vLLM bake file: ${VLLM_BAKE_FILE_PATH}"
-echo "BUILDER_NAME: ${BUILDER_NAME}"
-echo "CI_HCL_URL: ${CI_HCL_URL}"
-echo "BUILDKIT_SOCKET: ${BUILDKIT_SOCKET}"
-
-echo "--- :mag: Cache tags"
-echo "CACHE_TO: ${CACHE_TO}"
-echo "CACHE_FROM: ${CACHE_FROM}"
-echo "CACHE_FROM_BASE_BRANCH: ${CACHE_FROM_BASE_BRANCH}"
-echo "CACHE_FROM_MAIN: ${CACHE_FROM_MAIN}"
-
-check_and_skip_if_image_exists
-
-echo "--- :docker: Setting up Docker buildx bake"
-echo "Target: ${TARGET}"
-echo "vLLM bake file: ${VLLM_BAKE_FILE_PATH}"
-echo "CI HCL path: ${CI_HCL_PATH}"
-
-if [[ ! -f "${VLLM_BAKE_FILE_PATH}" ]]; then
-    echo "Error: vLLM bake file not found at ${VLLM_BAKE_FILE_PATH}"
-    echo "Make sure you're running from the vLLM repository root"
-    exit 1
-fi
-
-echo "--- :arrow_down: Downloading ci.hcl"
-curl -sSfL -o "${CI_HCL_PATH}" "${CI_HCL_URL}"
-echo "Downloaded to ${CI_HCL_PATH}"
-
-if [[ ! -f "${CI_HCL_PATH}" ]]; then
-    echo "Error: ci.hcl not found at ${CI_HCL_PATH}"
-    exit 1
-fi
-
-setup_buildx_builder
-
-resolve_parent_commit
-export PARENT_COMMIT
-
-print_bake_config
-
-echo "--- :docker: Building ${TARGET}"
-docker --debug buildx bake -f "${VLLM_BAKE_FILE_PATH}" -f "${CI_HCL_PATH}" --progress plain "${TARGET}"
-
-echo "--- :white_check_mark: Build complete"
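The removed script validates its own arguments via print_usage_and_exit, so a minimal invocation mirrors that usage line; the registry, repo, and tag values below are placeholders taken from the deleted CI config, not a confirmed command:

    bash .buildkite/image_build/image_build.sh \
        public.ecr.aws/q9t5s3a7 vllm-ci-test-repo \
        "$BUILDKITE_COMMIT" "$BUILDKITE_BRANCH" \
        "public.ecr.aws/q9t5s3a7/vllm-ci-test-repo:$BUILDKITE_COMMIT"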
@@ -1,58 +0,0 @@
-group: Abuild
-steps:
-  - label: ":docker: Build image"
-    key: image-build
-    depends_on: []
-    timeout_in_minutes: 600
-    commands:
-      - if [[ "$BUILDKITE_BRANCH" == "main" ]]; then .buildkite/image_build/image_build.sh $REGISTRY $REPO $BUILDKITE_COMMIT $BRANCH $IMAGE_TAG $IMAGE_TAG_LATEST; else .buildkite/image_build/image_build.sh $REGISTRY $REPO $BUILDKITE_COMMIT $BRANCH $IMAGE_TAG; fi
-    retry:
-      automatic:
-        - exit_status: -1  # Agent was lost
-          limit: 2
-        - exit_status: -10  # Agent was lost
-          limit: 2
-
-  - label: ":docker: Build CPU image"
-    key: image-build-cpu
-    depends_on: []
-    commands:
-      - .buildkite/image_build/image_build_cpu.sh $REGISTRY $REPO $BUILDKITE_COMMIT
-    env:
-      DOCKER_BUILDKIT: "1"
-    retry:
-      automatic:
-        - exit_status: -1  # Agent was lost
-          limit: 2
-        - exit_status: -10  # Agent was lost
-          limit: 2
-
-  - label: ":docker: Build HPU image"
-    soft_fail: true
-    depends_on: []
-    key: image-build-hpu
-    commands:
-      - .buildkite/image_build/image_build_hpu.sh $REGISTRY $REPO $BUILDKITE_COMMIT
-    env:
-      DOCKER_BUILDKIT: "1"
-    retry:
-      automatic:
-        - exit_status: -1  # Agent was lost
-          limit: 2
-        - exit_status: -10  # Agent was lost
-          limit: 2
-
-  - label: ":docker: Build CPU arm64 image"
-    key: cpu-arm64-image-build
-    depends_on: []
-    optional: true
-    commands:
-      - .buildkite/image_build/image_build_cpu_arm64.sh $REGISTRY $REPO $BUILDKITE_COMMIT
-    env:
-      DOCKER_BUILDKIT: "1"
-    retry:
-      automatic:
-        - exit_status: -1  # Agent was lost
-          limit: 2
-        - exit_status: -10  # Agent was lost
-          limit: 2
@@ -1,36 +0,0 @@
-#!/bin/bash
-set -e
-
-if [[ $# -lt 3 ]]; then
-    echo "Usage: $0 <registry> <repo> <commit>"
-    exit 1
-fi
-
-REGISTRY=$1
-REPO=$2
-BUILDKITE_COMMIT=$3
-
-# authenticate with AWS ECR
-aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin "$REGISTRY"
-
-# skip build if image already exists
-if [[ -z $(docker manifest inspect "$REGISTRY"/"$REPO":"$BUILDKITE_COMMIT"-cpu) ]]; then
-    echo "Image not found, proceeding with build..."
-else
-    echo "Image found"
-    exit 0
-fi
-
-# build
-docker build --file docker/Dockerfile.cpu \
-    --build-arg max_jobs=16 \
-    --build-arg buildkite_commit="$BUILDKITE_COMMIT" \
-    --build-arg VLLM_CPU_AVX512BF16=true \
-    --build-arg VLLM_CPU_AVX512VNNI=true \
-    --build-arg VLLM_CPU_AMXBF16=true \
-    --tag "$REGISTRY"/"$REPO":"$BUILDKITE_COMMIT"-cpu \
-    --target vllm-test \
-    --progress plain .
-
-# push
-docker push "$REGISTRY"/"$REPO":"$BUILDKITE_COMMIT"-cpu
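Like the main build script, this CPU variant is self-guarding: it exits early when `docker manifest inspect` already finds the tag. A placeholder invocation consistent with its usage check (registry and repo names are illustrative):

    bash .buildkite/image_build/image_build_cpu.sh public.ecr.aws/q9t5s3a7 vllm-ci-test-repo "$BUILDKITE_COMMIT"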
@@ -1,33 +0,0 @@
-#!/bin/bash
-set -e
-
-if [[ $# -lt 3 ]]; then
-    echo "Usage: $0 <registry> <repo> <commit>"
-    exit 1
-fi
-
-REGISTRY=$1
-REPO=$2
-BUILDKITE_COMMIT=$3
-
-# authenticate with AWS ECR
-aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin "$REGISTRY"
-
-# skip build if image already exists
-if [[ -z $(docker manifest inspect "$REGISTRY"/"$REPO":"$BUILDKITE_COMMIT"-arm64-cpu) ]]; then
-    echo "Image not found, proceeding with build..."
-else
-    echo "Image found"
-    exit 0
-fi
-
-# build
-docker build --file docker/Dockerfile.cpu \
-    --build-arg max_jobs=16 \
-    --build-arg buildkite_commit="$BUILDKITE_COMMIT" \
-    --tag "$REGISTRY"/"$REPO":"$BUILDKITE_COMMIT"-arm64-cpu \
-    --target vllm-test \
-    --progress plain .
-
-# push
-docker push "$REGISTRY"/"$REPO":"$BUILDKITE_COMMIT"-arm64-cpu
@@ -1,34 +0,0 @@
-#!/bin/bash
-set -e
-
-if [[ $# -lt 3 ]]; then
-    echo "Usage: $0 <registry> <repo> <commit>"
-    exit 1
-fi
-
-REGISTRY=$1
-REPO=$2
-BUILDKITE_COMMIT=$3
-
-# authenticate with AWS ECR
-aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin "$REGISTRY"
-
-# skip build if image already exists
-if [[ -z $(docker manifest inspect "$REGISTRY"/"$REPO":"$BUILDKITE_COMMIT"-hpu) ]]; then
-    echo "Image not found, proceeding with build..."
-else
-    echo "Image found"
-    exit 0
-fi
-
-# build
-docker build \
-    --file tests/pytorch_ci_hud_benchmark/Dockerfile.hpu \
-    --build-arg max_jobs=16 \
-    --build-arg buildkite_commit="$BUILDKITE_COMMIT" \
-    --tag "$REGISTRY"/"$REPO":"$BUILDKITE_COMMIT"-hpu \
-    --progress plain \
-    https://github.com/vllm-project/vllm-gaudi.git
-
-# push
-docker push "$REGISTRY"/"$REPO":"$BUILDKITE_COMMIT"-hpu
@@ -1,11 +0,0 @@
-# bash .buildkite/lm-eval-harness/run-lm-eval-gsm-vllm-baseline.sh -m RedHatAI/Llama-3.2-1B-Instruct-FP8 -b "auto" -l 1319 -f 5 -t 1
-model_name: "RedHatAI/Llama-3.2-1B-Instruct-FP8"
-tasks:
-  - name: "gsm8k"
-    metrics:
-      - name: "exact_match,strict-match"
-        value: 0.335
-      - name: "exact_match,flexible-extract"
-        value: 0.323
-limit: 1319
-num_fewshot: 5
@@ -1,12 +0,0 @@
-# For hf script, without -t option (tensor parallel size).
-# bash .buildkite/lm-eval-harness/run-lm-eval-chartqa-vllm-vlm-baseline.sh -m meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8 -l 100 -t 8
-model_name: "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8"
-backend: "vllm-vlm"
-tasks:
-  - name: "chartqa"
-    metrics:
-      - name: "relaxed_accuracy,none"
-        # TODO(zhewenl): model card is 0.90, but the actual score is 0.80.
-        value: 0.80
-limit: 100
-num_fewshot: 0
@@ -1,11 +0,0 @@
-# For hf script, without -t option (tensor parallel size).
-# bash .buildkite/lm-eval-harness/run-lm-eval-mmlupro-vllm-baseline.sh -m meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8 -l 250 -t 8 -f 5
-model_name: "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8"
-tasks:
-  - name: "mmlu_pro"
-    metrics:
-      - name: "exact_match,custom-extract"
-        value: 0.80
-limit: 250  # will run on 250 * 14 subjects = 3500 samples
-num_fewshot: 5
-rtol: 0.05
@@ -1,15 +0,0 @@
-model_name: "nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-BF16"
-tasks:
-  - name: "gsm8k"
-    metrics:
-      - name: "exact_match,strict-match"
-        value: 0.695
-      - name: "exact_match,flexible-extract"
-        value: 0.447
-limit: 1319
-num_fewshot: 5
-max_model_len: 262144
-enforce_eager: false
-apply_chat_template: true
-fewshot_as_multiturn: true
-trust_remote_code: true
@@ -1,19 +0,0 @@
-model_name: "nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-FP8"
-tasks:
-  - name: "gsm8k"
-    metrics:
-      - name: "exact_match,strict-match"
-        value: 0.7142
-      - name: "exact_match,flexible-extract"
-        value: 0.4579
-env_vars:
-  VLLM_USE_FLASHINFER_MOE_FP8: "1"
-  VLLM_FLASHINFER_MOE_BACKEND: "throughput"
-limit: 1319
-num_fewshot: 5
-max_model_len: 262144
-kv_cache_dtype: fp8
-enforce_eager: false
-apply_chat_template: true
-fewshot_as_multiturn: true
-trust_remote_code: true
@@ -0,0 +1,12 @@
+# For vllm script, with -t option (tensor parallel size).
+# bash .buildkite/lm-eval-harness/run-lm-eval-gsm-vllm-baseline.sh -m nm-testing/Qwen2-1.5B-Instruct-W8A16-Channelwise -b "auto" -l 1000 -f 5 -t 1
+model_name: "nm-testing/Qwen2-1.5B-Instruct-W8A16-Channelwise"
+tasks:
+  - name: "gsm8k"
+    metrics:
+      - name: "exact_match,strict-match"
+        value: 0.595
+      - name: "exact_match,flexible-extract"
+        value: 0.582
+limit: 1000
+num_fewshot: 5
@@ -1,11 +0,0 @@
-# bash .buildkite/lm-eval-harness/run-lm-eval-gsm-vllm-baseline.sh -m Qwen/Qwen2.5-1.5B-Instruct -b auto -l 1319 -f 5 -t 1
-model_name: "Qwen/Qwen2.5-1.5B-Instruct"
-tasks:
-  - name: "gsm8k"
-    metrics:
-      - name: "exact_match,strict-match"
-        value: 0.54
-      - name: "exact_match,flexible-extract"
-        value: 0.59
-limit: 1319
-num_fewshot: 5
@@ -1,12 +0,0 @@
-# For vllm script, with -t option (tensor parallel size)
-# bash .buildkite/lm-eval-harness/run-lm-eval-gsm-vllm-baseline.sh -m RedHatAI/Qwen2.5-VL-3B-Instruct-FP8-Dynamic -l 1319 -t 1
-model_name: "RedHatAI/Qwen2.5-VL-3B-Instruct-FP8-Dynamic"
-tasks:
-  - name: "gsm8k"
-    metrics:
-      - name: "exact_match,strict-match"
-        value: 0.47
-      - name: "exact_match,flexible-extract"
-        value: 0.64
-limit: 1319
-num_fewshot: 5
@@ -1,12 +0,0 @@
-# For vllm script, with -t option (tensor parallel size).
-# bash .buildkite/lm-eval-harness/run-lm-eval-chartqa-vllm-vlm-baseline.sh -m Qwen/Qwen2.5-VL-7B-Instruct -l 2500 -t 1
-
-model_name: "Qwen/Qwen2.5-VL-7B-Instruct"
-backend: "vllm-vlm"
-tasks:
-  - name: "chartqa"
-    metrics:
-      - name: "relaxed_accuracy,none"
-        value: 0.855
-limit: 2500
-num_fewshot: 0
@@ -1,14 +0,0 @@
-model_name: "Qwen/Qwen3-235B-A22B-Instruct-2507-FP8"
-tasks:
-  - name: "mmlu_pro"
-    metrics:
-      - name: "exact_match,custom-extract"
-        value: 0.82
-limit: 250  # will run on 250 * 14 subjects = 3500 samples
-num_fewshot: 5
-enforce_eager: false  # we use false to speed up the eval process
-kv_cache_dtype: fp8  # we use fp8 to speed up the eval process
-max_model_len: 40960
-apply_chat_template: true
-fewshot_as_multiturn: true
-gen_kwargs: "temperature=0,top_p=1,top_k=0,max_gen_toks=5632,until=<|ENDANSWER|>"
@@ -1,2 +0,0 @@
-Qwen3-235B-A22B-Instruct-2507-FP8.yaml
-NVIDIA-Nemotron-3-Nano-30B-A3B-FP8.yaml
@@ -1 +0,0 @@
-Meta-Llama-4-Maverick-17B-128E-Instruct-FP8.yaml
@@ -3,4 +3,3 @@ Meta-Llama-3-70B-Instruct.yaml
 Mixtral-8x7B-Instruct-v0.1.yaml
 Qwen2-57B-A14-Instruct.yaml
 DeepSeek-V2-Lite-Chat.yaml
-NVIDIA-Nemotron-3-Nano-30B-A3B-BF16.yaml
@@ -1 +0,0 @@
-Meta-Llama-4-Maverick-17B-128E-Instruct-FP8-MM.yaml
@@ -1 +0,0 @@
-Qwen2.5-VL-7B-Instruct.yaml
@@ -1,5 +0,0 @@
-Qwen2.5-1.5B-Instruct.yaml
-Meta-Llama-3.2-1B-Instruct-INT8-compressed-tensors.yaml
-Meta-Llama-3-8B-Instruct-nonuniform-compressed-tensors.yaml
-Qwen2.5-VL-3B-Instruct-FP8-dynamic.yaml
-Qwen1.5-MoE-W4A16-compressed-tensors.yaml
@@ -1,6 +1,10 @@
-Qwen2.5-1.5B-Instruct.yaml
+Meta-Llama-3-8B-Instruct.yaml
+Meta-Llama-3-8B-Instruct-FP8-compressed-tensors.yaml
 Meta-Llama-3.2-1B-Instruct-INT8-compressed-tensors.yaml
 Meta-Llama-3-8B-Instruct-INT8-compressed-tensors-asym.yaml
 Meta-Llama-3-8B-Instruct-nonuniform-compressed-tensors.yaml
-Qwen2.5-VL-3B-Instruct-FP8-dynamic.yaml
+Meta-Llama-3-8B-Instruct-Channelwise-compressed-tensors.yaml
 Qwen1.5-MoE-W4A16-compressed-tensors.yaml
+Qwen2-1.5B-Instruct-INT8-compressed-tensors.yaml
+Qwen2-1.5B-Instruct-FP8W8.yaml
+Meta-Llama-3-8B-QQQ.yaml
@@ -1,44 +0,0 @@
-# SPDX-License-Identifier: Apache-2.0
-# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
-from pathlib import Path
-
-import pytest
-
-
-def pytest_addoption(parser):
-    parser.addoption(
-        "--config-list-file",
-        action="store",
-        help="Path to the file listing model config YAMLs (one per line)",
-    )
-    parser.addoption(
-        "--tp-size",
-        action="store",
-        default="1",
-        help="Tensor parallel size to use for evaluation",
-    )
-
-
-@pytest.fixture(scope="session")
-def config_list_file(pytestconfig, config_dir):
-    rel_path = pytestconfig.getoption("--config-list-file")
-    return config_dir / rel_path
-
-
-@pytest.fixture(scope="session")
-def tp_size(pytestconfig):
-    return pytestconfig.getoption("--tp-size")
-
-
-def pytest_generate_tests(metafunc):
-    if "config_filename" in metafunc.fixturenames:
-        rel_path = metafunc.config.getoption("--config-list-file")
-        config_list_file = Path(rel_path).resolve()
-        config_dir = config_list_file.parent
-        with open(config_list_file, encoding="utf-8") as f:
-            configs = [
-                config_dir / line.strip()
-                for line in f
-                if line.strip() and not line.startswith("#")
-            ]
-        metafunc.parametrize("config_filename", configs)
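This conftest is what made the parametrized entry point on the v0.17.1 side work: it turns every line of the config list into one test case. With it in place the harness is driven by pytest options rather than environment variables, per the docstring in the test-file diff further below:

    pytest -s -v test_lm_eval_correctness.py \
        --config-list-file=configs/models-small.txt \
        --tp-size=1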
@@ -1,44 +0,0 @@
-#!/bin/bash
-# We can use this script to compute baseline accuracy on chartqa for vllm.
-#
-# Make sure you have lm-eval-harness installed:
-#   pip install "lm-eval[api]>=0.4.11"
-
-usage() {
-    echo``
-    echo "Runs lm eval harness on ChartQA using multimodal vllm."
-    echo "This pathway is intended to be used to create baselines for "
-    echo "our correctness tests in vllm's CI."
-    echo
-    echo "usage: ${0} <options>"
-    echo
-    echo "  -m  - huggingface stub or local directory of the model"
-    echo "  -l  - limit number of samples to run"
-    echo "  -t  - tensor parallel size to run at"
-    echo
-}
-
-while getopts "m:l:t:" OPT; do
-    case ${OPT} in
-        m )
-            MODEL="$OPTARG"
-            ;;
-        l )
-            LIMIT="$OPTARG"
-            ;;
-        t )
-            TP_SIZE="$OPTARG"
-            ;;
-        \? )
-            usage
-            exit 1
-            ;;
-    esac
-done
-
-lm_eval --model vllm-vlm \
-    --model_args "pretrained=$MODEL,tensor_parallel_size=$TP_SIZE" \
-    --tasks chartqa \
-    --batch_size auto \
-    --apply_chat_template \
-    --limit "$LIMIT"
2  .buildkite/lm-eval-harness/run-lm-eval-gsm-hf-baseline.sh  Executable file → Normal file
@@ -2,7 +2,7 @@
 # We can use this script to compute baseline accuracy on GSM for transformers.
 #
 # Make sure you have lm-eval-harness installed:
-#   pip install "lm-eval[api]>=0.4.11"
+#   pip install lm-eval==0.4.4

 usage() {
     echo``
@@ -3,7 +3,7 @@
 # We use this for fp8, which HF does not support.
 #
 # Make sure you have lm-eval-harness installed:
-#   pip install "lm-eval[api]>=0.4.11"
+#   pip install lm-eval==0.4.4

 usage() {
     echo``
@@ -46,6 +46,6 @@ while getopts "m:b:l:f:t:" OPT; do
 done

 lm_eval --model vllm \
-    --model_args "pretrained=$MODEL,tensor_parallel_size=$TP_SIZE,add_bos_token=true,trust_remote_code=true,max_model_len=4096" \
+    --model_args "pretrained=$MODEL,tensor_parallel_size=$TP_SIZE,distributed_executor_backend=ray,trust_remote_code=true,max_model_len=4096" \
     --tasks gsm8k --num_fewshot "$FEWSHOT" --limit "$LIMIT" \
     --batch_size "$BATCH_SIZE"
@@ -1,47 +0,0 @@
-#!/bin/bash
-# We can use this script to compute baseline accuracy on MMLUPRO for vllm.
-# We use this for fp8, which HF does not support.
-#
-# Make sure you have lm-eval-harness installed:
-#   pip install "lm-eval[api]>=0.4.11"
-
-usage() {
-    echo``
-    echo "Runs lm eval harness on MMLU Pro using huggingface transformers."
-    echo "This pathway is intended to be used to create baselines for "
-    echo "our automated nm-test-accuracy workflow"
-    echo
-    echo "usage: ${0} <options>"
-    echo
-    echo "  -m  - huggingface stub or local directory of the model"
-    echo "  -l  - limit number of samples to run"
-    echo "  -f  - number of fewshot samples to use"
-    echo "  -t  - tensor parallel size to run at"
-    echo
-}
-
-while getopts "m:l:f:t:" OPT; do
-    case ${OPT} in
-        m )
-            MODEL="$OPTARG"
-            ;;
-        l )
-            LIMIT="$OPTARG"
-            ;;
-        f )
-            FEWSHOT="$OPTARG"
-            ;;
-        t )
-            TP_SIZE="$OPTARG"
-            ;;
-        \? )
-            usage
-            exit 1
-            ;;
-    esac
-done
-
-lm_eval --model vllm \
-    --model_args "pretrained=$MODEL,tensor_parallel_size=$TP_SIZE,add_bos_token=true,trust_remote_code=true,max_model_len=4096" \
-    --tasks mmlu_pro --num_fewshot "$FEWSHOT" --limit "$LIMIT" \
-    --batch_size auto
59  .buildkite/lm-eval-harness/run-tests.sh  Normal file
@@ -0,0 +1,59 @@
+#!/bin/bash
+
+usage() {
+    echo``
+    echo "Runs lm eval harness on GSM8k using vllm and compares to "
+    echo "precomputed baseline (measured by HF transformers.)"
+    echo
+    echo "usage: ${0} <options>"
+    echo
+    echo "  -c    - path to the test data config (e.g. configs/small-models.txt)"
+    echo "  -t    - tensor parallel size"
+    echo
+}
+
+SUCCESS=0
+
+while getopts "c:t:" OPT; do
+    case ${OPT} in
+        c )
+            CONFIG="$OPTARG"
+            ;;
+        t )
+            TP_SIZE="$OPTARG"
+            ;;
+        \? )
+            usage
+            exit 1
+            ;;
+    esac
+done
+
+# Parse list of configs.
+IFS=$'\n' read -d '' -r -a MODEL_CONFIGS < "$CONFIG"
+
+for MODEL_CONFIG in "${MODEL_CONFIGS[@]}"
+do
+    LOCAL_SUCCESS=0
+
+    echo "=== RUNNING MODEL: $MODEL_CONFIG WITH TP SIZE: $TP_SIZE==="
+
+    export LM_EVAL_TEST_DATA_FILE=$PWD/configs/${MODEL_CONFIG}
+    export LM_EVAL_TP_SIZE=$TP_SIZE
+    pytest -s test_lm_eval_correctness.py || LOCAL_SUCCESS=$?
+
+    if [[ $LOCAL_SUCCESS == 0 ]]; then
+        echo "=== PASSED MODEL: ${MODEL_CONFIG} ==="
+    else
+        echo "=== FAILED MODEL: ${MODEL_CONFIG} ==="
+    fi
+
+    SUCCESS=$((SUCCESS + LOCAL_SUCCESS))
+
+done
+
+if [ "${SUCCESS}" -eq "0" ]; then
+    exit 0
+else
+    exit 1
+fi
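The added runner loops over every config listed in the file passed via -c, exporting LM_EVAL_TEST_DATA_FILE and LM_EVAL_TP_SIZE per model before invoking pytest. Matching its own usage() text, a typical call would be:

    bash .buildkite/lm-eval-harness/run-tests.sh -c configs/small-models.txt -t 1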
@@ -1,107 +1,69 @@
 # SPDX-License-Identifier: Apache-2.0
-# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
 """
 LM eval harness on model to compare vs HF baseline computed offline.
 Configs are found in configs/$MODEL.yaml

-pytest -s -v test_lm_eval_correctness.py \
-    --config-list-file=configs/models-small.txt \
-    --tp-size=1
+* export LM_EVAL_TEST_DATA_FILE=configs/Meta-Llama-3-70B-Instruct.yaml
+* export LM_EVAL_TP_SIZE=4
+* pytest -s test_lm_eval_correctness.py
 """

 import os
-from contextlib import contextmanager
+from pathlib import Path

 import lm_eval
-import numpy as np
+import numpy
+import pytest
 import yaml

-DEFAULT_RTOL = 0.08
-
-
-@contextmanager
-def scoped_env_vars(new_env: dict[str, str]):
-    if not new_env:
-        # Fast path: nothing to do
-        yield
-        return
-
-    old_values = {}
-    new_keys = []
-
-    try:
-        for key, value in new_env.items():
-            if key in os.environ:
-                old_values[key] = os.environ[key]
-            else:
-                new_keys.append(key)
-            os.environ[key] = str(value)
-        yield
-    finally:
-        # Restore / clean up
-        for key, value in old_values.items():
-            os.environ[key] = value
-        for key in new_keys:
-            os.environ.pop(key, None)
-
-
-def launch_lm_eval(eval_config, tp_size):
-    trust_remote_code = eval_config.get("trust_remote_code", False)
-    max_model_len = eval_config.get("max_model_len", 4096)
-    batch_size = eval_config.get("batch_size", "auto")
-    backend = eval_config.get("backend", "vllm")
-    enforce_eager = eval_config.get("enforce_eager", "true")
-    kv_cache_dtype = eval_config.get("kv_cache_dtype", "auto")
-    model_args = (
-        f"pretrained={eval_config['model_name']},"
-        f"tensor_parallel_size={tp_size},"
-        f"enforce_eager={enforce_eager},"
-        f"kv_cache_dtype={kv_cache_dtype},"
-        f"add_bos_token=true,"
-        f"trust_remote_code={trust_remote_code},"
-        f"max_model_len={max_model_len},"
-        "allow_deprecated_quantization=True,"
-    )
-
-    env_vars = eval_config.get("env_vars", None)
-    with scoped_env_vars(env_vars):
-        results = lm_eval.simple_evaluate(
-            model=backend,
-            model_args=model_args,
-            tasks=[task["name"] for task in eval_config["tasks"]],
-            num_fewshot=eval_config["num_fewshot"],
-            limit=eval_config["limit"],
-            # TODO(yeq): using chat template w/ fewshot_as_multiturn is supposed help
-            # text models. however, this is regressing measured strict-match for
-            # existing text models in CI, so only apply it for mm, or explicitly set
-            apply_chat_template=eval_config.get(
-                "apply_chat_template", backend == "vllm-vlm"
-            ),
-            fewshot_as_multiturn=eval_config.get("fewshot_as_multiturn", False),
-            # Forward decoding and early-stop controls (e.g., max_gen_toks, until=...)
-            gen_kwargs=eval_config.get("gen_kwargs"),
-            batch_size=batch_size,
-        )
+RTOL = 0.08
+TEST_DATA_FILE = os.environ.get(
+    "LM_EVAL_TEST_DATA_FILE",
+    ".buildkite/lm-eval-harness/configs/Meta-Llama-3-8B-Instruct.yaml")
+
+TP_SIZE = os.environ.get("LM_EVAL_TP_SIZE", 1)
+
+
+def launch_lm_eval(eval_config):
+    trust_remote_code = eval_config.get('trust_remote_code', False)
+
+    model_args = f"pretrained={eval_config['model_name']}," \
+                 f"tensor_parallel_size={TP_SIZE}," \
+                 f"add_bos_token=true," \
+                 f"trust_remote_code={trust_remote_code}"
+
+    results = lm_eval.simple_evaluate(
+        model="vllm",
+        model_args=model_args,
+        tasks=[task["name"] for task in eval_config["tasks"]],
+        num_fewshot=eval_config["num_fewshot"],
+        limit=eval_config["limit"],
+        batch_size="auto")
+
     return results


-def test_lm_eval_correctness_param(config_filename, tp_size):
-    eval_config = yaml.safe_load(config_filename.read_text(encoding="utf-8"))
-
-    results = launch_lm_eval(eval_config, tp_size)
-
-    rtol = eval_config.get("rtol", DEFAULT_RTOL)
-
+def test_lm_eval_correctness():
+    eval_config = yaml.safe_load(
+        Path(TEST_DATA_FILE).read_text(encoding="utf-8"))
+
+    if eval_config[
+            "model_name"] == "nm-testing/Meta-Llama-3-70B-Instruct-FBGEMM-nonuniform":  #noqa: E501
+        pytest.skip("FBGEMM is currently failing on main.")
+
+    # Launch eval requests.
+    results = launch_lm_eval(eval_config)
+
+    # Confirm scores match ground truth.
     success = True
     for task in eval_config["tasks"]:
         for metric in task["metrics"]:
             ground_truth = metric["value"]
             measured_value = results["results"][task["name"]][metric["name"]]
-            print(
-                f"{task['name']} | {metric['name']}: "
-                f"ground_truth={ground_truth:.3f} | "
-                f"measured={measured_value:.3f} | rtol={rtol}"
-            )
-            success = success and np.isclose(ground_truth, measured_value, rtol=rtol)
-
+            print(f'{task["name"]} | {metric["name"]}: '
+                  f'ground_truth={ground_truth} | measured={measured_value}')
+            success = success and numpy.isclose(
+                ground_truth, measured_value, rtol=RTOL)
+
+    # Assert at the end, print all scores even on failure for debugging.
    assert success
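In short, the two versions of this test are driven differently: the v0.17.1 side is parametrized over --config-list-file/--tp-size (via the conftest removed earlier), while the v0.8.5 side reads LM_EVAL_TEST_DATA_FILE and LM_EVAL_TP_SIZE. A sketch of the env-var-driven flow, taken directly from the new docstring:

    export LM_EVAL_TEST_DATA_FILE=configs/Meta-Llama-3-70B-Instruct.yaml
    export LM_EVAL_TP_SIZE=4
    pytest -s test_lm_eval_correctness.py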
143  .buildkite/nightly-benchmarks/README.md  Normal file
@@ -0,0 +1,143 @@
|
|||||||
|
# vLLM benchmark suite
|
||||||
|
|
||||||
|
## Introduction
|
||||||
|
|
||||||
|
This directory contains two sets of benchmark for vllm.
|
||||||
|
|
||||||
|
- Performance benchmark: benchmark vllm's performance under various workload, for **developers** to gain clarity on whether their PR improves/degrades vllm's performance
|
||||||
|
- Nightly benchmark: compare vllm's performance against alternatives (tgi, trt-llm and lmdeploy), for **the public** to know when to choose vllm.
|
||||||
|
|
||||||
|
See [vLLM performance dashboard](https://perf.vllm.ai) for the latest performance benchmark results and [vLLM GitHub README](https://github.com/vllm-project/vllm/blob/main/README.md) for latest nightly benchmark results.
|
||||||
|
|
||||||
|
## Performance benchmark quick overview
|
||||||
|
|
||||||
|
**Benchmarking Coverage**: latency, throughput and fix-qps serving on A100 (the support for FP8 benchmark on H100 is coming!), with different models.
|
||||||
|
|
||||||
|
**Benchmarking Duration**: about 1hr.
|
||||||
|
|
||||||
|
**For benchmarking developers**: please try your best to constraint the duration of benchmarking to about 1 hr so that it won't take forever to run.
|
||||||
|
|
||||||
|
## Nightly benchmark quick overview
|
||||||
|
|
||||||
|
**Benchmarking Coverage**: Fix-qps serving on A100 (the support for FP8 benchmark on H100 is coming!) on Llama-3 8B, 70B and Mixtral 8x7B.
|
||||||
|
|
||||||
|
**Benchmarking engines**: vllm, TGI, trt-llm and lmdeploy.
|
||||||
|
|
||||||
|
**Benchmarking Duration**: about 3.5hrs.
|
||||||
|
|
||||||
|
## Trigger the benchmark
|
||||||
|
|
||||||
|
Performance benchmark will be triggered when:
|
||||||
|
- A PR being merged into vllm.
|
||||||
|
- Every commit for those PRs with `perf-benchmarks` label AND `ready` label.
|
||||||
|
|
||||||
|
Nightly benchmark will be triggered when:
|
||||||
|
- Every commit for those PRs with `perf-benchmarks` label and `nightly-benchmarks` label.
|
||||||
|
|
||||||
|
## Performance benchmark details
|
||||||
|
|
||||||
|
See [performance-benchmarks-descriptions.md](performance-benchmarks-descriptions.md) for detailed descriptions, and use `tests/latency-tests.json`, `tests/throughput-tests.json`, `tests/serving-tests.json` to configure the test cases.
|
||||||
|
|
||||||
|
### Latency test
|
||||||
|
|
||||||
|
Here is an example of one test inside `latency-tests.json`:
|
||||||
|
|
||||||
|
```json
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"test_name": "latency_llama8B_tp1",
|
||||||
|
"parameters": {
|
||||||
|
"model": "meta-llama/Meta-Llama-3-8B",
|
||||||
|
"tensor_parallel_size": 1,
|
||||||
|
"load_format": "dummy",
|
||||||
|
"num_iters_warmup": 5,
|
||||||
|
"num_iters": 15
|
||||||
|
}
|
||||||
|
},
|
||||||
|
]
|
||||||
|
```
|
||||||
|
|
||||||
|
In this example:
|
||||||
|
|
||||||
|
- The `test_name` attributes is a unique identifier for the test. In `latency-tests.json`, it must start with `latency_`.
|
||||||
|
- The `parameters` attribute control the command line arguments to be used for `benchmark_latency.py`. Note that please use underline `_` instead of the dash `-` when specifying the command line arguments, and `run-performance-benchmarks.sh` will convert the underline to dash when feeding the arguments to `benchmark_latency.py`. For example, the corresponding command line arguments for `benchmark_latency.py` will be `--model meta-llama/Meta-Llama-3-8B --tensor-parallel-size 1 --load-format dummy --num-iters-warmup 5 --num-iters 15`
|
||||||
|
|
||||||
|
Note that the performance numbers are highly sensitive to the value of the parameters. Please make sure the parameters are set correctly.
|
||||||
|
|
||||||
|
WARNING: The benchmarking script will save json results by itself, so please do not configure `--output-json` parameter in the json file.
|
||||||
|
|
||||||
|
### Throughput test
|
||||||
|
|
||||||
|
The tests are specified in `throughput-tests.json`. The syntax is similar to `latency-tests.json`, except for that the parameters will be fed forward to `benchmark_throughput.py`.
|
||||||
|
|
||||||
|
The number of this test is also stable -- a slight change on the value of this number might vary the performance numbers by a lot.
|
||||||
|
|
||||||
|
### Serving test
|
||||||
|
|
||||||
|
We test the throughput by using `benchmark_serving.py` with request rate = inf to cover the online serving overhead. The corresponding parameters are in `serving-tests.json`, and here is an example:
|
||||||
|
|
||||||
|
```json
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"test_name": "serving_llama8B_tp1_sharegpt",
|
||||||
|
"qps_list": [1, 4, 16, "inf"],
|
||||||
|
"server_parameters": {
|
||||||
|
"model": "meta-llama/Meta-Llama-3-8B",
|
||||||
|
"tensor_parallel_size": 1,
|
||||||
|
"swap_space": 16,
|
||||||
|
"disable_log_stats": "",
|
||||||
|
"disable_log_requests": "",
|
||||||
|
"load_format": "dummy"
|
||||||
|
},
|
||||||
|
"client_parameters": {
|
||||||
|
"model": "meta-llama/Meta-Llama-3-8B",
|
||||||
|
"backend": "vllm",
|
||||||
|
"dataset_name": "sharegpt",
|
||||||
|
"dataset_path": "./ShareGPT_V3_unfiltered_cleaned_split.json",
|
||||||
|
"num_prompts": 200
|
||||||
|
}
|
||||||
|
},
|
||||||
|
]
|
||||||
|
```
|
||||||
|
|
||||||
|
Inside this example:
|
||||||
|
|
||||||
|
- The `test_name` attribute is also a unique identifier for the test. It must start with `serving_`.
|
||||||
|
- The `server-parameters` includes the command line arguments for vLLM server.
|
||||||
|
- The `client-parameters` includes the command line arguments for `benchmark_serving.py`.
|
||||||
|
- The `qps_list` controls the list of qps for test. It will be used to configure the `--request-rate` parameter in `benchmark_serving.py`
|
||||||
|
|
||||||
|
The number of this test is less stable compared to the delay and latency benchmarks (due to randomized sharegpt dataset sampling inside `benchmark_serving.py`), but a large change on this number (e.g. 5% change) still vary the output greatly.
|
||||||
|
|
||||||
|
WARNING: The benchmarking script will save json results by itself, so please do not configure `--save-results` or other results-saving-related parameters in `serving-tests.json`.
|
||||||
|
|
||||||
|
### Visualizing the results

The `convert-results-json-to-markdown.py` script puts the benchmarking results into a markdown table, by formatting [descriptions.md](tests/descriptions.md) with the real benchmarking results.
You can find the results presented as a table on the `buildkite/performance-benchmark` job page.
If you do not see the table, please wait until the benchmark finishes running.
The json version of the table (together with the json version of the benchmark) will also be attached to the markdown file.
The raw benchmarking results (in the form of json files) are in the `Artifacts` tab of the benchmarking job.
## Nightly test details

See [nightly-descriptions.md](nightly-descriptions.md) for a detailed description of the test workloads, models, and docker containers used when benchmarking other LLM engines.
### Workflow

- The [nightly-pipeline.yaml](nightly-pipeline.yaml) specifies the docker containers for the different LLM serving engines.
- Inside each container, we run [run-nightly-suite.sh](run-nightly-suite.sh), which probes the serving engine of the current container.
- `run-nightly-suite.sh` then redirects the request to `tests/run-[llm serving engine name]-nightly.sh`, which parses the workload described in [nightly-tests.json](tests/nightly-tests.json) and performs the benchmark.
- Finally, we run [scripts/plot-nightly-results.py](scripts/plot-nightly-results.py) to collect and plot the final benchmarking results, and upload them to Buildkite.
### Nightly tests

In [nightly-tests.json](tests/nightly-tests.json), we include the command line arguments for the benchmarking commands, together with the benchmarking test cases. The format closely mirrors that of the performance benchmark; a hypothetical entry is sketched below.
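Field names here are inferred from the `jq` queries in `run-nightly-benchmarks.sh` (included later in this diff: `.common_parameters`, `.<engine>_server_parameters`, `.<engine>_client_parameters`, `.qps_list`); the values are invented for illustration, so treat this as a sketch of the format rather than a copy of the real file:

```python
# Hypothetical nightly-tests.json entry, inferred from the jq queries in
# run-nightly-benchmarks.sh. All values are illustrative.
nightly_test = {
    "test_name": "llama8B_tp1_sharegpt",
    "qps_list": [2, 4, 8, 16, "inf"],
    "common_parameters": {
        "model": "meta-llama/Meta-Llama-3-8B",
        "tp": 1,
        "dataset_name": "sharegpt",
        "dataset_path": "./ShareGPT_V3_unfiltered_cleaned_split.json",
        "port": 8000,
        "num_prompts": 200,
        "reuse_server": "false",
    },
    "vllm_server_parameters": {"disable_log_stats": ""},
    "vllm_client_parameters": {},
}
```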
### Docker containers

The docker containers for benchmarking are specified in `nightly-pipeline.yaml`.

WARNING: the docker versions are HARD-CODED and SHOULD BE ALIGNED WITH `nightly-descriptions.md`. The docker versions need to be hard-coded because there are several version-specific bug fixes inside `tests/run-[llm serving engine name]-nightly.sh`.

WARNING: bumping `trt-llm` to the latest version is not easy, as it requires updating several protobuf files in [tensorrt-demo](https://github.com/neuralmagic/tensorrt-demo.git).
.buildkite/nightly-benchmarks/benchmark-pipeline.yaml (new file, 184 lines)
@@ -0,0 +1,184 @@
steps:
  - label: "Wait for container to be ready"
    key: wait-for-container-image
    agents:
      queue: A100
    plugins:
      - kubernetes:
          podSpec:
            containers:
              - image: badouralix/curl-jq
                command:
                  - sh .buildkite/nightly-benchmarks/scripts/wait-for-image.sh

  - label: "Cleanup H100"
    agents:
      queue: H100
    depends_on: ~
    command: docker system prune -a --volumes --force

  - label: "A100"
    # skip: "use this flag to conditionally skip the benchmark step, useful for PR testing"
    agents:
      queue: A100
    depends_on: wait-for-container-image
    if: build.branch == "main"
    plugins:
      - kubernetes:
          podSpec:
            priorityClassName: perf-benchmark
            containers:
              - image: public.ecr.aws/q9t5s3a7/vllm-ci-postmerge-repo:$BUILDKITE_COMMIT
                command:
                  - bash .buildkite/nightly-benchmarks/scripts/run-performance-benchmarks.sh
                resources:
                  limits:
                    nvidia.com/gpu: 8
                volumeMounts:
                  - name: devshm
                    mountPath: /dev/shm
                env:
                  - name: VLLM_USAGE_SOURCE
                    value: ci-test
                  - name: HF_TOKEN
                    valueFrom:
                      secretKeyRef:
                        name: hf-token-secret
                        key: token
            nodeSelector:
              nvidia.com/gpu.product: NVIDIA-A100-SXM4-80GB
            volumes:
              - name: devshm
                emptyDir:
                  medium: Memory

  - label: "H200"
    # skip: "use this flag to conditionally skip the benchmark step, useful for PR testing"
    agents:
      queue: H200
    depends_on: wait-for-container-image
    if: build.branch == "main"
    plugins:
      - docker#v5.12.0:
          image: public.ecr.aws/q9t5s3a7/vllm-ci-postmerge-repo:$BUILDKITE_COMMIT
          command:
            - bash
            - .buildkite/nightly-benchmarks/scripts/run-performance-benchmarks.sh
          mount-buildkite-agent: true
          propagate-environment: true
          ipc: host
          gpus: 4,5,6,7
          volumes:
            - /data/benchmark-hf-cache:/root/.cache/huggingface
          environment:
            - VLLM_USAGE_SOURCE
            - HF_TOKEN

  #- block: "Run H100 Benchmark"
  #  key: block-h100
  #  depends_on: ~

  - label: "H100"
    # skip: "use this flag to conditionally skip the benchmark step, useful for PR testing"
    agents:
      queue: H100
    depends_on: wait-for-container-image
    if: build.branch == "main"
    plugins:
      - docker#v5.12.0:
          image: public.ecr.aws/q9t5s3a7/vllm-ci-postmerge-repo:$BUILDKITE_COMMIT
          command:
            - bash
            - .buildkite/nightly-benchmarks/scripts/run-performance-benchmarks.sh
          mount-buildkite-agent: true
          propagate-environment: true
          ipc: host
          gpus: all # see CUDA_VISIBLE_DEVICES for actual GPUs used
          volumes:
            - /data/benchmark-hf-cache:/root/.cache/huggingface
          environment:
            - VLLM_USAGE_SOURCE
            - HF_TOKEN

  # Premerge benchmark
  - label: "A100"
    # skip: "use this flag to conditionally skip the benchmark step, useful for PR testing"
    agents:
      queue: A100
    depends_on: wait-for-container-image
    if: build.branch != "main"
    plugins:
      - kubernetes:
          podSpec:
            priorityClassName: perf-benchmark
            containers:
              - image: public.ecr.aws/q9t5s3a7/vllm-ci-test-repo:$BUILDKITE_COMMIT
                command:
                  - bash .buildkite/nightly-benchmarks/scripts/run-performance-benchmarks.sh
                resources:
                  limits:
                    nvidia.com/gpu: 8
                volumeMounts:
                  - name: devshm
                    mountPath: /dev/shm
                env:
                  - name: VLLM_USAGE_SOURCE
                    value: ci-test
                  - name: HF_TOKEN
                    valueFrom:
                      secretKeyRef:
                        name: hf-token-secret
                        key: token
            nodeSelector:
              nvidia.com/gpu.product: NVIDIA-A100-SXM4-80GB
            volumes:
              - name: devshm
                emptyDir:
                  medium: Memory

  - label: "H200"
    # skip: "use this flag to conditionally skip the benchmark step, useful for PR testing"
    agents:
      queue: H200
    depends_on: wait-for-container-image
    if: build.branch != "main"
    plugins:
      - docker#v5.12.0:
          image: public.ecr.aws/q9t5s3a7/vllm-ci-test-repo:$BUILDKITE_COMMIT
          command:
            - bash
            - .buildkite/nightly-benchmarks/scripts/run-performance-benchmarks.sh
          mount-buildkite-agent: true
          propagate-environment: true
          ipc: host
          gpus: 4,5,6,7
          volumes:
            - /data/benchmark-hf-cache:/root/.cache/huggingface
          environment:
            - VLLM_USAGE_SOURCE
            - HF_TOKEN

  #- block: "Run H100 Benchmark"
  #  key: block-h100
  #  depends_on: ~

  - label: "H100"
    # skip: "use this flag to conditionally skip the benchmark step, useful for PR testing"
    agents:
      queue: H100
    depends_on: wait-for-container-image
    if: build.branch != "main"
    plugins:
      - docker#v5.12.0:
          image: public.ecr.aws/q9t5s3a7/vllm-ci-test-repo:$BUILDKITE_COMMIT
          command:
            - bash
            - .buildkite/nightly-benchmarks/scripts/run-performance-benchmarks.sh
          mount-buildkite-agent: true
          propagate-environment: true
          ipc: host
          gpus: all # see CUDA_VISIBLE_DEVICES for actual GPUs used
          volumes:
            - /data/benchmark-hf-cache:/root/.cache/huggingface
          environment:
            - VLLM_USAGE_SOURCE
            - HF_TOKEN
.buildkite/nightly-benchmarks/nightly-annotation.md (new file, 27 lines)
@@ -0,0 +1,27 @@
## Description

This file contains the download links for the benchmarking results.

- [benchmarking pipeline](artifact://nightly-pipeline.yaml)
- [benchmarking results](artifact://results.zip)
- [benchmarking code](artifact://nightly-benchmarks.zip)

Please download the visualization scripts in the post.

## Results reproduction

- Find the docker we use in `benchmarking pipeline`
- Deploy the docker, and inside the docker:
  - Download `nightly-benchmarks.zip`.
  - In the same folder, run the following code:

```console
export HF_TOKEN=<your HF token>
apt update
apt install -y git
unzip nightly-benchmarks.zip
VLLM_SOURCE_CODE_LOC=./ bash .buildkite/nightly-benchmarks/scripts/run-nightly-benchmarks.sh
```

And the results will be inside `./benchmarks/results`.
.buildkite/nightly-benchmarks/nightly-descriptions.md (new file, 39 lines)
@@ -0,0 +1,39 @@
# Nightly benchmark

This benchmark aims to:

- Provide performance clarity: make it clear which engine (vllm, tensorrt-llm, lmdeploy or SGLang) leads in performance for which workload.
- Be reproducible: one can run the exact same set of benchmarking commands inside the exact same docker by following the reproducing instructions.

Latest results: [results link](https://blog.vllm.ai/2024/09/05/perf-update.html), scroll to the end.

Latest reproduction guide: [github issue link](https://github.com/vllm-project/vllm/issues/8176)

## Setup

- Docker images:
  - vLLM: `vllm/vllm-openai:v0.6.2`
  - SGLang: `lmsysorg/sglang:v0.3.2-cu121`
  - LMDeploy: `openmmlab/lmdeploy:v0.6.1-cu12`
  - TensorRT-LLM: `nvcr.io/nvidia/tritonserver:24.07-trtllm-python-py3`
    - *NOTE: we use r24.07 as the current implementation only works for this version. We are going to bump this up.*
  - Check [nightly-pipeline.yaml](nightly-pipeline.yaml) for the concrete docker images, specs and commands we use for the benchmark.
- Hardware
  - 8x Nvidia A100 GPUs
- Workload:
  - Dataset
    - ShareGPT dataset
    - Prefill-heavy dataset (on average 462 input tokens, 16 output tokens)
    - Decode-heavy dataset (on average 462 input tokens, 256 output tokens)
    - Check [nightly-tests.json](tests/nightly-tests.json) for the concrete configuration of the datasets we use.
  - Models: llama-3 8B, llama-3 70B.
    - We do not use llama 3.1 as it is incompatible with trt-llm r24.07 ([issue](https://github.com/NVIDIA/TensorRT-LLM/issues/2105)).
  - Average QPS (query per second): 2, 4, 8, 16, 32 and inf.
    - Queries are randomly sampled, and arrival patterns are determined via a Poisson process, all with a fixed random seed (see the sketch after this list).
  - Evaluation metrics: Throughput (higher is better), TTFT (time to the first token, lower is better), ITL (inter-token latency, lower is better).
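For intuition, here is a minimal sketch of Poisson request arrivals with a fixed seed: inter-arrival times are drawn from an exponential distribution with mean 1/QPS, and fixing the seed makes the arrival pattern identical across engines and runs. This models the behavior described above, not the benchmark's actual code:

```python
import numpy as np

# Sketch: Poisson arrivals at a target QPS with a fixed random seed.
# In a Poisson process, inter-arrival gaps are exponentially distributed
# with mean 1/qps; a fixed seed makes the pattern reproducible.
def poisson_arrival_times(num_requests: int, qps: float, seed: int = 0):
    rng = np.random.default_rng(seed)
    gaps = rng.exponential(scale=1.0 / qps, size=num_requests)
    return np.cumsum(gaps)  # absolute send time (s) of each request

print(poisson_arrival_times(5, qps=2.0))
```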
## Known issues

- TRT-LLM crashes with Llama 3.1 8B [issue](https://github.com/NVIDIA/TensorRT-LLM/issues/2105).
- TGI does not support the `ignore-eos` flag.
.buildkite/nightly-benchmarks/nightly-pipeline.yaml (new file, 196 lines)
@@ -0,0 +1,196 @@
common_pod_spec: &common_pod_spec
  priorityClassName: perf-benchmark
  nodeSelector:
    nvidia.com/gpu.product: NVIDIA-A100-SXM4-80GB
  volumes:
    - name: devshm
      emptyDir:
        medium: Memory
    - name: hf-cache
      hostPath:
        path: /root/.cache/huggingface
        type: Directory

common_container_settings: &common_container_settings
  command:
    - bash .buildkite/nightly-benchmarks/scripts/run-nightly-benchmarks.sh
  resources:
    limits:
      nvidia.com/gpu: 8
  volumeMounts:
    - name: devshm
      mountPath: /dev/shm
    - name: hf-cache
      mountPath: /root/.cache/huggingface
  env:
    - name: VLLM_USAGE_SOURCE
      value: ci-test
    - name: HF_HOME
      value: /root/.cache/huggingface
    - name: VLLM_SOURCE_CODE_LOC
      value: /workspace/build/buildkite/vllm/performance-benchmark
    - name: HF_TOKEN
      valueFrom:
        secretKeyRef:
          name: hf-token-secret
          key: token

steps:
  - block: ":rocket: Ready for comparing vllm against alternatives? This will take 4 hours."

  - label: "A100 vllm step 10"
    priority: 100
    agents:
      queue: A100
    plugins:
      - kubernetes:
          podSpec:
            <<: *common_pod_spec
            containers:
              - image: vllm/vllm-openai:v0.6.2
                <<: *common_container_settings

  - label: "A100 sglang benchmark"
    priority: 100
    agents:
      queue: A100
    plugins:
      - kubernetes:
          podSpec:
            <<: *common_pod_spec
            containers:
              - image: lmsysorg/sglang:v0.3.2-cu121
                <<: *common_container_settings

  - label: "A100 lmdeploy benchmark"
    priority: 100
    agents:
      queue: A100
    plugins:
      - kubernetes:
          podSpec:
            <<: *common_pod_spec
            containers:
              - image: openmmlab/lmdeploy:v0.6.1-cu12
                <<: *common_container_settings

  - label: "A100 trt llama-8B"
    priority: 100
    agents:
      queue: A100
    plugins:
      - kubernetes:
          podSpec:
            <<: *common_pod_spec
            containers:
              - image: nvcr.io/nvidia/tritonserver:24.07-trtllm-python-py3
                <<: *common_container_settings
                env:
                  - name: VLLM_USAGE_SOURCE
                    value: ci-test
                  - name: HF_HOME
                    value: /root/.cache/huggingface
                  - name: VLLM_SOURCE_CODE_LOC
                    value: /workspace/build/buildkite/vllm/performance-benchmark
                  - name: HF_TOKEN
                    valueFrom:
                      secretKeyRef:
                        name: hf-token-secret
                        key: token
                  - name: TEST_SELECTOR
                    value: "llama8B"

  - label: "A100 trt llama-70B"
    priority: 100
    agents:
      queue: A100
    plugins:
      - kubernetes:
          podSpec:
            <<: *common_pod_spec
            containers:
              - image: nvcr.io/nvidia/tritonserver:24.07-trtllm-python-py3
                <<: *common_container_settings
                env:
                  - name: VLLM_USAGE_SOURCE
                    value: ci-test
                  - name: HF_HOME
                    value: /root/.cache/huggingface
                  - name: VLLM_SOURCE_CODE_LOC
                    value: /workspace/build/buildkite/vllm/performance-benchmark
                  - name: HF_TOKEN
                    valueFrom:
                      secretKeyRef:
                        name: hf-token-secret
                        key: token
                  - name: TEST_SELECTOR
                    value: "llama70B"

  # FIXME(Kuntai): uncomment this after NVIDIA gives us their test docker image
  # - label: "A100 trt benchmark"
  #   priority: 100
  #   agents:
  #     queue: A100
  #   plugins:
  #     - kubernetes:
  #         podSpec:
  #           <<: *common_pod_spec
  #           containers:
  #             - image: nvcr.io/nvidia/tritonserver:24.07-trtllm-python-py3
  #               <<: *common_container_settings

  # FIXME(Kuntai): uncomment this after TGI supports `--ignore-eos`.
  # - label: "A100 tgi benchmark"
  #   priority: 100
  #   agents:
  #     queue: A100
  #   plugins:
  #     - kubernetes:
  #         podSpec:
  #           <<: *common_pod_spec
  #           containers:
  #             - image: ghcr.io/huggingface/text-generation-inference:2.2.0
  #               <<: *common_container_settings

  - wait

  - label: "Collect the results"
    priority: 100
    agents:
      queue: A100
    plugins:
      - kubernetes:
          podSpec:
            <<: *common_pod_spec
            containers:
              - image: vllm/vllm-openai:v0.5.0.post1
                command:
                  - bash .buildkite/nightly-benchmarks/scripts/nightly-annotate.sh
                resources:
                  limits:
                    nvidia.com/gpu: 8
                volumeMounts:
                  - name: devshm
                    mountPath: /dev/shm
                env:
                  - name: VLLM_USAGE_SOURCE
                    value: ci-test
                  - name: VLLM_SOURCE_CODE_LOC
                    value: /workspace/build/buildkite/vllm/performance-benchmark
                  - name: HF_TOKEN
                    valueFrom:
                      secretKeyRef:
                        name: hf-token-secret
                        key: token

  - block: ":rocket: check the results!"
@@ -1,12 +1,10 @@
-# Performance benchmarks descriptions
-
 ## Latency tests
 
 - Input length: 32 tokens.
 - Output length: 128 tokens.
 - Batch size: fixed (8).
-- GPU/HPU Models: llama-3.1 8B, llama-3 70B, mixtral 8x7B.
-- CPU Models: llama-3.1 8B.
+- Models: llama-3.1 8B, llama-3 70B, mixtral 8x7B.
 - Evaluation metrics: end-to-end latency (mean, median, p99).
 
 {latency_tests_markdown_table}
@@ -16,8 +14,7 @@
 - Input length: randomly sample 200 prompts from ShareGPT dataset (with fixed random seed).
 - Output length: the corresponding output length of these 200 prompts.
 - Batch size: dynamically determined by vllm to achieve maximum throughput.
-- GPU/HPU Models: llama-3.1 8B, llama-3 70B, mixtral 8x7B.
-- CPU Models: llama-3.1 8B.
+- Models: llama-3.1 8B, llama-3 70B, mixtral 8x7B.
 - Evaluation metrics: throughput.
 
 {throughput_tests_markdown_table}
@@ -28,18 +25,12 @@
 - Output length: the corresponding output length of these 200 prompts.
 - Batch size: dynamically determined by vllm and the arrival pattern of the requests.
 - **Average QPS (query per second)**: 1, 4, 16 and inf. QPS = inf means all requests come at once. For other QPS values, the arrival time of each query is determined using a random Poisson process (with fixed random seed).
-- GPU/HPU Models: llama-3.1 8B, llama-3 70B, mixtral 8x7B.
-- We also added a speculative decoding test for llama-3 70B on GPU, under QPS 2
-- CPU Models: llama-3.1 8B.
+- Models: llama-3.1 8B, llama-3 70B, mixtral 8x7B.
+- We also added a speculative decoding test for llama-3 70B, under QPS 2
 - Evaluation metrics: throughput, TTFT (time to the first token, with mean, median and p99), ITL (inter-token latency, with mean, median and p99).
-- For CPU, we added random dataset tests to benchmark fixed input/output length with 100 prompts.
 
 {serving_tests_markdown_table}
 
-## Platform Information
-
-{platform_markdown_table}
-
 ## json version of the benchmarking tables
 
 This section contains the data of the markdown tables above in JSON format.
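The `{...}` placeholders in this template are filled in with Python's `str.format` by the results-conversion script that follows; a minimal self-contained sketch of the mechanism (illustrative template string, not the real file):

```python
# Sketch of the str.format templating used to render the description file.
template = (
    "## Latency tests\n"
    "{latency_tests_markdown_table}\n"
)
print(template.format(
    latency_tests_markdown_table="| Test name | GPU | Mean latency (ms) |"))
```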
@@ -0,0 +1,221 @@
# SPDX-License-Identifier: Apache-2.0

import json
import os
from pathlib import Path

import pandas as pd
from tabulate import tabulate

results_folder = Path("results/")

# latency results and the keys that will be printed into markdown
latency_results = []
latency_column_mapping = {
    "test_name": "Test name",
    "gpu_type": "GPU",
    "avg_latency": "Mean latency (ms)",
    # "P10": "P10 (s)",
    # "P25": "P25 (s)",
    "P50": "Median latency (ms)",
    # "P75": "P75 (s)",
    # "P90": "P90 (s)",
    "P99": "P99 latency (ms)",
}

# throughput tests and the keys that will be printed into markdown
throughput_results = []
throughput_results_column_mapping = {
    "test_name": "Test name",
    "gpu_type": "GPU",
    # "num_requests": "# of req.",
    # "total_num_tokens": "Total # of tokens",
    # "elapsed_time": "Elapsed time (s)",
    "requests_per_second": "Tput (req/s)",
    # "tokens_per_second": "Tput (tok/s)",
}

# serving results and the keys that will be printed into markdown
serving_results = []
serving_column_mapping = {
    "test_name": "Test name",
    "gpu_type": "GPU",
    # "completed": "# of req.",
    "request_throughput": "Tput (req/s)",
    # "input_throughput": "Input Tput (tok/s)",
    # "output_throughput": "Output Tput (tok/s)",
    "mean_ttft_ms": "Mean TTFT (ms)",
    "median_ttft_ms": "Median TTFT (ms)",
    "p99_ttft_ms": "P99 TTFT (ms)",
    # "mean_tpot_ms": "Mean TPOT (ms)",
    # "median_tpot_ms": "Median",
    # "p99_tpot_ms": "P99",
    "mean_itl_ms": "Mean ITL (ms)",
    "median_itl_ms": "Median ITL (ms)",
    "p99_itl_ms": "P99 ITL (ms)",
}


def read_markdown(file):
    if os.path.exists(file):
        with open(file) as f:
            return f.read() + "\n"
    else:
        return f"{file} not found.\n"


def results_to_json(latency, throughput, serving):
    return json.dumps({
        'latency': latency.to_dict(),
        'throughput': throughput.to_dict(),
        'serving': serving.to_dict()
    })


if __name__ == "__main__":

    # collect results
    for test_file in results_folder.glob("*.json"):

        with open(test_file) as f:
            raw_result = json.loads(f.read())

        if "serving" in str(test_file):
            # this result is generated via `benchmark_serving.py`

            # attach the benchmarking command to raw_result
            try:
                with open(test_file.with_suffix(".commands")) as f:
                    command = json.loads(f.read())
            except OSError as e:
                print(e)
                continue

            raw_result.update(command)

            # update the test name of this result
            raw_result.update({"test_name": test_file.stem})

            # add the result to raw_result
            serving_results.append(raw_result)
            continue

        elif "latency" in str(test_file):
            # this result is generated via `benchmark_latency.py`

            # attach the benchmarking command to raw_result
            try:
                with open(test_file.with_suffix(".commands")) as f:
                    command = json.loads(f.read())
            except OSError as e:
                print(e)
                continue

            raw_result.update(command)

            # update the test name of this result
            raw_result.update({"test_name": test_file.stem})

            # get different percentiles
            for perc in [10, 25, 50, 75, 90, 99]:
                # Multiply 1000 to convert the time unit from s to ms
                raw_result.update(
                    {f"P{perc}": 1000 * raw_result["percentiles"][str(perc)]})
            raw_result["avg_latency"] = raw_result["avg_latency"] * 1000

            # add the result to raw_result
            latency_results.append(raw_result)
            continue

        elif "throughput" in str(test_file):
            # this result is generated via `benchmark_throughput.py`

            # attach the benchmarking command to raw_result
            try:
                with open(test_file.with_suffix(".commands")) as f:
                    command = json.loads(f.read())
            except OSError as e:
                print(e)
                continue

            raw_result.update(command)

            # update the test name of this result
            raw_result.update({"test_name": test_file.stem})

            # add the result to raw_result
            throughput_results.append(raw_result)
            continue

        print(f"Skipping {test_file}")

    latency_results = pd.DataFrame.from_dict(latency_results)
    serving_results = pd.DataFrame.from_dict(serving_results)
    throughput_results = pd.DataFrame.from_dict(throughput_results)

    raw_results_json = results_to_json(latency_results, throughput_results,
                                       serving_results)

    # remapping the key, for visualization purpose
    if not latency_results.empty:
        latency_results = latency_results[list(
            latency_column_mapping.keys())].rename(
                columns=latency_column_mapping)
    if not serving_results.empty:
        serving_results = serving_results[list(
            serving_column_mapping.keys())].rename(
                columns=serving_column_mapping)
    if not throughput_results.empty:
        throughput_results = throughput_results[list(
            throughput_results_column_mapping.keys())].rename(
                columns=throughput_results_column_mapping)

    processed_results_json = results_to_json(latency_results,
                                             throughput_results,
                                             serving_results)

    for df in [latency_results, serving_results, throughput_results]:
        if df.empty:
            continue

        # Sort all dataframes by their respective "Test name" columns
        df.sort_values(by="Test name", inplace=True)

        # The GPUs sometimes come in format of "GPUTYPE\nGPUTYPE\n...",
        # we want to turn it into "8xGPUTYPE"
        # (avoid a backslash inside an f-string expression, which is a
        # syntax error before Python 3.12)
        df["GPU"] = df["GPU"].apply(
            lambda x: "{}x{}".format(len(x.split("\n")), x.split("\n")[0]))

    # get markdown tables
    latency_md_table = tabulate(latency_results,
                                headers='keys',
                                tablefmt='pipe',
                                showindex=False)
    serving_md_table = tabulate(serving_results,
                                headers='keys',
                                tablefmt='pipe',
                                showindex=False)
    throughput_md_table = tabulate(throughput_results,
                                   headers='keys',
                                   tablefmt='pipe',
                                   showindex=False)

    # document the result
    with open(results_folder / "benchmark_results.md", "w") as f:

        results = read_markdown("../.buildkite/nightly-benchmarks/" +
                                "performance-benchmarks-descriptions.md")
        results = results.format(
            latency_tests_markdown_table=latency_md_table,
            throughput_tests_markdown_table=throughput_md_table,
            serving_tests_markdown_table=serving_md_table,
            benchmarking_results_in_json_string=processed_results_json)
        f.write(results)

    # document benchmarking results in json
    with open(results_folder / "benchmark_results.json", "w") as f:

        results = latency_results.to_dict(
            orient='records') + throughput_results.to_dict(
                orient='records') + serving_results.to_dict(orient='records')
        f.write(json.dumps(results))
.buildkite/nightly-benchmarks/scripts/download-tokenizer.py (new file, 28 lines)
@@ -0,0 +1,28 @@
# SPDX-License-Identifier: Apache-2.0

import argparse

from transformers import AutoTokenizer


def main(model, cachedir):
    # Load the tokenizer and save it to the specified directory
    tokenizer = AutoTokenizer.from_pretrained(model)
    tokenizer.save_pretrained(cachedir)
    print(f"Tokenizer saved to {cachedir}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Download and save Hugging Face tokenizer")
    parser.add_argument("--model",
                        type=str,
                        required=True,
                        help="Name of the model")
    parser.add_argument("--cachedir",
                        type=str,
                        required=True,
                        help="Directory to save the tokenizer")

    args = parser.parse_args()
    main(args.model, args.cachedir)
@@ -0,0 +1,97 @@
# SPDX-License-Identifier: Apache-2.0

import argparse
import json
from pathlib import Path

import numpy as np
import pandas as pd
from tabulate import tabulate


def parse_arguments():
    parser = argparse.ArgumentParser(
        description=
        'Parse command line arguments for summary-nightly-results script.')
    parser.add_argument('--results-folder',
                        type=str,
                        required=True,
                        help='The folder where the results are stored.')
    parser.add_argument('--description',
                        type=str,
                        required=True,
                        help='Description of the results.')

    args = parser.parse_args()
    return args


def get_perf(df, method, model, metric):

    means = []

    for qps in [2, 4, 8, 16, "inf"]:
        target = df['Test name'].str.contains(model)
        target = target & df['Engine'].str.contains(method)
        target = target & df['Test name'].str.contains("qps_" + str(qps))
        filtered_df = df[target]

        if filtered_df.empty:
            means.append(0.)
        else:
            means.append(filtered_df[metric].values[0])

    return np.array(means)


def get_perf_w_std(df, method, model, metric):

    if metric in ["TTFT", "ITL"]:
        mean = get_perf(df, method, model, "Mean " + metric + " (ms)")
        mean = mean.tolist()
        std = get_perf(df, method, model, "Std " + metric + " (ms)")
        if std.mean() == 0:
            std = None
        success = get_perf(df, method, model, "Successful req.")
        if std is not None:
            std = std / np.sqrt(success)
            std = std.tolist()

    else:
        assert metric == "Tput"
        mean = get_perf(df, method, model, "Input Tput (tok/s)") + get_perf(
            df, method, model, "Output Tput (tok/s)")
        mean = mean.tolist()
        std = None

    return mean, std


def main(args):
    results_folder = Path(args.results_folder)

    results = []

    # collect results
    for test_file in results_folder.glob("*_nightly_results.json"):
        with open(test_file) as f:
            results = results + json.loads(f.read())

    # generate markdown table
    df = pd.DataFrame.from_dict(results)

    md_table = tabulate(df, headers='keys', tablefmt='pipe', showindex=False)

    with open(args.description) as f:
        description = f.read()

    description = description.format(
        nightly_results_benchmarking_table=md_table)

    with open("nightly_results.md", "w") as f:
        f.write(description)


if __name__ == '__main__':
    args = parse_arguments()
    main(args)
@@ -0,0 +1,8 @@
# SPDX-License-Identifier: Apache-2.0

from lmdeploy.serve.openai.api_client import APIClient

api_client = APIClient("http://localhost:8000")
model_name = api_client.available_models[0]

print(model_name)
@@ -181,14 +181,18 @@ launch_vllm_server() {
   if echo "$common_params" | jq -e 'has("fp8")' >/dev/null; then
     echo "Key 'fp8' exists in common params. Use neuralmagic fp8 model for convenience."
     model=$(echo "$common_params" | jq -r '.neuralmagic_quantized_model')
-    server_command="vllm serve $model \
+    server_command="python3 \
+      -m vllm.entrypoints.openai.api_server \
       -tp $tp \
+      --model $model \
       --port $port \
       $server_args"
   else
     echo "Key 'fp8' does not exist in common params."
-    server_command="vllm serve $model \
+    server_command="python3 \
+      -m vllm.entrypoints.openai.api_server \
       -tp $tp \
+      --model $model \
       --port $port \
       $server_args"
   fi
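The hunk above replaces the `vllm serve $model ...` invocation with the `python3 -m vllm.entrypoints.openai.api_server --model $model ...` form; both spellings launch the same OpenAI-compatible server, with the module invocation being the older entry point that predates the `vllm serve` CLI command.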
.buildkite/nightly-benchmarks/scripts/nightly-annotate.sh (new file, 78 lines)
@@ -0,0 +1,78 @@
#!/bin/bash

set -ex
set -o pipefail


main() {

  (which wget && which curl) || (apt-get update && apt-get install -y wget curl)
  (which jq) || (apt-get update && apt-get -y install jq)
  (which zip) || (apt-get install -y zip)

  if [ ! -f /workspace/buildkite-agent ]; then
    echo "buildkite-agent binary not found. Skip plotting the results."
    exit 0
  fi

  # initial annotation
  #description="$VLLM_SOURCE_CODE_LOC/.buildkite/nightly-benchmarks/nightly-descriptions.md"

  # download results
  cd "$VLLM_SOURCE_CODE_LOC/benchmarks"
  mkdir -p results/
  /workspace/buildkite-agent artifact download 'results/*nightly_results.json' results/
  ls
  ls results/

  # upload benchmark results
  zip -r results.zip results/
  /workspace/buildkite-agent artifact upload "results.zip"

  # upload benchmarking scripts
  cd "$VLLM_SOURCE_CODE_LOC/"
  zip -r nightly-benchmarks.zip .buildkite/ benchmarks/
  /workspace/buildkite-agent artifact upload "nightly-benchmarks.zip"

  cd "$VLLM_SOURCE_CODE_LOC/.buildkite/nightly-benchmarks/"
  # upload benchmarking pipeline
  /workspace/buildkite-agent artifact upload "nightly-pipeline.yaml"

  cd "$VLLM_SOURCE_CODE_LOC/.buildkite/nightly-benchmarks/"
  /workspace/buildkite-agent annotate --style "success" --context "nightly-benchmarks-results" --append < nightly-annotation.md


  # The figures should be generated by a separate process outside the CI/CD pipeline

  # # generate figures
  # python3 -m pip install tabulate pandas matplotlib

  # python3 $VLLM_SOURCE_CODE_LOC/.buildkite/nightly-benchmarks/scripts/generate-nightly-markdown.py \
  #   --description $description \
  #   --results-folder results/

  # python3 $VLLM_SOURCE_CODE_LOC/.buildkite/nightly-benchmarks/scripts/plot-nightly-results.py \
  #   --description $description \
  #   --results-folder results/ \
  #   --dataset sharegpt

  # python3 $VLLM_SOURCE_CODE_LOC/.buildkite/nightly-benchmarks/scripts/plot-nightly-results.py \
  #   --description $description \
  #   --results-folder results/ \
  #   --dataset sonnet_2048_128

  # python3 $VLLM_SOURCE_CODE_LOC/.buildkite/nightly-benchmarks/scripts/plot-nightly-results.py \
  #   --description $description \
  #   --results-folder results/ \
  #   --dataset sonnet_128_2048

  # # upload results and figures
  # /workspace/buildkite-agent artifact upload "nightly_results*.png"
  # /workspace/buildkite-agent artifact upload $VLLM_SOURCE_CODE_LOC/.buildkite/nightly-benchmarks/nightly-pipeline.yaml
  # /workspace/buildkite-agent artifact upload $VLLM_SOURCE_CODE_LOC/.buildkite/nightly-benchmarks/tests/nightly-tests.json
  # /workspace/buildkite-agent annotate --style "success" --context "nightly-benchmarks-results" --append < nightly_results.md
}

main "$@"
.buildkite/nightly-benchmarks/scripts/run-nightly-benchmarks.sh (new file, 462 lines)
@@ -0,0 +1,462 @@
#!/bin/bash

set -o pipefail
set -x

check_gpus() {
  # check the number of GPUs and GPU type.
  declare -g gpu_count=$(nvidia-smi --list-gpus | wc -l)
  if [[ $gpu_count -gt 0 ]]; then
    echo "GPU found."
  else
    echo "Need at least 1 GPU to run benchmarking."
    exit 1
  fi
  declare -g gpu_type="$(nvidia-smi --query-gpu=name --format=csv,noheader | awk '{print $2}')"
  echo "GPU type is $gpu_type"
}

check_hf_token() {
  # check if HF_TOKEN is available and valid
  if [[ -z "$HF_TOKEN" ]]; then
    echo "Error: HF_TOKEN is not set."
    exit 1
  elif [[ ! "$HF_TOKEN" =~ ^hf_ ]]; then
    echo "Error: HF_TOKEN does not start with 'hf_'."
    exit 1
  else
    echo "HF_TOKEN is set and valid."
  fi
}


upload_to_buildkite() {
  # upload the benchmarking results to buildkite

  # if the agent binary is not found, skip uploading the results, exit 0
  if [ ! -f /workspace/buildkite-agent ]; then
    echo "buildkite-agent binary not found. Skip uploading the results."
    return 0
  fi
  # /workspace/buildkite-agent annotate --style "success" --context "benchmark-results" --append < $RESULTS_FOLDER/${CURRENT_LLM_SERVING_ENGINE}_nightly_results.md
  /workspace/buildkite-agent artifact upload "$RESULTS_FOLDER/*"
}


get_current_llm_serving_engine() {

  if which lmdeploy >/dev/null; then
    echo "Container: lmdeploy"
    export CURRENT_LLM_SERVING_ENGINE=lmdeploy
    return
  fi

  if [ -e /tgi-entrypoint.sh ]; then
    echo "Container: tgi"
    export CURRENT_LLM_SERVING_ENGINE=tgi
    return
  fi

  if which trtllm-build >/dev/null; then
    echo "Container: tensorrt-llm"
    export CURRENT_LLM_SERVING_ENGINE=trt
    return
  fi

  if [ -e /sgl-workspace ]; then
    echo "Container: sglang"
    export CURRENT_LLM_SERVING_ENGINE=sglang
    return
  fi

  if [ -e /vllm-workspace ]; then
    echo "Container: vllm"
    # move to a completely irrelevant directory, to avoid import vllm from current folder
    export CURRENT_LLM_SERVING_ENGINE=vllm

    return
  fi
}

json2args() {
  # transforms the JSON string to command line args, and '_' is replaced to '-'
  # example:
  # input: { "model": "meta-llama/Llama-2-7b-chat-hf", "tensor_parallel_size": 1 }
  # output: --model meta-llama/Llama-2-7b-chat-hf --tensor-parallel-size 1
  local json_string=$1
  local args=$(
    echo "$json_string" | jq -r '
      to_entries |
      map("--" + (.key | gsub("_"; "-")) + " " + (.value | tostring)) |
      join(" ")
    '
  )
  echo "$args"
}

kill_gpu_processes() {
  pkill -f python
  pkill -f python3
  pkill -f tritonserver
  pkill -f pt_main_thread
  pkill -f text-generation
  pkill -f lmdeploy

  while [ "$(nvidia-smi --query-gpu=memory.used --format=csv,noheader,nounits | head -n 1)" -ge 1000 ]; do
    sleep 1
  done
}

wait_for_server() {
  # wait for vllm server to start
  # return 1 if vllm server crashes
  timeout 1200 bash -c '
    until curl -s localhost:8000/v1/completions > /dev/null; do
      sleep 1
    done' && return 0 || return 1
}

ensure_installed() {
  # Ensure that the given command is installed by apt-get
  local cmd=$1
  if ! which "$cmd" >/dev/null; then
    apt-get update && apt-get install -y "$cmd"
  fi
}

run_serving_tests() {
  # run serving tests using `benchmark_serving.py`
  # $1: a json file specifying serving test cases

  local serving_test_file
  serving_test_file=$1

  # Iterate over serving tests
  jq -c '.[]' "$serving_test_file" | while read -r params; do
    # get the test name, and append the GPU type back to it.
    test_name=$(echo "$params" | jq -r '.test_name')

    # if TEST_SELECTOR is set, only run the test cases that match the selector
    if [[ -n "$TEST_SELECTOR" ]] && [[ ! "$test_name" =~ $TEST_SELECTOR ]]; then
      echo "Skip test case $test_name."
      continue
    fi

    # prepend the current serving engine to the test name
    test_name=${CURRENT_LLM_SERVING_ENGINE}_${test_name}

    # get common parameters
    common_params=$(echo "$params" | jq -r '.common_parameters')
    model=$(echo "$common_params" | jq -r '.model')
    tp=$(echo "$common_params" | jq -r '.tp')
    dataset_name=$(echo "$common_params" | jq -r '.dataset_name')
    dataset_path=$(echo "$common_params" | jq -r '.dataset_path')
    port=$(echo "$common_params" | jq -r '.port')
    num_prompts=$(echo "$common_params" | jq -r '.num_prompts')
    reuse_server=$(echo "$common_params" | jq -r '.reuse_server')

    # get client and server arguments
    server_params=$(echo "$params" | jq -r ".${CURRENT_LLM_SERVING_ENGINE}_server_parameters")
    client_params=$(echo "$params" | jq -r ".${CURRENT_LLM_SERVING_ENGINE}_client_parameters")
    client_args=$(json2args "$client_params")
    qps_list=$(echo "$params" | jq -r '.qps_list')
    qps_list=$(echo "$qps_list" | jq -r '.[] | @sh')
    echo "Running over qps list $qps_list"

    # check if there is enough GPU to run the test
    if [[ $gpu_count -lt $tp ]]; then
      echo "Required num-shard $tp but only $gpu_count GPU found. Skip testcase $test_name."
      continue
    fi

    if [[ $reuse_server == "true" ]]; then
      echo "Reuse previous server for test case $test_name"
    else
      kill_gpu_processes
      bash "$VLLM_SOURCE_CODE_LOC/.buildkite/nightly-benchmarks/scripts/launch-server.sh" \
        "$server_params" "$common_params"
    fi

    if wait_for_server; then
      echo ""
      echo "$CURRENT_LLM_SERVING_ENGINE server is up and running."
    else
      echo ""
      echo "$CURRENT_LLM_SERVING_ENGINE failed to start within the timeout period."
      break
    fi

    # prepare tokenizer
    # this is required for lmdeploy.
    cd "$VLLM_SOURCE_CODE_LOC/benchmarks"
    rm -rf /tokenizer_cache
    mkdir /tokenizer_cache
    python3 ../.buildkite/nightly-benchmarks/scripts/download-tokenizer.py \
      --model "$model" \
      --cachedir /tokenizer_cache
    cd "$VLLM_SOURCE_CODE_LOC/benchmarks"


    # change model name for lmdeploy (it will not follow standard hf name)
    if [[ "$CURRENT_LLM_SERVING_ENGINE" == "lmdeploy" ]]; then
      model=$(python ../.buildkite/nightly-benchmarks/scripts/get-lmdeploy-modelname.py)
    fi

    # iterate over different QPS
    for qps in $qps_list; do
      # remove the surrounding single quote from qps
      if [[ "$qps" == *"inf"* ]]; then
        echo "qps was $qps"
        qps="inf"
        echo "now qps is $qps"
      fi

      new_test_name=$test_name"_qps_"$qps

      backend=$CURRENT_LLM_SERVING_ENGINE

      if [[ $backend = "trt" ]]; then
        backend="tensorrt-llm"
      fi

      if [[ "$backend" == *"vllm"* ]]; then
        backend="vllm"
      fi

      if [[ "$dataset_name" = "sharegpt" ]]; then

        client_command="python3 benchmark_serving.py \
          --backend $backend \
          --tokenizer /tokenizer_cache \
          --model $model \
          --dataset-name $dataset_name \
          --dataset-path $dataset_path \
          --num-prompts $num_prompts \
          --port $port \
          --save-result \
          --result-dir $RESULTS_FOLDER \
          --result-filename ${new_test_name}.json \
          --request-rate $qps \
          --ignore-eos \
          $client_args"

      elif [[ "$dataset_name" = "sonnet" ]]; then

        sonnet_input_len=$(echo "$common_params" | jq -r '.sonnet_input_len')
        sonnet_output_len=$(echo "$common_params" | jq -r '.sonnet_output_len')
        sonnet_prefix_len=$(echo "$common_params" | jq -r '.sonnet_prefix_len')

        client_command="python3 benchmark_serving.py \
          --backend $backend \
          --tokenizer /tokenizer_cache \
          --model $model \
          --dataset-name $dataset_name \
          --dataset-path $dataset_path \
          --num-prompts $num_prompts \
          --sonnet-input-len $sonnet_input_len \
          --sonnet-output-len $sonnet_output_len \
          --sonnet-prefix-len $sonnet_prefix_len \
          --port $port \
          --save-result \
          --result-dir $RESULTS_FOLDER \
          --result-filename ${new_test_name}.json \
          --request-rate $qps \
          --ignore-eos \
          $client_args"

      else

        echo "The dataset name must be either 'sharegpt' or 'sonnet'. Got $dataset_name."
        exit 1

      fi

      echo "Running test case $test_name with qps $qps"
      echo "Client command: $client_command"

      eval "$client_command"

      server_command="None"

      # record the benchmarking commands
      jq_output=$(jq -n \
        --arg server "$server_command" \
        --arg client "$client_command" \
        --arg gpu "$gpu_type" \
        --arg engine "$CURRENT_LLM_SERVING_ENGINE" \
        '{
          server_command: $server,
          client_command: $client,
          gpu_type: $gpu,
          engine: $engine
        }')
      echo "$jq_output" >"$RESULTS_FOLDER/${new_test_name}.commands"

    done

  done

  kill_gpu_processes
}

run_genai_perf_tests() {
  # run genai-perf tests

  # $1: a json file specifying genai-perf test cases
  local genai_perf_test_file
  genai_perf_test_file=$1

  # Iterate over genai-perf tests
  jq -c '.[]' "$genai_perf_test_file" | while read -r params; do
    # get the test name, and append the GPU type back to it.
    test_name=$(echo "$params" | jq -r '.test_name')

    # if TEST_SELECTOR is set, only run the test cases that match the selector
    if [[ -n "$TEST_SELECTOR" ]] && [[ ! "$test_name" =~ $TEST_SELECTOR ]]; then
      echo "Skip test case $test_name."
      continue
    fi

    # prepend the current serving engine to the test name
    test_name=${CURRENT_LLM_SERVING_ENGINE}_${test_name}

    # get common parameters
    common_params=$(echo "$params" | jq -r '.common_parameters')
    model=$(echo "$common_params" | jq -r '.model')
    tp=$(echo "$common_params" | jq -r '.tp')
    dataset_name=$(echo "$common_params" | jq -r '.dataset_name')
    dataset_path=$(echo "$common_params" | jq -r '.dataset_path')
    port=$(echo "$common_params" | jq -r '.port')
    num_prompts=$(echo "$common_params" | jq -r '.num_prompts')
    reuse_server=$(echo "$common_params" | jq -r '.reuse_server')

    # get client and server arguments
    server_params=$(echo "$params" | jq -r ".${CURRENT_LLM_SERVING_ENGINE}_server_parameters")
    qps_list=$(echo "$params" | jq -r '.qps_list')
    qps_list=$(echo "$qps_list" | jq -r '.[] | @sh')
    echo "Running over qps list $qps_list"

    # check if there is enough GPU to run the test
    if [[ $gpu_count -lt $tp ]]; then
      echo "Required num-shard $tp but only $gpu_count GPU found. Skip testcase $test_name."
      continue
    fi

    if [[ $reuse_server == "true" ]]; then
      echo "Reuse previous server for test case $test_name"
    else
      kill_gpu_processes
      bash "$VLLM_SOURCE_CODE_LOC/.buildkite/nightly-benchmarks/scripts/launch-server.sh" \
        "$server_params" "$common_params"
    fi

    if wait_for_server; then
      echo ""
      echo "$CURRENT_LLM_SERVING_ENGINE server is up and running."
    else
      echo ""
      echo "$CURRENT_LLM_SERVING_ENGINE failed to start within the timeout period."
      break
    fi

    # iterate over different QPS
    for qps in $qps_list; do
      # remove the surrounding single quote from qps
      if [[ "$qps" == *"inf"* ]]; then
        echo "qps was $qps"
        qps=$num_prompts
        echo "now qps is $qps"
      fi

      new_test_name=$test_name"_qps_"$qps
      backend=$CURRENT_LLM_SERVING_ENGINE

      if [[ "$backend" == *"vllm"* ]]; then
        backend="vllm"
      fi
      #TODO: add output dir.
      client_command="genai-perf profile \
        -m $model \
        --service-kind openai \
        --backend vllm \
        --endpoint-type chat \
        --streaming \
        --url localhost:$port \
        --request-rate $qps \
        --num-prompts $num_prompts \
        "

      echo "Client command: $client_command"

      eval "$client_command"

      #TODO: process/record outputs
    done
  done

  kill_gpu_processes

}

prepare_dataset() {

  # download sharegpt dataset
  cd "$VLLM_SOURCE_CODE_LOC/benchmarks"
  wget https://huggingface.co/datasets/anon8231489123/ShareGPT_Vicuna_unfiltered/resolve/main/ShareGPT_V3_unfiltered_cleaned_split.json

  # duplicate sonnet by 4x, to allow benchmarking with input length 2048
  cd "$VLLM_SOURCE_CODE_LOC/benchmarks"
  echo "" > sonnet_4x.txt
  for _ in {1..4}
  do
    cat sonnet.txt >> sonnet_4x.txt
  done

}

main() {

  # check if the environment variable is successfully injected from yaml

  check_gpus
  check_hf_token
  get_current_llm_serving_engine

  pip install -U transformers

  pip install -r requirements/dev.txt
  which genai-perf

  # check storage
  df -h

  ensure_installed wget
  ensure_installed curl
  ensure_installed jq
  # genai-perf dependency
  ensure_installed libb64-0d

  prepare_dataset

  cd "$VLLM_SOURCE_CODE_LOC/benchmarks"
  declare -g RESULTS_FOLDER=results/
  mkdir -p $RESULTS_FOLDER
  BENCHMARK_ROOT="$VLLM_SOURCE_CODE_LOC/.buildkite/nightly-benchmarks/"

  # run the test
  run_serving_tests "$BENCHMARK_ROOT/tests/nightly-tests.json"

  # run genai-perf tests
  run_genai_perf_tests "$BENCHMARK_ROOT/tests/genai-perf-tests.json"
  mv artifacts/ $RESULTS_FOLDER/

  # upload benchmark results to buildkite
  python3 -m pip install tabulate pandas
  python3 "$BENCHMARK_ROOT/scripts/summary-nightly-results.py"
  upload_to_buildkite

}

main "$@"
@@ -0,0 +1,400 @@
#!/bin/bash

# This script should be run inside the CI process
# This script assumes that we are already inside the vllm/ directory
# Benchmarking results will be available inside vllm/benchmarks/results/

# Do not set -e, as the mixtral 8x22B model tends to crash occasionally
# and we still want to see other benchmarking results even when mixtral crashes.
set -x
set -o pipefail

check_gpus() {
  if command -v nvidia-smi; then
    # check the number of GPUs and GPU type.
    declare -g gpu_count=$(nvidia-smi --list-gpus | wc -l)
  elif command -v amd-smi; then
    declare -g gpu_count=$(amd-smi list | grep 'GPU' | wc -l)
  fi

  if [[ $gpu_count -gt 0 ]]; then
    echo "GPU found."
  else
    echo "Need at least 1 GPU to run benchmarking."
    exit 1
  fi
  if command -v nvidia-smi; then
    declare -g gpu_type=$(nvidia-smi --query-gpu=name --format=csv,noheader | awk '{print $2}')
  elif command -v amd-smi; then
    declare -g gpu_type=$(amd-smi static -g 0 -a | grep 'MARKET_NAME' | awk '{print $2}')
  fi
  echo "GPU type is $gpu_type"
}

check_hf_token() {
  # check if HF_TOKEN is available and valid
  if [[ -z "$HF_TOKEN" ]]; then
    echo "Error: HF_TOKEN is not set."
    exit 1
  elif [[ ! "$HF_TOKEN" =~ ^hf_ ]]; then
    echo "Error: HF_TOKEN does not start with 'hf_'."
    exit 1
  else
    echo "HF_TOKEN is set and valid."
  fi
}

ensure_sharegpt_downloaded() {
  local FILE=ShareGPT_V3_unfiltered_cleaned_split.json
  if [ ! -f "$FILE" ]; then
    wget https://huggingface.co/datasets/anon8231489123/ShareGPT_Vicuna_unfiltered/resolve/main/$FILE
  else
    echo "$FILE already exists."
  fi
}

json2args() {
  # transforms the JSON string to command line args, and '_' is replaced to '-'
  # example:
  # input: { "model": "meta-llama/Llama-2-7b-chat-hf", "tensor_parallel_size": 1 }
  # output: --model meta-llama/Llama-2-7b-chat-hf --tensor-parallel-size 1
  local json_string=$1
  local args=$(
    echo "$json_string" | jq -r '
      to_entries |
      map("--" + (.key | gsub("_"; "-")) + " " + (.value | tostring)) |
      join(" ")
    '
  )
  echo "$args"
}

wait_for_server() {
  # wait for vllm server to start
  # return 1 if vllm server crashes
  timeout 1200 bash -c '
    until curl -X POST localhost:8000/v1/completions; do
      sleep 1
    done' && return 0 || return 1
}

kill_processes_launched_by_current_bash() {
  # Kill all python processes launched from current bash script
  current_shell_pid=$$
  processes=$(ps -eo pid,ppid,command | awk -v ppid="$current_shell_pid" -v proc="$1" '$2 == ppid && $3 ~ proc {print $1}')
  if [ -n "$processes" ]; then
    echo "Killing the following processes matching '$1':"
    echo "$processes"
    echo "$processes" | xargs kill -9
  else
    echo "No processes found matching '$1'."
  fi
}

kill_gpu_processes() {
|
|
||||||
|
ps -aux
|
||||||
|
lsof -t -i:8000 | xargs -r kill -9
|
||||||
|
pgrep python3 | xargs -r kill -9
|
||||||
|
|
||||||
|
|
||||||
|
# wait until GPU memory usage smaller than 1GB
|
||||||
|
if command -v nvidia-smi; then
|
||||||
|
while [ "$(nvidia-smi --query-gpu=memory.used --format=csv,noheader,nounits | head -n 1)" -ge 1000 ]; do
|
||||||
|
sleep 1
|
||||||
|
done
|
||||||
|
elif command -v amd-smi; then
|
||||||
|
while [ "$(amd-smi metric -g 0 | grep 'USED_VRAM' | awk '{print $2}')" -ge 1000 ]; do
|
||||||
|
sleep 1
|
||||||
|
done
|
||||||
|
fi
|
||||||
|
|
||||||
|
# remove vllm config file
|
||||||
|
rm -rf ~/.config/vllm
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
upload_to_buildkite() {
|
||||||
|
# upload the benchmarking results to buildkite
|
||||||
|
|
||||||
|
# if the agent binary is not found, skip uploading the results, exit 0
|
||||||
|
# Check if buildkite-agent is available in the PATH or at /workspace/buildkite-agent
|
||||||
|
if command -v buildkite-agent >/dev/null 2>&1; then
|
||||||
|
BUILDKITE_AGENT_COMMAND="buildkite-agent"
|
||||||
|
elif [ -f /workspace/buildkite-agent ]; then
|
||||||
|
BUILDKITE_AGENT_COMMAND="/workspace/buildkite-agent"
|
||||||
|
else
|
||||||
|
echo "buildkite-agent binary not found. Skip uploading the results."
|
||||||
|
return 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Use the determined command to annotate and upload artifacts
|
||||||
|
$BUILDKITE_AGENT_COMMAND annotate --style "info" --context "$BUILDKITE_LABEL-benchmark-results" < "$RESULTS_FOLDER/benchmark_results.md"
|
||||||
|
$BUILDKITE_AGENT_COMMAND artifact upload "$RESULTS_FOLDER/*"
|
||||||
|
}
|
||||||
|
|
||||||
|
run_latency_tests() {
|
||||||
|
# run latency tests using `benchmark_latency.py`
|
||||||
|
# $1: a json file specifying latency test cases
|
||||||
|
|
||||||
|
local latency_test_file
|
||||||
|
latency_test_file=$1
|
||||||
|
|
||||||
|
# Iterate over latency tests
|
||||||
|
jq -c '.[]' "$latency_test_file" | while read -r params; do
|
||||||
|
# get the test name, and append the GPU type back to it.
|
||||||
|
test_name=$(echo "$params" | jq -r '.test_name')
|
||||||
|
if [[ ! "$test_name" =~ ^latency_ ]]; then
|
||||||
|
echo "In latency-test.json, test_name must start with \"latency_\"."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# if TEST_SELECTOR is set, only run the test cases that match the selector
|
||||||
|
if [[ -n "$TEST_SELECTOR" ]] && [[ ! "$test_name" =~ $TEST_SELECTOR ]]; then
|
||||||
|
echo "Skip test case $test_name."
|
||||||
|
continue
|
||||||
|
fi
|
||||||
|
|
||||||
|
# get arguments
|
||||||
|
latency_params=$(echo "$params" | jq -r '.parameters')
|
||||||
|
latency_args=$(json2args "$latency_params")
|
||||||
|
|
||||||
|
# check if there is enough GPU to run the test
|
||||||
|
tp=$(echo "$latency_params" | jq -r '.tensor_parallel_size')
|
||||||
|
if [[ $gpu_count -lt $tp ]]; then
|
||||||
|
echo "Required tensor-parallel-size $tp but only $gpu_count GPU found. Skip testcase $test_name."
|
||||||
|
continue
|
||||||
|
fi
|
||||||
|
|
||||||
|
latency_command="python3 benchmark_latency.py \
|
||||||
|
--output-json $RESULTS_FOLDER/${test_name}.json \
|
||||||
|
$latency_args"
|
||||||
|
|
||||||
|
echo "Running test case $test_name"
|
||||||
|
echo "Latency command: $latency_command"
|
||||||
|
|
||||||
|
# recoding benchmarking command ang GPU command
|
||||||
|
jq_output=$(jq -n \
|
||||||
|
--arg latency "$latency_command" \
|
||||||
|
--arg gpu "$gpu_type" \
|
||||||
|
'{
|
||||||
|
latency_command: $latency,
|
||||||
|
gpu_type: $gpu
|
||||||
|
}')
|
||||||
|
echo "$jq_output" >"$RESULTS_FOLDER/$test_name.commands"
|
||||||
|
|
||||||
|
# run the benchmark
|
||||||
|
eval "$latency_command"
|
||||||
|
|
||||||
|
kill_gpu_processes
|
||||||
|
|
||||||
|
done
|
||||||
|
}
|
||||||
|
|
||||||
|
run_throughput_tests() {
|
||||||
|
# run throughput tests using `benchmark_throughput.py`
|
||||||
|
# $1: a json file specifying throughput test cases
|
||||||
|
|
||||||
|
local throughput_test_file
|
||||||
|
throughput_test_file=$1
|
||||||
|
|
||||||
|
# Iterate over throughput tests
|
||||||
|
jq -c '.[]' "$throughput_test_file" | while read -r params; do
|
||||||
|
# get the test name, and append the GPU type back to it.
|
||||||
|
test_name=$(echo "$params" | jq -r '.test_name')
|
||||||
|
if [[ ! "$test_name" =~ ^throughput_ ]]; then
|
||||||
|
echo "In throughput-test.json, test_name must start with \"throughput_\"."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# if TEST_SELECTOR is set, only run the test cases that match the selector
|
||||||
|
if [[ -n "$TEST_SELECTOR" ]] && [[ ! "$test_name" =~ $TEST_SELECTOR ]]; then
|
||||||
|
echo "Skip test case $test_name."
|
||||||
|
continue
|
||||||
|
fi
|
||||||
|
|
||||||
|
# get arguments
|
||||||
|
throughput_params=$(echo "$params" | jq -r '.parameters')
|
||||||
|
throughput_args=$(json2args "$throughput_params")
|
||||||
|
|
||||||
|
# check if there is enough GPU to run the test
|
||||||
|
tp=$(echo "$throughput_params" | jq -r '.tensor_parallel_size')
|
||||||
|
if [[ $gpu_count -lt $tp ]]; then
|
||||||
|
echo "Required tensor-parallel-size $tp but only $gpu_count GPU found. Skip testcase $test_name."
|
||||||
|
continue
|
||||||
|
fi
|
||||||
|
|
||||||
|
throughput_command="python3 benchmark_throughput.py \
|
||||||
|
--output-json $RESULTS_FOLDER/${test_name}.json \
|
||||||
|
$throughput_args"
|
||||||
|
|
||||||
|
echo "Running test case $test_name"
|
||||||
|
echo "Throughput command: $throughput_command"
|
||||||
|
# recoding benchmarking command ang GPU command
|
||||||
|
jq_output=$(jq -n \
|
||||||
|
--arg command "$throughput_command" \
|
||||||
|
--arg gpu "$gpu_type" \
|
||||||
|
'{
|
||||||
|
throughput_command: $command,
|
||||||
|
gpu_type: $gpu
|
||||||
|
}')
|
||||||
|
echo "$jq_output" >"$RESULTS_FOLDER/$test_name.commands"
|
||||||
|
|
||||||
|
# run the benchmark
|
||||||
|
eval "$throughput_command"
|
||||||
|
|
||||||
|
kill_gpu_processes
|
||||||
|
|
||||||
|
done
|
||||||
|
}
|
||||||
|
|
||||||
|
run_serving_tests() {
|
||||||
|
# run serving tests using `benchmark_serving.py`
|
||||||
|
# $1: a json file specifying serving test cases
|
||||||
|
|
||||||
|
local serving_test_file
|
||||||
|
serving_test_file=$1
|
||||||
|
|
||||||
|
# Iterate over serving tests
|
||||||
|
jq -c '.[]' "$serving_test_file" | while read -r params; do
|
||||||
|
# get the test name, and append the GPU type back to it.
|
||||||
|
test_name=$(echo "$params" | jq -r '.test_name')
|
||||||
|
if [[ ! "$test_name" =~ ^serving_ ]]; then
|
||||||
|
echo "In serving-test.json, test_name must start with \"serving_\"."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# if TEST_SELECTOR is set, only run the test cases that match the selector
|
||||||
|
if [[ -n "$TEST_SELECTOR" ]] && [[ ! "$test_name" =~ $TEST_SELECTOR ]]; then
|
||||||
|
echo "Skip test case $test_name."
|
||||||
|
continue
|
||||||
|
fi
|
||||||
|
|
||||||
|
# get client and server arguments
|
||||||
|
server_params=$(echo "$params" | jq -r '.server_parameters')
|
||||||
|
client_params=$(echo "$params" | jq -r '.client_parameters')
|
||||||
|
server_args=$(json2args "$server_params")
|
||||||
|
client_args=$(json2args "$client_params")
|
||||||
|
qps_list=$(echo "$params" | jq -r '.qps_list')
|
||||||
|
qps_list=$(echo "$qps_list" | jq -r '.[] | @sh')
|
||||||
|
echo "Running over qps list $qps_list"
|
||||||
|
|
||||||
|
# check if there is enough GPU to run the test
|
||||||
|
tp=$(echo "$server_params" | jq -r '.tensor_parallel_size')
|
||||||
|
if [[ $gpu_count -lt $tp ]]; then
|
||||||
|
echo "Required tensor-parallel-size $tp but only $gpu_count GPU found. Skip testcase $test_name."
|
||||||
|
continue
|
||||||
|
fi
|
||||||
|
|
||||||
|
# check if server model and client model is aligned
|
||||||
|
server_model=$(echo "$server_params" | jq -r '.model')
|
||||||
|
client_model=$(echo "$client_params" | jq -r '.model')
|
||||||
|
if [[ $server_model != "$client_model" ]]; then
|
||||||
|
echo "Server model and client model must be the same. Skip testcase $test_name."
|
||||||
|
continue
|
||||||
|
fi
|
||||||
|
|
||||||
|
server_command="python3 \
|
||||||
|
-m vllm.entrypoints.openai.api_server \
|
||||||
|
$server_args"
|
||||||
|
|
||||||
|
# run the server
|
||||||
|
echo "Running test case $test_name"
|
||||||
|
echo "Server command: $server_command"
|
||||||
|
bash -c "$server_command" &
|
||||||
|
server_pid=$!
|
||||||
|
|
||||||
|
# wait until the server is alive
|
||||||
|
if wait_for_server; then
|
||||||
|
echo ""
|
||||||
|
echo "vllm server is up and running."
|
||||||
|
else
|
||||||
|
echo ""
|
||||||
|
echo "vllm failed to start within the timeout period."
|
||||||
|
fi
|
||||||
|
|
||||||
|
# iterate over different QPS
|
||||||
|
for qps in $qps_list; do
|
||||||
|
# remove the surrounding single quote from qps
|
||||||
|
if [[ "$qps" == *"inf"* ]]; then
|
||||||
|
echo "qps was $qps"
|
||||||
|
qps="inf"
|
||||||
|
echo "now qps is $qps"
|
||||||
|
fi
|
||||||
|
|
||||||
|
new_test_name=$test_name"_qps_"$qps
|
||||||
|
|
||||||
|
# pass the tensor parallel size to the client so that it can be displayed
|
||||||
|
# on the benchmark dashboard
|
||||||
|
client_command="python3 benchmark_serving.py \
|
||||||
|
--save-result \
|
||||||
|
--result-dir $RESULTS_FOLDER \
|
||||||
|
--result-filename ${new_test_name}.json \
|
||||||
|
--request-rate $qps \
|
||||||
|
--metadata "tensor_parallel_size=$tp" \
|
||||||
|
$client_args"
|
||||||
|
|
||||||
|
echo "Running test case $test_name with qps $qps"
|
||||||
|
echo "Client command: $client_command"
|
||||||
|
|
||||||
|
bash -c "$client_command"
|
||||||
|
|
||||||
|
# record the benchmarking commands
|
||||||
|
jq_output=$(jq -n \
|
||||||
|
--arg server "$server_command" \
|
||||||
|
--arg client "$client_command" \
|
||||||
|
--arg gpu "$gpu_type" \
|
||||||
|
'{
|
||||||
|
server_command: $server,
|
||||||
|
client_command: $client,
|
||||||
|
gpu_type: $gpu
|
||||||
|
}')
|
||||||
|
echo "$jq_output" >"$RESULTS_FOLDER/${new_test_name}.commands"
|
||||||
|
|
||||||
|
done
|
||||||
|
|
||||||
|
# clean up
|
||||||
|
kill -9 $server_pid
|
||||||
|
kill_gpu_processes
|
||||||
|
done
|
||||||
|
}
|
||||||
|
|
||||||
|
main() {
|
||||||
|
check_gpus
|
||||||
|
check_hf_token
|
||||||
|
|
||||||
|
# Set to v1 to run v1 benchmark
|
||||||
|
if [[ "${ENGINE_VERSION:-v0}" == "v1" ]]; then
|
||||||
|
export VLLM_USE_V1=1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# dependencies
|
||||||
|
(which wget && which curl) || (apt-get update && apt-get install -y wget curl)
|
||||||
|
(which jq) || (apt-get update && apt-get -y install jq)
|
||||||
|
(which lsof) || (apt-get update && apt-get install -y lsof)
|
||||||
|
|
||||||
|
# get the current IP address, required by benchmark_serving.py
|
||||||
|
export VLLM_HOST_IP=$(hostname -I | awk '{print $1}')
|
||||||
|
# turn of the reporting of the status of each request, to clean up the terminal output
|
||||||
|
export VLLM_LOGGING_LEVEL="WARNING"
|
||||||
|
|
||||||
|
# prepare for benchmarking
|
||||||
|
cd benchmarks || exit 1
|
||||||
|
ensure_sharegpt_downloaded
|
||||||
|
declare -g RESULTS_FOLDER=results/
|
||||||
|
mkdir -p $RESULTS_FOLDER
|
||||||
|
QUICK_BENCHMARK_ROOT=../.buildkite/nightly-benchmarks/
|
||||||
|
|
||||||
|
# benchmarking
|
||||||
|
run_serving_tests $QUICK_BENCHMARK_ROOT/tests/serving-tests.json
|
||||||
|
run_latency_tests $QUICK_BENCHMARK_ROOT/tests/latency-tests.json
|
||||||
|
run_throughput_tests $QUICK_BENCHMARK_ROOT/tests/throughput-tests.json
|
||||||
|
|
||||||
|
# postprocess benchmarking results
|
||||||
|
pip install tabulate pandas
|
||||||
|
python3 $QUICK_BENCHMARK_ROOT/scripts/convert-results-json-to-markdown.py
|
||||||
|
|
||||||
|
upload_to_buildkite
|
||||||
|
}
|
||||||
|
|
||||||
|
main "$@"
|
||||||
@@ -0,0 +1,85 @@
# SPDX-License-Identifier: Apache-2.0

import datetime
import json
import os
from pathlib import Path

import pandas as pd
from tabulate import tabulate

results_folder = Path("results/")

# serving results and the keys that will be printed into markdown
serving_results = []
serving_column_mapping = {
    "test_name": "Test name",
    "gpu_type": "GPU",
    "completed": "Successful req.",
    "request_throughput": "Tput (req/s)",
    "mean_ttft_ms": "Mean TTFT (ms)",
    "std_ttft_ms": "Std TTFT (ms)",
    "median_ttft_ms": "Median TTFT (ms)",
    "mean_itl_ms": "Mean ITL (ms)",
    "std_itl_ms": "Std ITL (ms)",
    "median_itl_ms": "Median ITL (ms)",
    "mean_tpot_ms": "Mean TPOT (ms)",
    "std_tpot_ms": "Std TPOT (ms)",
    "median_tpot_ms": "Median TPOT (ms)",
    "total_token_throughput": "Total Token Tput (tok/s)",
    "output_throughput": "Output Tput (tok/s)",
    "total_input_tokens": "Total input tokens",
    "total_output_tokens": "Total output tokens",
    "engine": "Engine",
}

if __name__ == "__main__":

    # collect results
    for test_file in results_folder.glob("*.json"):

        with open(test_file) as f:
            raw_result = json.loads(f.read())

        # attach the benchmarking command to raw_result
        with open(test_file.with_suffix(".commands")) as f:
            command = json.loads(f.read())
        raw_result.update(command)

        # update the test name of this result
        raw_result.update({"test_name": test_file.stem})

        # add the result to serving_results
        serving_results.append(raw_result)
        continue

    serving_results = pd.DataFrame.from_dict(serving_results)

    if not serving_results.empty:
        serving_results = serving_results[list(
            serving_column_mapping.keys())].rename(
                columns=serving_column_mapping)

    serving_md_table_with_headers = tabulate(serving_results,
                                             headers='keys',
                                             tablefmt='pipe',
                                             showindex=False)
    # remove the first line of the header
    serving_md_table_lines = serving_md_table_with_headers.split('\n')
    serving_md_table_without_header = '\n'.join(serving_md_table_lines[2:])

    prefix = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    prefix = prefix + "_" + os.environ.get("CURRENT_LLM_SERVING_ENGINE")

    # document benchmarking results in markdown
    with open(results_folder / f"{prefix}_nightly_results.md", "w") as f:
        # document results with header,
        # for those who want to reproduce our benchmark.
        f.write(serving_md_table_with_headers)
        f.write('\n')

    # document benchmarking results in json
    with open(results_folder / f"{prefix}_nightly_results.json", "w") as f:

        results = serving_results.to_dict(orient='records')
        f.write(json.dumps(results))
.buildkite/nightly-benchmarks/scripts/wait-for-image.sh (new file, 23 lines)
@@ -0,0 +1,23 @@
#!/bin/bash
TOKEN=$(curl -s -L "https://public.ecr.aws/token?service=public.ecr.aws&scope=repository:q9t5s3a7/vllm-ci-postmerge-repo:pull" | jq -r .token)
if [[ "$BUILDKITE_BRANCH" == "main" ]]; then
  URL="https://public.ecr.aws/v2/q9t5s3a7/vllm-ci-postmerge-repo/manifests/$BUILDKITE_COMMIT"
else
  URL="https://public.ecr.aws/v2/q9t5s3a7/vllm-ci-test-repo/manifests/$BUILDKITE_COMMIT"
fi

TIMEOUT_SECONDS=10

retries=0
while [ $retries -lt 1000 ]; do
  if [ "$(curl -s --max-time "$TIMEOUT_SECONDS" -L -H "Authorization: Bearer $TOKEN" -o /dev/null -w "%{http_code}" "$URL")" -eq 200 ]; then
    exit 0
  fi

  echo "Waiting for image to be available..."

  retries=$((retries + 1))
  sleep 5
done

exit 1
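For local debugging, the poller can be exercised outside CI by exporting the two Buildkite variables it reads; this is a hypothetical manual invocation, not part of the pipeline itself:

```bash
# Hypothetical manual run: the Buildkite agent normally injects these
# variables; substitute a real commit SHA to test against the registry.
BUILDKITE_BRANCH=main \
BUILDKITE_COMMIT=0123456789abcdef0123456789abcdef01234567 \
bash .buildkite/nightly-benchmarks/scripts/wait-for-image.sh \
  && echo "image is available" || echo "timed out waiting for image"
```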
@@ -11,7 +11,9 @@
         },
         "vllm_server_parameters": {
             "disable_log_stats": "",
+            "disable_log_requests": "",
             "gpu_memory_utilization": 0.9,
+            "num_scheduler_steps": 10,
             "max_num_seqs": 512,
             "dtype": "bfloat16"
         },
@@ -35,7 +35,9 @@
         },
         "vllm_server_parameters": {
             "disable_log_stats": "",
+            "disable_log_requests": "",
             "gpu_memory_utilization": 0.9,
+            "num_scheduler_steps": 10,
             "max_num_seqs": 512,
             "dtype": "bfloat16"
         },
@@ -88,7 +90,9 @@
         },
         "vllm_server_parameters": {
             "disable_log_stats": "",
+            "disable_log_requests": "",
             "gpu_memory_utilization": 0.9,
+            "num_scheduler_steps": 10,
             "max_num_seqs": 512,
             "dtype": "bfloat16"
         },
@@ -141,7 +145,9 @@
         },
         "vllm_server_parameters": {
             "disable_log_stats": "",
+            "disable_log_requests": "",
             "gpu_memory_utilization": 0.9,
+            "num_scheduler_steps": 10,
             "max_num_seqs": 512,
             "dtype": "bfloat16"
         },
@@ -191,7 +197,9 @@
         },
         "vllm_server_parameters": {
             "disable_log_stats": "",
+            "disable_log_requests": "",
             "gpu_memory_utilization": 0.9,
+            "num_scheduler_steps": 10,
             "max_num_seqs": 512,
             "dtype": "bfloat16"
         },
@@ -243,7 +251,9 @@
         },
         "vllm_server_parameters": {
             "disable_log_stats": "",
+            "disable_log_requests": "",
             "gpu_memory_utilization": 0.9,
+            "num_scheduler_steps": 10,
             "max_num_seqs": 512,
             "dtype": "bfloat16"
         },
@@ -295,7 +305,9 @@
         },
         "vllm_server_parameters": {
             "disable_log_stats": "",
+            "disable_log_requests": "",
             "gpu_memory_utilization": 0.9,
+            "num_scheduler_steps": 10,
             "max_num_seqs": 512,
             "dtype": "bfloat16"
         },
@@ -7,6 +7,7 @@
         "tensor_parallel_size": 1,
         "swap_space": 16,
         "disable_log_stats": "",
+        "disable_log_requests": "",
         "load_format": "dummy"
     },
     "client_parameters": {
@@ -25,6 +26,7 @@
         "tensor_parallel_size": 4,
         "swap_space": 16,
         "disable_log_stats": "",
+        "disable_log_requests": "",
         "load_format": "dummy"
     },
     "client_parameters": {
@@ -43,6 +45,7 @@
         "tensor_parallel_size": 2,
         "swap_space": 16,
         "disable_log_stats": "",
+        "disable_log_requests": "",
         "load_format": "dummy"
     },
     "client_parameters": {
@@ -57,7 +60,8 @@
     "test_name": "serving_llama70B_tp4_sharegpt_specdecode",
     "qps_list": [2],
     "server_parameters": {
         "model": "meta-llama/Meta-Llama-3.1-70B-Instruct",
+        "disable_log_requests": "",
         "tensor_parallel_size": 4,
         "swap_space": 16,
         "speculative_config": {
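These JSON keys are turned into server CLI flags by the suite's `json2args` helper (underscores become dashes, empty-string values become bare flags), so the additions above roughly amount to the following server-argument fragment; this is a sketch, not the exact assembled command:

```bash
# Sketch of the server arguments implied by the JSON additions above; the
# actual command string is assembled by json2args inside the benchmark scripts.
python3 -m vllm.entrypoints.openai.api_server \
  --disable-log-stats \
  --disable-log-requests \
  --gpu-memory-utilization 0.9 \
  --num-scheduler-steps 10 \
  --max-num-seqs 512 \
  --dtype bfloat16
```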
@@ -1,181 +0,0 @@
# vLLM benchmark suite

## Introduction

This directory contains a benchmarking suite for **developers** to run locally and gain clarity on whether their PR improves/degrades vLLM's performance.
vLLM also maintains a continuous performance benchmark under [perf.vllm.ai](https://perf.vllm.ai/), hosted under the PyTorch CI HUD.

## Performance benchmark quick overview

**Benchmarking Coverage**: latency, throughput and fixed-qps serving on B200, A100, H100, Intel® Xeon® Processors, Intel® Gaudi® 3 Accelerators and Arm® Neoverse™ with different models.

**Benchmarking Duration**: about 1 hr.

**For benchmarking developers**: please try your best to constrain the duration of benchmarking to about 1 hr so that it won't take forever to run.

## Trigger the benchmark

The benchmark needs to be triggered manually:

```bash
bash .buildkite/performance-benchmarks/scripts/run-performance-benchmarks.sh
```

Runtime environment variables (an example invocation follows the list):

- `ON_CPU`: set the value to '1' on Intel® Xeon® and Arm® Neoverse™ Processors. Default value is 0.
- `SERVING_JSON`: JSON file to use for the serving tests. Default value is empty string (use default file).
- `LATENCY_JSON`: JSON file to use for the latency tests. Default value is empty string (use default file).
- `THROUGHPUT_JSON`: JSON file to use for the throughput tests. Default value is empty string (use default file).
- `REMOTE_HOST`: IP for the remote vLLM service to benchmark. Default value is empty string.
- `REMOTE_PORT`: Port for the remote vLLM service to benchmark. Default value is empty string.
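For example, a CPU run that points the suite at a custom serving configuration might look like this (the JSON path is illustrative; any file matching the serving-tests schema works):

```bash
# Hypothetical invocation: override the serving test file and run on CPU.
ON_CPU=1 \
SERVING_JSON=.buildkite/performance-benchmarks/tests/serving-tests-cpu.json \
bash .buildkite/performance-benchmarks/scripts/run-performance-benchmarks.sh
```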
## Performance benchmark details

See [performance-benchmarks-descriptions.md](performance-benchmarks-descriptions.md) for detailed descriptions, and use `tests/latency-tests.json`, `tests/throughput-tests.json`, `tests/serving-tests.json` to configure the test cases.

> NOTE: For Intel® Xeon® Processors, use `tests/latency-tests-cpu.json`, `tests/throughput-tests-cpu.json`, `tests/serving-tests-cpu.json` instead.
> For Intel® Gaudi® 3 Accelerators, use `tests/latency-tests-hpu.json`, `tests/throughput-tests-hpu.json`, `tests/serving-tests-hpu.json` instead.
> For Arm® Neoverse™, use `tests/latency-tests-arm64-cpu.json`, `tests/throughput-tests-arm64-cpu.json`, `tests/serving-tests-arm64-cpu.json` instead.

### Latency test

Here is an example of one test inside `latency-tests.json`:

```json
[
    {
        "test_name": "latency_llama8B_tp1",
        "parameters": {
            "model": "meta-llama/Meta-Llama-3-8B",
            "tensor_parallel_size": 1,
            "load_format": "dummy",
            "num_iters_warmup": 5,
            "num_iters": 15
        }
    }
]
```

In this example:

- The `test_name` attribute is a unique identifier for the test. In `latency-tests.json`, it must start with `latency_`.
- The `parameters` attribute controls the command line arguments used for `vllm bench latency`. Note that you should use an underscore `_` instead of a dash `-` when specifying the command line arguments; `run-performance-benchmarks.sh` will convert the underscores to dashes when feeding the arguments to `vllm bench latency`. For example, the corresponding command line arguments for `vllm bench latency` will be `--model meta-llama/Meta-Llama-3-8B --tensor-parallel-size 1 --load-format dummy --num-iters-warmup 5 --num-iters 15` (see the sketch after this list).
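As a minimal sketch of that conversion (mirroring the jq filter the benchmark scripts use in their `json2args` helper; the file path is assumed), the `parameters` object above becomes:

```bash
# Sketch: convert the "parameters" object of the first test case into CLI flags.
params=$(jq -c '.[0].parameters' tests/latency-tests.json)
args=$(echo "$params" | jq -r '
  to_entries |
  map("--" + (.key | gsub("_"; "-")) + " " + (.value | tostring)) |
  join(" ")
')
echo "vllm bench latency $args"
# -> vllm bench latency --model meta-llama/Meta-Llama-3-8B --tensor-parallel-size 1 \
#    --load-format dummy --num-iters-warmup 5 --num-iters 15
```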
Note that the performance numbers are highly sensitive to the values of the parameters. Please make sure the parameters are set correctly.

WARNING: The benchmarking script will save json results by itself, so please do not configure the `--output-json` parameter in the json file.

### Throughput test

The tests are specified in `throughput-tests.json`. The syntax is similar to `latency-tests.json`, except that the parameters will be fed forward to `vllm bench throughput`.

The numbers from this test are also stable, so even a slight change in the measured value usually reflects a real difference in performance.

### Serving test

We test the throughput by using `vllm bench serve` with request rate = inf to cover the online serving overhead. The corresponding parameters are in `serving-tests.json`, and here is an example:

```json
[
    {
        "test_name": "serving_llama8B_tp1_sharegpt",
        "qps_list": [1, 4, 16, "inf"],
        "server_parameters": {
            "model": "meta-llama/Meta-Llama-3-8B",
            "tensor_parallel_size": 1,
            "swap_space": 16,
            "disable_log_stats": "",
            "load_format": "dummy"
        },
        "client_parameters": {
            "model": "meta-llama/Meta-Llama-3-8B",
            "backend": "vllm",
            "dataset_name": "sharegpt",
            "dataset_path": "./ShareGPT_V3_unfiltered_cleaned_split.json",
            "num_prompts": 200
        }
    }
]
```

Inside this example:

- The `test_name` attribute is also a unique identifier for the test. It must start with `serving_`.
- The `server-parameters` includes the command line arguments for the vLLM server.
- The `client-parameters` includes the command line arguments for `vllm bench serve`.
- The `qps_list` controls the list of QPS values to test. It is used to configure the `--request-rate` parameter in `vllm bench serve` (see the sketch after this list).
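As a rough sketch of how `qps_list` drives the client (the benchmark script loops over the list and substitutes each value into `--request-rate`, with `"inf"` meaning an unthrottled run):

```bash
# Sketch only: one client run per qps value; model and dataset taken from the
# example above, other flags omitted for brevity.
for qps in 1 4 16 inf; do
  vllm bench serve \
    --model meta-llama/Meta-Llama-3-8B \
    --backend vllm \
    --dataset-name sharegpt \
    --dataset-path ./ShareGPT_V3_unfiltered_cleaned_split.json \
    --num-prompts 200 \
    --request-rate "$qps"
done
```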
The numbers from this test are less stable than the latency and throughput benchmarks (due to the randomized ShareGPT dataset sampling inside `benchmark_serving.py`), but a large change (e.g. a 5% change) still indicates a meaningful difference.

WARNING: The benchmarking script will save json results by itself, so please do not configure `--save-results` or other results-saving-related parameters in `serving-tests.json`.

#### Default Parameters Field

We can specify default parameters in a JSON field with the key `defaults`. Parameters defined in this field are applied globally to all serving tests, and can be overridden in the individual test case fields (see the sketch after the example). Here is an example:

<details>
<summary> An example of the default parameters field </summary>

```json
{
    "defaults": {
        "qps_list": [
            "inf"
        ],
        "server_environment_variables": {
            "VLLM_ALLOW_LONG_MAX_MODEL_LEN": 1
        },
        "server_parameters": {
            "tensor_parallel_size": 1,
            "dtype": "bfloat16",
            "block_size": 128,
            "disable_log_stats": "",
            "load_format": "dummy"
        },
        "client_parameters": {
            "backend": "vllm",
            "dataset_name": "random",
            "random-input-len": 128,
            "random-output-len": 128,
            "num_prompts": 200,
            "ignore-eos": ""
        }
    },
    "tests": [
        {
            "test_name": "serving_llama3B_tp2_random_128_128",
            "server_parameters": {
                "model": "meta-llama/Llama-3.2-3B-Instruct",
                "tensor_parallel_size": 2
            },
            "client_parameters": {
                "model": "meta-llama/Llama-3.2-3B-Instruct"
            }
        },
        {
            "test_name": "serving_qwen3_tp4_random_128_128",
            "server_parameters": {
                "model": "Qwen/Qwen3-14B",
                "tensor_parallel_size": 4
            },
            "client_parameters": {
                "model": "Qwen/Qwen3-14B"
            }
        }
    ]
}
```

</details>
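A minimal jq sketch of the merge semantics (assuming the schema shown above; the real logic lives in `run-performance-benchmarks.sh`): defaults fill in missing keys, and jq's object addition lets test-level values win.

```bash
# Sketch: expand "defaults" into every entry of "tests"; the right-hand side
# of jq's object addition (+) overrides the left, so per-test values win.
jq '
  . as $root
  | ($root.defaults // {}) as $d
  | [ $root.tests[]
      | .qps_list = (.qps_list // $d.qps_list)
      | .server_parameters = (($d.server_parameters // {}) + (.server_parameters // {}))
      | .client_parameters = (($d.client_parameters // {}) + (.client_parameters // {}))
    ]
' serving-tests.json
```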
### Visualizing the results

The `convert-results-json-to-markdown.py` script helps you put the benchmarking results inside a markdown table, by formatting [descriptions.md](performance-benchmarks-descriptions.md) with real benchmarking results.
You can find the result presented as a table inside the `buildkite/performance-benchmark` job page.
If you do not see the table, please wait until the benchmark finishes running.
The json version of the table (together with the json version of the benchmark) will also be attached to the markdown file.
The raw benchmarking results (in the format of json files) are in the `Artifacts` tab of the benchmarking job.

#### Performance Results Comparison

Follow the instructions in [performance results comparison](https://docs.vllm.ai/en/latest/benchmarking/dashboard/#performance-results-comparison) to analyze performance results and the sizing guide.
File diff suppressed because it is too large
@@ -1,414 +0,0 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

import argparse
import json
import os
import shlex
from importlib import util
from pathlib import Path
from typing import Any

import pandas as pd
import psutil
import regex as re
from tabulate import tabulate

# latency results and the keys that will be printed into markdown
latency_results = []
latency_column_mapping = {
    "test_name": "Test name",
    "gpu_type": "GPU",
    "avg_latency": "Mean latency (ms)",
    # "P10": "P10 (s)",
    # "P25": "P25 (s)",
    "P50": "Median latency (ms)",
    # "P75": "P75 (s)",
    # "P90": "P90 (s)",
    "P99": "P99 latency (ms)",
}

# throughput tests and the keys that will be printed into markdown
throughput_results = []
throughput_results_column_mapping = {
    "test_name": "Test name",
    "gpu_type": "GPU",
    "num_requests": "# of req.",
    "total_num_tokens": "Total # of tokens",
    "elapsed_time": "Elapsed time (s)",
    "requests_per_second": "Tput (req/s)",
    "tokens_per_second": "Tput (tok/s)",
}

# serving results and the keys that will be printed into markdown
serving_results = []
serving_column_mapping = {
    "test_name": "Test name",
    "model_id": "Model",
    "dataset_name": "Dataset Name",
    "input_len": "Input Len",
    "output_len": "Output Len",
    "tp_size": "TP Size",
    "pp_size": "PP Size",
    "dtype": "dtype",
    "gpu_type": "GPU",
    "completed": "# of req.",
    "qps": "qps",
    "max_concurrency": "# of max concurrency.",
    "request_throughput": "Tput (req/s)",
    "total_token_throughput": "Total Token Tput (tok/s)",
    "output_throughput": "Output Tput (tok/s)",
    # "total_input_tokens": "Total input tokens",
    # "total_output_tokens": "Total output tokens",
    "mean_ttft_ms": "Mean TTFT (ms)",
    "median_ttft_ms": "Median TTFT (ms)",
    "p99_ttft_ms": "P99 TTFT (ms)",
    "std_ttft_ms": "STD TTFT (ms)",
    "mean_tpot_ms": "Mean TPOT (ms)",
    "median_tpot_ms": "Median",
    "p99_tpot_ms": "P99",
    "std_tpot_ms": "STD TPOT (ms)",
    "mean_itl_ms": "Mean ITL (ms)",
    "median_itl_ms": "Median ITL (ms)",
    "p99_itl_ms": "P99 ITL (ms)",
}


def read_markdown(file):
    if os.path.exists(file):
        with open(file) as f:
            return f.read() + "\n"
    else:
        return f"{file} not found.\n"


def results_to_json(latency, throughput, serving):
    return json.dumps(
        {
            "latency": latency.to_dict(),
            "throughput": throughput.to_dict(),
            "serving": serving.to_dict(),
        }
    )


def get_size_with_unit(bytes, suffix="B"):
    """
    Scale bytes to its proper format
    e.g:
        1253656 => '1.20MB'
        1253656678 => '1.17GB'
    """
    factor = 1024
    for unit in ["", "K", "M", "G", "T", "P"]:
        if bytes < factor:
            return f"{bytes:.2f}{unit}{suffix}"
        bytes /= factor


def _coerce(val: str) -> Any:
    """Best-effort type coercion from string to Python types."""
    low = val.lower()
    if low == "null":
        return None
    if low == "true":
        return True
    if low == "false":
        return False
    # integers
    if re.fullmatch(r"[+-]?\d+", val):
        try:
            return int(val)
        except ValueError:
            pass
    # floats (keep 'inf'/'-inf'/'nan' as strings)
    if re.fullmatch(r"[+-]?\d*\.\d+", val):
        try:
            return float(val)
        except ValueError:
            pass
    return val


def parse_client_command(cmd: str) -> dict[str, Any]:
    """Parse the client_command shell string into {executable, script, args}."""
    toks = shlex.split(cmd)
    if len(toks) < 2:
        raise ValueError("client_command must include an executable and a script")
    executable, script = toks[0], toks[1]
    args: dict[str, Any] = {}

    i = 2
    while i < len(toks):
        t = toks[i]
        if t.startswith("--"):
            # --key=value or --key (value) or boolean flag
            if "=" in t:
                key, val = t.split("=", 1)
                if key == "--metadata":
                    md = {}
                    if val:
                        if "=" in val:
                            k, v = val.split("=", 1)
                            md[k] = _coerce(v)
                        else:
                            md[val] = True
                    args[key] = md
                else:
                    args[key] = _coerce(val)
                i += 1
                continue

            key = t

            # Special: consume metadata k=v pairs until next --flag
            if key == "--metadata":
                i += 1
                md = {}
                while i < len(toks) and not toks[i].startswith("--"):
                    pair = toks[i]
                    if "=" in pair:
                        k, v = pair.split("=", 1)
                        md[k] = _coerce(v)
                    else:
                        md[pair] = True
                    i += 1
                args[key] = md
                continue

            # Standard: check if next token is a value (not a flag)
            if i + 1 < len(toks) and not toks[i + 1].startswith("--"):
                args[key] = _coerce(toks[i + 1])
                i += 2
            else:
                # lone flag -> True
                args[key] = True
                i += 1
        else:
            # unexpected positional; skip
            i += 1

    return {"executable": executable, "script": script, "args": args}


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-r",
        "--result",
        type=str,
        default="results",
        help="Folder name for benchmark output results.",
    )
    args = parser.parse_args()
    results_folder = Path(args.result)
    if not results_folder.exists():
        raise FileNotFoundError(f"results folder does not exist: {results_folder}")
    # collect results
    for test_file in results_folder.glob("*.json"):
        with open(test_file) as f:
            raw_result = json.loads(f.read())

        if "serving" in str(test_file):
            # this result is generated via `vllm bench serve` command
            # attach the benchmarking command to raw_result
            try:
                with open(test_file.with_suffix(".commands")) as f:
                    command = json.loads(f.read())
            except OSError as e:
                print(e)
                continue
            # Parse Server Command Arg
            out: dict[str, Any] = {
                "server_command": parse_client_command(command["server_command"])
            }
            parse_args = [
                "--tensor-parallel-size",
                "--pipeline-parallel-size",
                "--dtype",
            ]
            col_mapping = ["tp_size", "pp_size", "dtype"]
            for index, arg in enumerate(parse_args):
                if arg in out["server_command"]["args"]:
                    raw_result.update(
                        {col_mapping[index]: out["server_command"]["args"][arg]}
                    )

            # Parse Client Command Arg
            out: dict[str, Any] = {
                "client_command": parse_client_command(command["client_command"])
            }
            parse_args = [
                "--dataset-name",
                "--random-input-len",
                "--random-output-len",
                "--request-rate",
            ]
            col_mapping = ["dataset_name", "input_len", "output_len", "qps"]

            for index, arg in enumerate(parse_args):
                if arg in out["client_command"]["args"]:
                    raw_result.update(
                        {col_mapping[index]: out["client_command"]["args"][arg]}
                    )
            # Add Server, Client command
            raw_result.update(command)

            # update the test name of this result
            raw_result.update({"test_name": test_file.stem})
            # add the result to serving_results
            serving_results.append(raw_result)
            continue

        elif "latency" in str(test_file):
            # this result is generated via `vllm bench latency` command

            # attach the benchmarking command to raw_result
            try:
                with open(test_file.with_suffix(".commands")) as f:
                    command = json.loads(f.read())
            except OSError as e:
                print(e)
                continue

            raw_result.update(command)

            # update the test name of this result
            raw_result.update({"test_name": test_file.stem})

            # get different percentiles
            for perc in [10, 25, 50, 75, 90, 99]:
                # Multiply by 1000 to convert the time unit from s to ms
                raw_result.update(
                    {f"P{perc}": 1000 * raw_result["percentiles"][str(perc)]}
                )
            raw_result["avg_latency"] = raw_result["avg_latency"] * 1000

            # add the result to latency_results
            latency_results.append(raw_result)
            continue

        elif "throughput" in str(test_file):
            # this result is generated via `vllm bench throughput` command

            # attach the benchmarking command to raw_result
            try:
                with open(test_file.with_suffix(".commands")) as f:
                    command = json.loads(f.read())
            except OSError as e:
                print(e)
                continue

            raw_result.update(command)

            # update the test name of this result
            raw_result.update({"test_name": test_file.stem})

            # add the result to throughput_results
            throughput_results.append(raw_result)
            continue

        print(f"Skipping {test_file}")

    latency_results = pd.DataFrame.from_dict(latency_results)
    serving_results = pd.DataFrame.from_dict(serving_results)
    throughput_results = pd.DataFrame.from_dict(throughput_results)

    svmem = psutil.virtual_memory()
    platform_data = {
        "Physical cores": [psutil.cpu_count(logical=False)],
        "Total cores": [psutil.cpu_count(logical=True)],
        "Total Memory": [get_size_with_unit(svmem.total)],
    }

    if util.find_spec("numa") is not None:
        from numa import info

        platform_data["Total NUMA nodes"] = [info.get_num_configured_nodes()]

    if util.find_spec("cpuinfo") is not None:
        from cpuinfo import get_cpu_info

        platform_data["CPU Brand"] = [get_cpu_info()["brand_raw"]]

    platform_results = pd.DataFrame.from_dict(
        platform_data, orient="index", columns=["Platform Info"]
    )

    raw_results_json = results_to_json(
        latency_results, throughput_results, serving_results
    )

    # remapping the key, for visualization purpose
    if not latency_results.empty:
        latency_results = latency_results[list(latency_column_mapping.keys())].rename(
            columns=latency_column_mapping
        )
    if not serving_results.empty:
        valid_columns = [
            col for col in serving_column_mapping if col in serving_results.columns
        ]
        serving_results = serving_results[valid_columns].rename(
            columns=serving_column_mapping
        )
    if not throughput_results.empty:
        throughput_results = throughput_results[
            list(throughput_results_column_mapping.keys())
        ].rename(columns=throughput_results_column_mapping)

    processed_results_json = results_to_json(
        latency_results, throughput_results, serving_results
    )

    for df in [latency_results, serving_results, throughput_results]:
        if df.empty:
            continue

        # Sort all dataframes by their respective "Test name" columns
        df.sort_values(by="Test name", inplace=True)

        # The GPUs sometimes come in format of "GPUTYPE\nGPUTYPE\n...",
        # we want to turn it into "8xGPUTYPE"
        df["GPU"] = df["GPU"].apply(
            lambda x: "{}x{}".format(len(x.split("\n")), x.split("\n")[0])
        )

    # get markdown tables
    latency_md_table = tabulate(
        latency_results, headers="keys", tablefmt="pipe", showindex=False
    )
    serving_md_table = tabulate(
        serving_results, headers="keys", tablefmt="pipe", showindex=False
    )
    throughput_md_table = tabulate(
        throughput_results, headers="keys", tablefmt="pipe", showindex=False
    )
    platform_md_table = tabulate(
        platform_results, headers="keys", tablefmt="pipe", showindex=True
    )

    # document the result
    md_file = "benchmark_results.md"
    json_file = "benchmark_results.json"
    with open(results_folder / md_file, "w") as f:
        results = read_markdown(
            "../.buildkite/performance-benchmarks/"
            "performance-benchmarks-descriptions.md"
        )
        results = results.format(
            latency_tests_markdown_table=latency_md_table,
            throughput_tests_markdown_table=throughput_md_table,
            serving_tests_markdown_table=serving_md_table,
            platform_markdown_table=platform_md_table,
            benchmarking_results_in_json_string=processed_results_json,
        )
        f.write(results)

    # document benchmarking results in json
    with open(results_folder / json_file, "w") as f:
        results = (
            latency_results.to_dict(orient="records")
            + throughput_results.to_dict(orient="records")
            + serving_results.to_dict(orient="records")
        )
        f.write(json.dumps(results))
@@ -1,539 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
# This script assumes that we are already inside the vllm/ directory
|
|
||||||
# Benchmarking results will be available inside vllm/benchmarks/results/
|
|
||||||
|
|
||||||
# Do not set -e, as the mixtral 8x22B model tends to crash occasionally
|
|
||||||
# and we still want to see other benchmarking results even when mixtral crashes.
|
|
||||||
set -x
|
|
||||||
set -o pipefail
|
|
||||||
|
|
||||||
# Environment-driven debug controls (like ON_CPU=1)
|
|
||||||
DRY_RUN="${DRY_RUN:-0}"
|
|
||||||
MODEL_FILTER="${MODEL_FILTER:-}"
|
|
||||||
DTYPE_FILTER="${DTYPE_FILTER:-}"
|
|
||||||
|
|
||||||
check_gpus() {
|
|
||||||
if command -v nvidia-smi; then
|
|
||||||
# check the number of GPUs and GPU type.
|
|
||||||
declare -g gpu_count=$(nvidia-smi --list-gpus | grep -c . || true)
|
|
||||||
elif command -v amd-smi; then
|
|
||||||
declare -g gpu_count=$(amd-smi list | grep -c 'GPU' || true)
|
|
||||||
elif command -v hl-smi; then
|
|
||||||
declare -g gpu_count=$(hl-smi --list | grep -ci "Module ID" || true)
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ $gpu_count -gt 0 ]]; then
|
|
||||||
echo "GPU found."
|
|
||||||
else
|
|
||||||
echo "Need at least 1 GPU to run benchmarking."
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
declare -g arch_suffix=''
|
|
||||||
|
|
||||||
if command -v nvidia-smi; then
|
|
||||||
declare -g gpu_type=$(nvidia-smi --query-gpu=name --format=csv,noheader | awk '{print $2}')
|
|
||||||
elif command -v amd-smi; then
|
|
||||||
declare -g gpu_type=$(amd-smi static -g 0 -a | grep 'MARKET_NAME' | awk '{print $2}')
|
|
||||||
elif command -v hl-smi; then
|
|
||||||
declare -g gpu_type=$(hl-smi -q | grep "Product Name" | head -n 1 | awk -F ':' '{print $2}' | sed 's/^ *//')
|
|
||||||
arch_suffix='-hpu'
|
|
||||||
fi
|
|
||||||
echo "GPU type is $gpu_type"
|
|
||||||
}
|
|
||||||
|
|
||||||
check_cpus() {
|
|
||||||
# check the number of CPUs and NUMA Node and GPU type.
|
|
||||||
declare -g numa_count=$(lscpu | grep "NUMA node(s):" | awk '{print $3}')
|
|
||||||
if [[ $numa_count -gt 0 ]]; then
|
|
||||||
echo "NUMA found."
|
|
||||||
echo "$numa_count"
|
|
||||||
else
|
|
||||||
echo "Need at least 1 NUMA to run benchmarking."
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
if [[ "$(uname -m)" == "aarch64" ]] || [[ "$(uname -m)" == "arm64" ]]; then
|
|
||||||
declare -g gpu_type="arm64-cpu"
|
|
||||||
else
|
|
||||||
declare -g gpu_type="cpu"
|
|
||||||
fi
|
|
||||||
echo "GPU type is $gpu_type"
|
|
||||||
}
|
|
||||||
|
|
||||||
check_hf_token() {
|
|
||||||
# check if HF_TOKEN is available and valid
|
|
||||||
if [[ -z "$HF_TOKEN" ]]; then
|
|
||||||
echo "Error: HF_TOKEN is not set."
|
|
||||||
exit 1
|
|
||||||
elif [[ ! "$HF_TOKEN" =~ ^hf_ ]]; then
|
|
||||||
echo "Error: HF_TOKEN does not start with 'hf_'."
|
|
||||||
exit 1
|
|
||||||
else
|
|
||||||
echo "HF_TOKEN is set and valid."
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
ensure_sharegpt_downloaded() {
|
|
||||||
local FILE=ShareGPT_V3_unfiltered_cleaned_split.json
|
|
||||||
if [ ! -f "$FILE" ]; then
|
|
||||||
wget https://huggingface.co/datasets/anon8231489123/ShareGPT_Vicuna_unfiltered/resolve/main/$FILE
|
|
||||||
else
|
|
||||||
echo "$FILE already exists."
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
json2args() {
|
|
||||||
# transforms the JSON string to command line args, and '_' is replaced to '-'
|
|
||||||
# example:
|
|
||||||
# input: { "model": "meta-llama/Llama-2-7b-chat-hf", "tensor_parallel_size": 1 }
|
|
||||||
# output: --model meta-llama/Llama-2-7b-chat-hf --tensor-parallel-size 1
|
|
||||||
local json_string=$1
|
|
||||||
local args=$(
|
|
||||||
echo "$json_string" | jq -r '
|
|
||||||
to_entries |
|
|
||||||
map("--" + (.key | gsub("_"; "-")) + " " + (.value | tostring)) |
|
|
||||||
join(" ")
|
|
||||||
'
|
|
||||||
)
|
|
||||||
echo "$args"
|
|
||||||
}
|
|
||||||
|
|
||||||
json2envs() {
|
|
||||||
# transforms the JSON string to environment variables.
|
|
||||||
# example:
|
|
||||||
# input: { "VLLM_CPU_KVCACHE_SPACE": 5 }
|
|
||||||
# output: VLLM_CPU_KVCACHE_SPACE=5
|
|
||||||
local json_string=$1
|
|
||||||
local args=$(
|
|
||||||
echo "$json_string" | jq -r '
|
|
||||||
to_entries |
|
|
||||||
map((.key ) + "=" + (.value | tostring)) |
|
|
||||||
join(" ")
|
|
||||||
'
|
|
||||||
)
|
|
||||||
echo "$args"
|
|
||||||
}
|
|
||||||
|
|
||||||
wait_for_server() {
|
|
||||||
local timeout_val="1200"
|
|
||||||
timeout "$timeout_val" bash -c '
|
|
||||||
until curl -sf http://localhost:8000/v1/models >/dev/null; do
|
|
||||||
sleep 1
|
|
||||||
done
|
|
||||||
'
|
|
||||||
}
|
|
||||||
|
|
||||||
kill_processes_launched_by_current_bash() {
|
|
||||||
# Kill all python processes launched from current bash script
|
|
||||||
current_shell_pid=$$
|
|
||||||
processes=$(ps -eo pid,ppid,command | awk -v ppid="$current_shell_pid" -v proc="$1" '$2 == ppid && $3 ~ proc {print $1}')
|
|
||||||
if [ -n "$processes" ]; then
|
|
||||||
echo "Killing the following processes matching '$1':"
|
|
||||||
echo "$processes"
|
|
||||||
echo "$processes" | xargs kill -9
|
|
||||||
else
|
|
||||||
echo "No processes found matching '$1'."
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
kill_gpu_processes() {
|
|
||||||
|
|
||||||
ps -aux
|
|
||||||
lsof -t -i:8000 | xargs -r kill -9
|
|
||||||
pgrep python3 | xargs -r kill -9
|
|
||||||
# vLLM now names the process with VLLM prefix after https://github.com/vllm-project/vllm/pull/21445
|
|
||||||
pgrep VLLM | xargs -r kill -9
|
|
||||||
|
|
||||||
# wait until GPU memory usage smaller than 1GB
|
|
||||||
if command -v nvidia-smi; then
|
|
||||||
while [ "$(nvidia-smi --query-gpu=memory.used --format=csv,noheader,nounits | head -n 1)" -ge 1000 ]; do
|
|
||||||
sleep 1
|
|
||||||
done
|
|
||||||
elif command -v amd-smi; then
|
|
||||||
while [ "$(amd-smi metric -g 0 | grep 'USED_VRAM' | awk '{print $2}')" -ge 1000 ]; do
|
|
||||||
sleep 1
|
|
||||||
done
|
|
||||||
elif command -v hl-smi; then
|
|
||||||
while [ "$(hl-smi -q | grep "Used" | head -n 1 | awk '{print $3}')" -ge 1000 ]; do
|
|
||||||
sleep 1
|
|
||||||
done
|
|
||||||
fi
|
|
||||||
|
|
||||||
# remove vllm config file
|
|
||||||
rm -rf ~/.config/vllm
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
upload_to_buildkite() {
|
|
||||||
# upload the benchmarking results to buildkite
|
|
||||||
|
|
||||||
# if the agent binary is not found, skip uploading the results, exit 0
|
|
||||||
# Check if buildkite-agent is available in the PATH or at /workspace/buildkite-agent
|
|
||||||
if command -v buildkite-agent >/dev/null 2>&1; then
|
|
||||||
BUILDKITE_AGENT_COMMAND="buildkite-agent"
|
|
||||||
elif [ -f /workspace/buildkite-agent ]; then
|
|
||||||
BUILDKITE_AGENT_COMMAND="/workspace/buildkite-agent"
|
|
||||||
else
|
|
||||||
echo "buildkite-agent binary not found. Skip uploading the results."
|
|
||||||
return 0
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Use the determined command to annotate and upload artifacts
|
|
||||||
$BUILDKITE_AGENT_COMMAND annotate --style "info" --context "$BUILDKITE_LABEL-benchmark-results" < "$RESULTS_FOLDER/benchmark_results.md"
|
|
||||||
$BUILDKITE_AGENT_COMMAND artifact upload "$RESULTS_FOLDER/*"
|
|
||||||
}
|
|
||||||
|
|
||||||
run_benchmark_tests() {
  # run benchmark tests using `vllm bench <test_type>` command
  # $1: test type (latency, startup, or throughput)
  # $2: a json file specifying test cases

  local test_type=$1
  local test_file=$2

  # Iterate over tests
  jq -c '.[]' "$test_file" | while read -r params; do
    # get the test name
    test_name=$(echo "$params" | jq -r '.test_name')
    if [[ ! "$test_name" =~ ^${test_type}_ ]]; then
      echo "In ${test_type}-test.json, test_name must start with \"${test_type}_\"."
      exit 1
    fi

    # if TEST_SELECTOR is set, only run the test cases that match the selector
    if [[ -n "$TEST_SELECTOR" ]] && [[ ! "$test_name" =~ $TEST_SELECTOR ]]; then
      echo "Skip test case $test_name."
      continue
    fi

    # get arguments
    bench_params=$(echo "$params" | jq -r '.parameters')
    bench_args=$(json2args "$bench_params")
    bench_environment_variables=$(echo "$params" | jq -r '.environment_variables')
    bench_envs=$(json2envs "$bench_environment_variables")

    # check if there are enough GPUs (or NUMA nodes on CPU) to run the test
    tp=$(echo "$bench_params" | jq -r '.tensor_parallel_size')
    if [[ "$ON_CPU" == "1" ]]; then
      pp=$(echo "$bench_params" | jq -r '.pipeline_parallel_size // 1')
      world_size=$(($tp*$pp))
      if [[ $numa_count -lt $world_size && -z "${REMOTE_HOST}" ]]; then
        echo "Required world-size $world_size but only $numa_count NUMA nodes found. Skip testcase $test_name."
        continue
      fi
    else
      if [[ $gpu_count -lt $tp ]]; then
        echo "Required tensor-parallel-size $tp but only $gpu_count GPUs found. Skip testcase $test_name."
        continue
      fi
    fi

    bench_command=" $bench_envs vllm bench $test_type \
      --output-json $RESULTS_FOLDER/${test_name}.json \
      $bench_args"

    echo "Running test case $test_name"
    echo "${test_type^} command: $bench_command"

    # record the benchmarking command and the GPU type
    jq_output=$(jq -n \
      --arg command "$bench_command" \
      --arg gpu "$gpu_type" \
      --arg test_type "$test_type" \
      '{
        ($test_type + "_command"): $command,
        gpu_type: $gpu
      }')
    echo "$jq_output" >"$RESULTS_FOLDER/$test_name.commands"

    # run the benchmark
    eval "$bench_command"

    kill_gpu_processes

  done
}

run_latency_tests() { run_benchmark_tests "latency" "$1"; }
run_startup_tests() { run_benchmark_tests "startup" "$1"; }
run_throughput_tests() { run_benchmark_tests "throughput" "$1"; }

merge_serving_tests_stream() {
  # Emit merged serving test objects, optionally filtered by MODEL_FILTER/DTYPE_FILTER in DRY_RUN mode.
  # This helper does NOT modify JSON; it only filters the stream in dry-run mode.
  local serving_test_file="$1"
  # shellcheck disable=SC2016
  local merged='
    if type == "array" then
      # Plain format: test cases array
      .[]
    elif (type == "object" and has("tests")) then
      # merge the default parameters into each test case
      . as $root
      | ($root.defaults // {}) as $d
      | ($root.tests // [])[]
      # default qps / max_concurrency from defaults if missing
      | .qps_list = (.qps_list // $d.qps_list)
      | .max_concurrency_list = (.max_concurrency_list // $d.max_concurrency_list)
      # merge envs / params: test overrides defaults
      | .server_environment_variables =
          (($d.server_environment_variables // {}) + (.server_environment_variables // {}))
      | .server_parameters =
          (($d.server_parameters // {}) + (.server_parameters // {}))
      | .client_parameters =
          (($d.client_parameters // {}) + (.client_parameters // {}))
    else
      error("Unsupported serving test file format: must be array or object with .tests")
    end
  '

  jq -c "$merged" "$serving_test_file" | \
    if [[ "${DRY_RUN:-0}" == "1" && ( "${MODEL_FILTER}${DTYPE_FILTER}" != "" ) ]]; then
      jq -c --arg model "$MODEL_FILTER" --arg dtype "$DTYPE_FILTER" '
        select((($model|length)==0)
               or ((.server_parameters.model // "") == $model)
               or ((.client_parameters.model // "") == $model))
        | select((($dtype|length)==0) or ((.server_parameters.dtype // "") == $dtype))
      '
    else
      cat
    fi
}

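# For illustration (a hypothetical input, not taken from the test files):
# given '{"defaults": {"qps_list": ["inf"]}, "tests": [{"test_name": "serving_x"}]}',
# the merge above emits '{"test_name": "serving_x", "qps_list": ["inf"], ...}' --
# one JSON object per test, with per-test values taking precedence over defaults.
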
run_serving_tests() {
  # run serving tests using `vllm bench serve` command
  # $1: a json file specifying serving test cases
  #
  # Supported JSON formats:
  # 1) Plain format: top-level array
  #    [ { "test_name": "...", "server_parameters": {...}, ... }, ... ]
  #
  # 2) Default parameters field + plain format tests
  #    {
  #      "defaults": { ... },
  #      "tests": [ { "test_name": "...", "server_parameters": {...}, ... }, ... ]
  #    }

  local serving_test_file
  serving_test_file=$1

  # In dry-run mode, if filters are provided but no tests match, report it and
  # return early.
  if [[ "${DRY_RUN:-0}" == "1" && ( "${MODEL_FILTER}${DTYPE_FILTER}" != "" ) ]]; then
    local count
    count=$(merge_serving_tests_stream "$serving_test_file" | wc -l | tr -d ' ')
    if [[ "$count" -eq 0 ]]; then
      echo "No matching serving tests found in $serving_test_file for model='$MODEL_FILTER' dtype='$DTYPE_FILTER'." >&2
      return 0
    fi
  fi

  # Iterate over serving tests (merged + optionally filtered stream)
  merge_serving_tests_stream "$serving_test_file" | while read -r params; do
    # get the test name
    test_name=$(echo "$params" | jq -r '.test_name')
    if [[ ! "$test_name" =~ ^serving_ ]]; then
      echo "In serving-test.json, test_name must start with \"serving_\"."
      exit 1
    fi

    # if TEST_SELECTOR is set, only run the test cases that match the selector
    if [[ -n "$TEST_SELECTOR" ]] && [[ ! "$test_name" =~ $TEST_SELECTOR ]]; then
      echo "Skip test case $test_name."
      continue
    fi

    # get client and server arguments (after merging the default parameters)
    server_params=$(echo "$params" | jq -r '.server_parameters')
    server_envs=$(echo "$params" | jq -r '.server_environment_variables')
    client_params=$(echo "$params" | jq -r '.client_parameters')

    server_args=$(json2args "$server_params")
    server_envs=$(json2envs "$server_envs")
    client_args=$(json2args "$client_params")

    # qps_list
    qps_list=$(echo "$params" | jq -r '.qps_list')
    qps_list=$(echo "$qps_list" | jq -r '.[] | @sh')
    echo "Running over qps list $qps_list"

    # max_concurrency_list (falls back to num_prompts if missing)
    max_concurrency_list=$(echo "$params" | jq -r '.max_concurrency_list')
    if [[ -z "$max_concurrency_list" || "$max_concurrency_list" == "null" ]]; then
      num_prompts=$(echo "$client_params" | jq -r '.num_prompts')
      max_concurrency_list="[$num_prompts]"
    fi
    max_concurrency_list=$(echo "$max_concurrency_list" | jq -r '.[] | @sh')
    echo "Running over max concurrency list $max_concurrency_list"

    # check if there are enough resources to run the test
    tp=$(echo "$server_params" | jq -r '.tensor_parallel_size')
    if [[ "$ON_CPU" == "1" ]]; then
      pp=$(echo "$server_params" | jq -r '.pipeline_parallel_size // 1')
      world_size=$(($tp*$pp))
      if [[ $numa_count -lt $world_size && -z "${REMOTE_HOST}" ]]; then
        echo "Required world-size $world_size but only $numa_count NUMA nodes found. Skip testcase $test_name."
        continue
      fi
    else
      if [[ $gpu_count -lt $tp ]]; then
        echo "Required tensor-parallel-size $tp but only $gpu_count GPUs found. Skip testcase $test_name."
        continue
      fi
    fi

    # check that the server model and the client model are aligned
    server_model=$(echo "$server_params" | jq -r '.model')
    client_model=$(echo "$client_params" | jq -r '.model')
    if [[ $server_model != "$client_model" ]]; then
      echo "Server model and client model must be the same. Skip testcase $test_name."
      continue
    fi

    server_command="$server_envs vllm serve \
      $server_args"

    # run the server
    echo "Running test case $test_name"
    echo "Server command: $server_command"
    # support a remote vllm server
    client_remote_args=""
    if [[ -z "${REMOTE_HOST}" && "${DRY_RUN:-0}" != "1" ]]; then
      bash -c "$server_command" &
      server_pid=$!
      # wait until the server is alive
      if wait_for_server; then
        echo ""
        echo "vLLM server is up and running."
      else
        echo ""
        echo "vLLM failed to start within the timeout period."
      fi
    elif [[ "${DRY_RUN:-0}" == "1" ]]; then
      # dry-run: don't start the server
      echo "Dry Run."
    else
      server_command="Using Remote Server $REMOTE_HOST $REMOTE_PORT"
      if [[ ${REMOTE_PORT} ]]; then
        client_remote_args=" --host=$REMOTE_HOST --port=$REMOTE_PORT "
      else
        client_remote_args=" --host=$REMOTE_HOST "
      fi
    fi

    # save the compilation mode and the optimization level in the serving
    # results whenever they are set
    compilation_config_mode=$(echo "$server_params" | jq -r '."compilation_config.mode" // empty')
    optimization_level=$(echo "$server_params" | jq -r '.optimization_level // empty')

    # iterate over different QPS
    for qps in $qps_list; do
      # remove the surrounding single quotes from qps
      if [[ "$qps" == *"inf"* ]]; then
        qps="inf"
      fi

      # iterate over different max_concurrency
      for max_concurrency in $max_concurrency_list; do
        new_test_name="${test_name}_qps_${qps}_concurrency_${max_concurrency}"
        echo " new test name $new_test_name"
        # pass the tensor parallel size, the compilation mode, and the optimization
        # level to the client so that they can be used on the benchmark dashboard
        client_command="vllm bench serve \
          --save-result \
          --result-dir $RESULTS_FOLDER \
          --result-filename ${new_test_name}.json \
          --request-rate $qps \
          --max-concurrency $max_concurrency \
          --metadata tensor_parallel_size=$tp compilation_config.mode=$compilation_config_mode optimization_level=$optimization_level \
          $client_args $client_remote_args "

        echo "Running test case $test_name with qps $qps"
        echo "Client command: $client_command"

        if [[ "${DRY_RUN:-0}" != "1" ]]; then
          bash -c "$client_command"
        fi

        # record the benchmarking commands
        jq_output=$(jq -n \
          --arg server "$server_command" \
          --arg client "$client_command" \
          --arg gpu "$gpu_type" \
          '{
            server_command: $server,
            client_command: $client,
            gpu_type: $gpu
          }')
        echo "$jq_output" >"$RESULTS_FOLDER/${new_test_name}.commands"

      done
    done

    # clean up
    if [[ "${DRY_RUN:-0}" != "1" ]]; then
      kill -9 "$server_pid"
      kill_gpu_processes
    fi
  done
}

main() {

  local ARCH
  ARCH=''
  if [[ "$ON_CPU" == "1" ]]; then
    check_cpus
    ARCH="-$gpu_type"
  else
    check_gpus
    ARCH="$arch_suffix"
  fi

  # DRY_RUN does not execute vLLM; do not require HF_TOKEN.
  if [[ "${DRY_RUN:-0}" != "1" ]]; then
    check_hf_token
  else
    echo "DRY_RUN=1 -> skip HF_TOKEN validation"
  fi

  # dependencies
  (which wget && which curl) || (apt-get update && apt-get install -y wget curl)
  (which jq) || (apt-get update && apt-get -y install jq)
  (which lsof) || (apt-get update && apt-get install -y lsof)

  # get the current IP address, required by the `vllm bench serve` command
  export VLLM_HOST_IP=$(hostname -I | awk '{print $1}')
  # turn off the reporting of the status of each request, to clean up the terminal output
  export VLLM_LOGGING_LEVEL="WARNING"

  # prepare for benchmarking
  cd benchmarks || exit 1
  ensure_sharegpt_downloaded
  declare -g RESULTS_FOLDER=results/
  mkdir -p $RESULTS_FOLDER
  QUICK_BENCHMARK_ROOT=../.buildkite/performance-benchmarks/

  # dump vllm info via `vllm collect-env`
  env_output=$(vllm collect-env)
  echo "$env_output" >"$RESULTS_FOLDER/vllm_env.txt"

  # benchmarking
  run_serving_tests $QUICK_BENCHMARK_ROOT/tests/"${SERVING_JSON:-serving-tests$ARCH.json}" || exit $?

  if [[ "${DRY_RUN:-0}" == "1" ]]; then
    echo "DRY_RUN=1 -> skip latency/startup/throughput suites"
    exit 0
  fi

  run_latency_tests $QUICK_BENCHMARK_ROOT/tests/"${LATENCY_JSON:-latency-tests$ARCH.json}"
  run_startup_tests $QUICK_BENCHMARK_ROOT/tests/"${STARTUP_JSON:-startup-tests$ARCH.json}"
  run_throughput_tests $QUICK_BENCHMARK_ROOT/tests/"${THROUGHPUT_JSON:-throughput-tests$ARCH.json}"

  # postprocess benchmarking results
  pip install tabulate pandas
  python3 $QUICK_BENCHMARK_ROOT/scripts/convert-results-json-to-markdown.py

  upload_to_buildkite
}

main "$@"
@@ -1,26 +0,0 @@
[
    {
        "test_name": "latency_llama8B_tp1",
        "environment_variables": {
            "VLLM_RPC_TIMEOUT": 100000,
            "VLLM_ALLOW_LONG_MAX_MODEL_LEN": 1,
            "VLLM_ENGINE_ITERATION_TIMEOUT_S": 120,
            "VLLM_CPU_KVCACHE_SPACE": 40
        },
        "parameters": {
            "model": "meta-llama/Llama-3.1-8B-Instruct",
            "tensor_parallel_size": 1,
            "load_format": "dummy",
            "dtype": "bfloat16",
            "distributed_executor_backend": "mp",
            "block_size": 128,
            "trust_remote_code": "",
            "disable_log_stats": "",
            "enforce_eager": "",
            "max_num_batched_tokens": 2048,
            "max_num_seqs": 256,
            "num_iters_warmup": 5,
            "num_iters": 15
        }
    }
]
@@ -1,26 +0,0 @@
[
    {
        "test_name": "latency_llama8B_tp2",
        "environment_variables": {
            "VLLM_RPC_TIMEOUT": 100000,
            "VLLM_ALLOW_LONG_MAX_MODEL_LEN": 1,
            "VLLM_ENGINE_ITERATION_TIMEOUT_S": 120,
            "VLLM_CPU_SGL_KERNEL": 1,
            "VLLM_CPU_KVCACHE_SPACE": 40
        },
        "parameters": {
            "model": "meta-llama/Llama-3.1-8B-Instruct",
            "tensor_parallel_size": 2,
            "dtype": "bfloat16",
            "distributed_executor_backend": "mp",
            "block_size": 128,
            "trust_remote_code": "",
            "disable_log_stats": "",
            "enforce_eager": "",
            "max_num_batched_tokens": 2048,
            "max_num_seqs": 256,
            "num_iters_warmup": 5,
            "num_iters": 15
        }
    }
]
@@ -1,106 +0,0 @@
[
    {
        "test_name": "latency_llama8B_tp1",
        "environment_variables": {
            "PT_HPU_LAZY_MODE": 1,
            "VLLM_CONTIGUOUS_PA": 1,
            "VLLM_DEFRAG": 1
        },
        "parameters": {
            "model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
            "tensor_parallel_size": 1,
            "load_format": "dummy",
            "num-iters-warmup": 5,
            "num-iters": 15,
            "max-model-len": 256,
            "async-scheduling": ""
        }
    },
    {
        "test_name": "latency_llama70B_tp4",
        "environment_variables": {
            "PT_HPU_LAZY_MODE": 1,
            "PT_HPU_ENABLE_LAZY_COLLECTIVES": 1,
            "VLLM_CONTIGUOUS_PA": 1,
            "VLLM_DEFRAG": 1
        },
        "parameters": {
            "model": "meta-llama/Meta-Llama-3.1-70B-Instruct",
            "tensor_parallel_size": 4,
            "load_format": "dummy",
            "num-iters-warmup": 5,
            "num-iters": 15,
            "max-model-len": 256,
            "async-scheduling": ""
        }
    },
    {
        "test_name": "latency_mixtral8x7B_tp2",
        "environment_variables": {
            "PT_HPU_LAZY_MODE": 1,
            "PT_HPU_ENABLE_LAZY_COLLECTIVES": 1,
            "VLLM_CONTIGUOUS_PA": 1,
            "VLLM_DEFRAG": 1
        },
        "parameters": {
            "model": "mistralai/Mixtral-8x7B-Instruct-v0.1",
            "tensor_parallel_size": 2,
            "load_format": "dummy",
            "num-iters-warmup": 5,
            "num-iters": 15,
            "max-model-len": 256,
            "async-scheduling": ""
        }
    },
    {
        "test_name": "latency_deepseek_r1",
        "environment_variables": {
            "PT_HPU_LAZY_MODE": 1,
            "PT_HPU_ENABLE_LAZY_COLLECTIVES": 1,
            "VLLM_CONTIGUOUS_PA": 1,
            "VLLM_DEFRAG": 1
        },
        "parameters": {
            "model": "deepseek-ai/DeepSeek-R1",
            "tensor_parallel_size": 8,
            "load_format": "dummy",
            "max-model-len": 2048,
            "dtype": "bfloat16"
        }
    },
    {
        "test_name": "latency_llama4_maverick_17b128e_instruct_fp8",
        "environment_variables": {
            "PT_HPU_LAZY_MODE": 1,
            "PT_HPU_ENABLE_LAZY_COLLECTIVES": 1,
            "VLLM_CONTIGUOUS_PA": 1,
            "VLLM_DEFRAG": 1
        },
        "parameters": {
            "model": "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
            "tensor_parallel_size": 8,
            "max-model-len": 512,
            "max-num-seqs": 128,
            "async-scheduling": "",
            "gpu-memory-utilization": 0.95,
            "enable_expert_parallel": ""
        }
    },
    {
        "test_name": "latency_qwen3_8b",
        "environment_variables": {
            "PT_HPU_LAZY_MODE": 1,
            "PT_HPU_ENABLE_LAZY_COLLECTIVES": 1,
            "VLLM_CONTIGUOUS_PA": 1,
            "VLLM_DEFRAG": 1
        },
        "parameters": {
            "model": "Qwen/Qwen3-8B",
            "tensor_parallel_size": 1,
            "max-model-len": 2048,
            "max-num-seqs": 128,
            "dtype": "bfloat16",
            "async-scheduling": ""
        }
    }
]
@@ -1,130 +0,0 @@
{
    "defaults": {
        "qps_list": [
            "inf"
        ],
        "max_concurrency_list": [
            12,
            16,
            24,
            32,
            64,
            128,
            200
        ],
        "server_environment_variables": {
            "VLLM_RPC_TIMEOUT": 100000,
            "VLLM_ALLOW_LONG_MAX_MODEL_LEN": 1,
            "VLLM_ENGINE_ITERATION_TIMEOUT_S": 120,
            "VLLM_CPU_SGL_KERNEL": 1,
            "VLLM_CPU_KVCACHE_SPACE": 40
        },
        "server_parameters": {
            "model": "meta-llama/Llama-3.1-8B-Instruct",
            "tensor_parallel_size": 1,
            "dtype": "bfloat16",
            "distributed_executor_backend": "mp",
            "block_size": 128,
            "trust_remote_code": "",
            "disable_log_stats": "",
            "enforce_eager": "",
            "max_num_batched_tokens": 2048,
            "max_num_seqs": 256,
            "load_format": "dummy"
        },
        "client_parameters": {
            "model": "meta-llama/Llama-3.1-8B-Instruct",
            "backend": "vllm",
            "ignore-eos": "",
            "num_prompts": 200
        }
    },
    "tests": [
        {
            "test_name": "serving_llama8B_tp1_sharegpt",
            "server_parameters": {
                "tensor_parallel_size": 1
            },
            "client_parameters": {
                "dataset_name": "sharegpt",
                "dataset_path": "./ShareGPT_V3_unfiltered_cleaned_split.json"
            }
        },
        {
            "test_name": "serving_llama8B_tp2_sharegpt",
            "server_parameters": {
                "tensor_parallel_size": 2
            },
            "client_parameters": {
                "dataset_name": "sharegpt",
                "dataset_path": "./ShareGPT_V3_unfiltered_cleaned_split.json"
            }
        },
        {
            "test_name": "serving_llama8B_tp1_random_128_128",
            "server_parameters": {
                "tensor_parallel_size": 1
            },
            "client_parameters": {
                "dataset_name": "random",
                "random-input-len": 128,
                "random-output-len": 128
            }
        },
        {
            "test_name": "serving_llama8B_tp2_random_128_128",
            "server_parameters": {
                "tensor_parallel_size": 2
            },
            "client_parameters": {
                "dataset_name": "random",
                "random-input-len": 128,
                "random-output-len": 128
            }
        },
        {
            "test_name": "serving_llama8B_tp1_random_128_2048",
            "server_parameters": {
                "tensor_parallel_size": 1
            },
            "client_parameters": {
                "dataset_name": "random",
                "random-input-len": 128,
                "random-output-len": 2048
            }
        },
        {
            "test_name": "serving_llama8B_tp2_random_128_2048",
            "server_parameters": {
                "tensor_parallel_size": 2
            },
            "client_parameters": {
                "dataset_name": "random",
                "random-input-len": 128,
                "random-output-len": 2048
            }
        },
        {
            "test_name": "serving_llama8B_tp1_random_2048_128",
            "server_parameters": {
                "tensor_parallel_size": 1
            },
            "client_parameters": {
                "dataset_name": "random",
                "random-input-len": 2048,
                "random-output-len": 128
            }
        },
        {
            "test_name": "serving_llama8B_tp2_random_2048_128",
            "server_parameters": {
                "tensor_parallel_size": 2
            },
            "client_parameters": {
                "dataset_name": "random",
                "random-input-len": 2048,
                "random-output-len": 128
            }
        }
    ]
}
@@ -1,41 +0,0 @@
{
    "defaults": {
        "qps_list": [
            "inf"
        ],
        "max_concurrency_list": [
            32,
            64,
            128
        ],
        "server_environment_variables": {
            "VLLM_RPC_TIMEOUT": 100000,
            "VLLM_ALLOW_LONG_MAX_MODEL_LEN": 1,
            "VLLM_ENGINE_ITERATION_TIMEOUT_S": 120,
            "VLLM_CPU_SGL_KERNEL": 1,
            "VLLM_CPU_KVCACHE_SPACE": 40
        },
        "server_parameters": {
            "dtype": "bfloat16",
            "model": "jinaai/jina-embeddings-v3",
            "trust_remote_code": ""
        },
        "client_parameters": {
            "model": "jinaai/jina-embeddings-v3",
            "backend": "openai-embeddings",
            "endpoint": "/v1/embeddings",
            "dataset_name": "sharegpt",
            "dataset_path": "ShareGPT_V3_unfiltered_cleaned_split.json",
            "num_prompts": 200
        }
    },
    "tests": [
        {
            "test_name": "serving_jina_embed_v3_tp1_sharegpt",
            "server_parameters": {
                "tensor_parallel_size": 1
            },
            "client_parameters": {}
        }
    ]
}
@@ -1,283 +0,0 @@
{
    "defaults": {
        "qps_list": [
            "inf"
        ],
        "max_concurrency_list": [12, 16, 24, 32, 64, 128, 200],
        "server_environment_variables": {
            "VLLM_RPC_TIMEOUT": 100000,
            "VLLM_ALLOW_LONG_MAX_MODEL_LEN": 1,
            "VLLM_ENGINE_ITERATION_TIMEOUT_S": 120,
            "VLLM_CPU_SGL_KERNEL": 1,
            "VLLM_CPU_KVCACHE_SPACE": 40
        },
        "server_parameters": {
            "model": "meta-llama/Llama-3.1-8B-Instruct",
            "tensor_parallel_size": 1,
            "dtype": "bfloat16",
            "distributed_executor_backend": "mp",
            "block_size": 128,
            "trust_remote_code": "",
            "disable_log_stats": "",
            "max_num_batched_tokens": 2048,
            "max_num_seqs": 256
        },
        "client_parameters": {
            "model": "meta-llama/Llama-3.1-8B-Instruct",
            "backend": "vllm",
            "ignore-eos": "",
            "num_prompts": 200
        }
    },
    "tests": [
        {
            "test_name": "serving_llama8B_tp1_sharegpt",
            "server_parameters": {
                "tensor_parallel_size": 1
            },
            "client_parameters": {
                "dataset_name": "sharegpt",
                "dataset_path": "./ShareGPT_V3_unfiltered_cleaned_split.json"
            }
        },
        {
            "test_name": "serving_llama8B_tp2_sharegpt",
            "server_parameters": {
                "tensor_parallel_size": 2
            },
            "client_parameters": {
                "dataset_name": "sharegpt",
                "dataset_path": "./ShareGPT_V3_unfiltered_cleaned_split.json"
            }
        },
        {
            "test_name": "serving_llama8B_tp1_random_128_128",
            "server_parameters": {
                "tensor_parallel_size": 1
            },
            "client_parameters": {
                "dataset_name": "random",
                "random-input-len": 128,
                "random-output-len": 128
            }
        },
        {
            "test_name": "serving_llama8B_tp2_random_128_128",
            "server_parameters": {
                "tensor_parallel_size": 2
            },
            "client_parameters": {
                "dataset_name": "random",
                "random-input-len": 128,
                "random-output-len": 128
            }
        },
        {
            "test_name": "serving_llama8B_tp4_random_128_128",
            "server_parameters": {
                "tensor_parallel_size": 4
            },
            "client_parameters": {
                "dataset_name": "random",
                "random-input-len": 128,
                "random-output-len": 128
            }
        },
        {
            "test_name": "serving_llama8B_tp1_random_128_2048",
            "server_parameters": {
                "tensor_parallel_size": 1
            },
            "client_parameters": {
                "dataset_name": "random",
                "random-input-len": 128,
                "random-output-len": 2048
            }
        },
        {
            "test_name": "serving_llama8B_tp2_random_128_2048",
            "server_parameters": {
                "tensor_parallel_size": 2
            },
            "client_parameters": {
                "dataset_name": "random",
                "random-input-len": 128,
                "random-output-len": 2048
            }
        },
        {
            "test_name": "serving_llama8B_tp4_random_128_2048",
            "server_parameters": {
                "tensor_parallel_size": 4
            },
            "client_parameters": {
                "dataset_name": "random",
                "random-input-len": 128,
                "random-output-len": 2048
            }
        },
        {
            "test_name": "serving_llama8B_tp1_random_2048_128",
            "server_parameters": {
                "tensor_parallel_size": 1
            },
            "client_parameters": {
                "dataset_name": "random",
                "random-input-len": 2048,
                "random-output-len": 128
            }
        },
        {
            "test_name": "serving_llama8B_tp2_random_2048_128",
            "server_parameters": {
                "tensor_parallel_size": 2
            },
            "client_parameters": {
                "dataset_name": "random",
                "random-input-len": 2048,
                "random-output-len": 128
            }
        },
        {
            "test_name": "serving_llama8B_tp4_random_2048_128",
            "server_parameters": {
                "tensor_parallel_size": 4
            },
            "client_parameters": {
                "dataset_name": "random",
                "random-input-len": 2048,
                "random-output-len": 128
            }
        },
        {
            "test_name": "serving_llama8B_int4_tp1_random_128_128",
            "server_parameters": {
                "model": "hugging-quants/Meta-Llama-3.1-8B-Instruct-AWQ-INT4",
                "tensor_parallel_size": 1
            },
            "client_parameters": {
                "model": "hugging-quants/Meta-Llama-3.1-8B-Instruct-AWQ-INT4",
                "dataset_name": "random",
                "random-input-len": 128,
                "random-output-len": 128
            }
        },
        {
            "test_name": "serving_llama8B_int4_tp2_random_128_128",
            "server_parameters": {
                "model": "hugging-quants/Meta-Llama-3.1-8B-Instruct-AWQ-INT4",
                "tensor_parallel_size": 2
            },
            "client_parameters": {
                "model": "hugging-quants/Meta-Llama-3.1-8B-Instruct-AWQ-INT4",
                "dataset_name": "random",
                "random-input-len": 128,
                "random-output-len": 128
            }
        },
        {
            "test_name": "serving_llama8B_int4_tp4_random_128_128",
            "server_parameters": {
                "model": "hugging-quants/Meta-Llama-3.1-8B-Instruct-AWQ-INT4",
                "tensor_parallel_size": 4
            },
            "client_parameters": {
                "model": "hugging-quants/Meta-Llama-3.1-8B-Instruct-AWQ-INT4",
                "dataset_name": "random",
                "random-input-len": 128,
                "random-output-len": 128
            }
        },
        {
            "test_name": "serving_llama3B_tp1_random_128_128",
            "server_parameters": {
                "model": "meta-llama/Llama-3.2-3B-Instruct",
                "tensor_parallel_size": 1
            },
            "client_parameters": {
                "model": "meta-llama/Llama-3.2-3B-Instruct",
                "dataset_name": "random",
                "random-input-len": 128,
                "random-output-len": 128
            }
        },
        {
            "test_name": "serving_granite2B_tp1_random_128_128",
            "server_parameters": {
                "model": "ibm-granite/granite-3.2-2b-instruct",
                "tensor_parallel_size": 1
            },
            "client_parameters": {
                "model": "ibm-granite/granite-3.2-2b-instruct",
                "dataset_name": "random",
                "random-input-len": 128,
                "random-output-len": 128
            }
        },
        {
            "test_name": "serving_qwen1.7B_tp1_random_128_128",
            "server_parameters": {
                "model": "Qwen/Qwen3-1.7B",
                "tensor_parallel_size": 1
            },
            "client_parameters": {
                "model": "Qwen/Qwen3-1.7B",
                "dataset_name": "random",
                "random-input-len": 128,
                "random-output-len": 128
            }
        },
        {
            "test_name": "serving_qwen4B_tp1_random_128_128",
            "server_parameters": {
                "model": "Qwen/Qwen3-4B",
                "tensor_parallel_size": 1
            },
            "client_parameters": {
                "model": "Qwen/Qwen3-4B",
                "dataset_name": "random",
                "random-input-len": 128,
                "random-output-len": 128
            }
        },
        {
            "test_name": "serving_qwen8B_tp1_random_128_128",
            "server_parameters": {
                "model": "Qwen/Qwen3-8B",
                "tensor_parallel_size": 1
            },
            "client_parameters": {
                "model": "Qwen/Qwen3-8B",
                "dataset_name": "random",
                "random-input-len": 128,
                "random-output-len": 128
            }
        },
        {
            "test_name": "serving_glm9B_tp1_random_128_128",
            "server_parameters": {
                "model": "zai-org/glm-4-9b-hf",
                "tensor_parallel_size": 1
            },
            "client_parameters": {
                "model": "zai-org/glm-4-9b-hf",
                "dataset_name": "random",
                "random-input-len": 128,
                "random-output-len": 128
            }
        },
        {
            "test_name": "serving_gemma7B_tp1_random_128_128",
            "server_parameters": {
                "model": "google/gemma-7b",
                "tensor_parallel_size": 1
            },
            "client_parameters": {
                "model": "google/gemma-7b",
                "dataset_name": "random",
                "random-input-len": 128,
                "random-output-len": 128
            }
        }
    ]
}
@@ -1,153 +0,0 @@
{
    "defaults": {
        "qps_list": [
            "inf"
        ],
        "max_concurrency_list": [12, 16, 24, 32, 64, 128, 200],
        "server_environment_variables": {
            "VLLM_RPC_TIMEOUT": 100000,
            "VLLM_ALLOW_LONG_MAX_MODEL_LEN": 1,
            "VLLM_ENGINE_ITERATION_TIMEOUT_S": 120,
            "VLLM_CPU_SGL_KERNEL": 1,
            "VLLM_CPU_KVCACHE_SPACE": 40
        },
        "server_parameters": {
            "model": "meta-llama/Llama-3.1-8B-Instruct",
            "tensor_parallel_size": 1,
            "dtype": "bfloat16",
            "distributed_executor_backend": "mp",
            "block_size": 128,
            "trust_remote_code": "",
            "disable_log_stats": "",
            "max_num_batched_tokens": 2048,
            "max_num_seqs": 256
        },
        "client_parameters": {
            "model": "meta-llama/Llama-3.1-8B-Instruct",
            "backend": "vllm",
            "ignore-eos": "",
            "num_prompts": 200
        }
    },
    "tests": [
        {
            "test_name": "serving_llama8B_tp1_sharegpt",
            "server_parameters": {
                "tensor_parallel_size": 1
            },
            "client_parameters": {
                "dataset_name": "sharegpt",
                "dataset_path": "./ShareGPT_V3_unfiltered_cleaned_split.json"
            }
        },
        {
            "test_name": "serving_llama8B_tp2_sharegpt",
            "server_parameters": {
                "tensor_parallel_size": 2
            },
            "client_parameters": {
                "dataset_name": "sharegpt",
                "dataset_path": "./ShareGPT_V3_unfiltered_cleaned_split.json"
            }
        },
        {
            "test_name": "serving_llama8B_tp1_random_128_128",
            "server_parameters": {
                "tensor_parallel_size": 1
            },
            "client_parameters": {
                "dataset_name": "random",
                "random-input-len": 128,
                "random-output-len": 128
            }
        },
        {
            "test_name": "serving_llama8B_tp2_random_128_128",
            "server_parameters": {
                "tensor_parallel_size": 2
            },
            "client_parameters": {
                "dataset_name": "random",
                "random-input-len": 128,
                "random-output-len": 128
            }
        },
        {
            "test_name": "serving_llama8B_tp4_random_128_128",
            "server_parameters": {
                "tensor_parallel_size": 4
            },
            "client_parameters": {
                "dataset_name": "random",
                "random-input-len": 128,
                "random-output-len": 128
            }
        },
        {
            "test_name": "serving_llama8B_tp1_random_128_2048",
            "server_parameters": {
                "tensor_parallel_size": 1
            },
            "client_parameters": {
                "dataset_name": "random",
                "random-input-len": 128,
                "random-output-len": 2048
            }
        },
        {
            "test_name": "serving_llama8B_tp2_random_128_2048",
            "server_parameters": {
                "tensor_parallel_size": 2
            },
            "client_parameters": {
                "dataset_name": "random",
                "random-input-len": 128,
                "random-output-len": 2048
            }
        },
        {
            "test_name": "serving_llama8B_tp4_random_128_2048",
            "server_parameters": {
                "tensor_parallel_size": 4
            },
            "client_parameters": {
                "dataset_name": "random",
                "random-input-len": 128,
                "random-output-len": 2048
            }
        },
        {
            "test_name": "serving_llama8B_tp1_random_2048_128",
            "server_parameters": {
                "tensor_parallel_size": 1
            },
            "client_parameters": {
                "dataset_name": "random",
                "random-input-len": 2048,
                "random-output-len": 128
            }
        },
        {
            "test_name": "serving_llama8B_tp2_random_2048_128",
            "server_parameters": {
                "tensor_parallel_size": 2
            },
            "client_parameters": {
                "dataset_name": "random",
                "random-input-len": 2048,
                "random-output-len": 128
            }
        },
        {
            "test_name": "serving_llama8B_tp4_random_2048_128",
            "server_parameters": {
                "tensor_parallel_size": 4
            },
            "client_parameters": {
                "dataset_name": "random",
                "random-input-len": 2048,
                "random-output-len": 128
            }
        }
    ]
}
@@ -1,161 +0,0 @@
[
    {
        "test_name": "serving_llama8B_tp1_sharegpt",
        "qps_list": [1, 4, 16, "inf"],
        "server_environment_variables": {
            "PT_HPU_LAZY_MODE": 1,
            "VLLM_CONTIGUOUS_PA": 1,
            "VLLM_DEFRAG": 1
        },
        "server_parameters": {
            "model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
            "tensor_parallel_size": 1,
            "swap_space": 16,
            "disable_log_stats": "",
            "load_format": "dummy",
            "max-model-len": 2048,
            "max-num-seqs": 256,
            "async-scheduling": ""
        },
        "client_parameters": {
            "model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
            "backend": "vllm",
            "dataset_name": "sharegpt",
            "dataset_path": "./ShareGPT_V3_unfiltered_cleaned_split.json",
            "num_prompts": 200
        }
    },
    {
        "test_name": "serving_llama70B_tp4_sharegpt",
        "qps_list": [1, 4, 16, "inf"],
        "server_environment_variables": {
            "PT_HPU_LAZY_MODE": 1,
            "PT_HPU_ENABLE_LAZY_COLLECTIVES": 1,
            "VLLM_CONTIGUOUS_PA": 1,
            "VLLM_DEFRAG": 1
        },
        "server_parameters": {
            "model": "meta-llama/Meta-Llama-3.1-70B-Instruct",
            "tensor_parallel_size": 4,
            "swap_space": 16,
            "disable_log_stats": "",
            "load_format": "dummy",
            "max-model-len": 2048,
            "max-num-seqs": 256,
            "async-scheduling": ""
        },
        "client_parameters": {
            "model": "meta-llama/Meta-Llama-3.1-70B-Instruct",
            "backend": "vllm",
            "dataset_name": "sharegpt",
            "dataset_path": "./ShareGPT_V3_unfiltered_cleaned_split.json",
            "num_prompts": 200
        }
    },
    {
        "test_name": "serving_mixtral8x7B_tp2_sharegpt",
        "qps_list": [1, 4, 16, "inf"],
        "server_environment_variables": {
            "PT_HPU_LAZY_MODE": 1,
            "PT_HPU_ENABLE_LAZY_COLLECTIVES": 1,
            "VLLM_CONTIGUOUS_PA": 1,
            "VLLM_DEFRAG": 1
        },
        "server_parameters": {
            "model": "mistralai/Mixtral-8x7B-Instruct-v0.1",
            "tensor_parallel_size": 2,
            "swap_space": 16,
            "disable_log_stats": "",
            "load_format": "dummy",
            "max-model-len": 2048,
            "max-num-seqs": 256,
            "async-scheduling": ""
        },
        "client_parameters": {
            "model": "mistralai/Mixtral-8x7B-Instruct-v0.1",
            "backend": "vllm",
            "dataset_name": "sharegpt",
            "dataset_path": "./ShareGPT_V3_unfiltered_cleaned_split.json",
            "num_prompts": 200
        }
    },
    {
        "test_name": "serving_deepseek_r1",
        "qps_list": [1, 4, 16, "inf"],
        "server_environment_variables": {
            "PT_HPU_LAZY_MODE": 1,
            "PT_HPU_ENABLE_LAZY_COLLECTIVES": 1,
            "VLLM_CONTIGUOUS_PA": 1,
            "VLLM_DEFRAG": 1
        },
        "server_parameters": {
            "model": "deepseek-ai/DeepSeek-R1",
            "tensor_parallel_size": 8,
            "swap_space": 16,
            "disable_log_stats": "",
            "load_format": "dummy",
            "max-model-len": 2048,
            "max-num-seqs": 200,
            "async-scheduling": "",
            "dtype": "bfloat16"
        },
        "client_parameters": {
            "model": "deepseek-ai/DeepSeek-R1",
            "backend": "vllm",
            "dataset_name": "sharegpt",
            "dataset_path": "./ShareGPT_V3_unfiltered_cleaned_split.json",
            "num_prompts": 200
        }
    },
    {
        "test_name": "serving_llama4_maverick_17b128e_instruct_fp8",
        "qps_list": [1, 4, 16, "inf"],
        "server_environment_variables": {
            "PT_HPU_LAZY_MODE": 1,
            "PT_HPU_ENABLE_LAZY_COLLECTIVES": 1,
            "VLLM_CONTIGUOUS_PA": 1,
            "VLLM_DEFRAG": 1
        },
        "server_parameters": {
            "model": "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
            "tensor_parallel_size": 8,
            "disable_log_stats": "",
            "max-model-len": 2048,
            "max-num-seqs": 128,
            "async-scheduling": "",
            "enable_expert_parallel": "",
            "max-num-batched-tokens": 4096
        },
        "client_parameters": {
            "model": "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
            "backend": "vllm",
            "dataset_name": "sharegpt",
            "dataset_path": "./ShareGPT_V3_unfiltered_cleaned_split.json",
            "num_prompts": 200
        }
    },
    {
        "test_name": "serving_qwen3_8b",
        "qps_list": [1, 4, 10, "inf"],
        "server_environment_variables": {
            "PT_HPU_LAZY_MODE": 1,
            "PT_HPU_ENABLE_LAZY_COLLECTIVES": 1,
            "VLLM_CONTIGUOUS_PA": 1,
            "VLLM_DEFRAG": 1
        },
        "server_parameters": {
            "model": "Qwen/Qwen-3-8B",
            "tensor_parallel_size": 1,
            "dtype": "bfloat16",
            "disable_log_stats": "",
            "async-scheduling": ""
        },
        "client_parameters": {
            "model": "Qwen/Qwen-3-8B",
            "backend": "vllm",
            "dataset_name": "sharegpt",
            "dataset_path": "./ShareGPT_V3_unfiltered_cleaned_split.json",
            "num_prompts": 200
        }
    }
]
@@ -1,27 +0,0 @@
[
    {
        "test_name": "throughput_llama8B_tp1",
        "environment_variables": {
            "VLLM_RPC_TIMEOUT": 100000,
            "VLLM_ALLOW_LONG_MAX_MODEL_LEN": 1,
            "VLLM_ENGINE_ITERATION_TIMEOUT_S": 120,
            "VLLM_CPU_KVCACHE_SPACE": 40
        },
        "parameters": {
            "model": "meta-llama/Llama-3.1-8B-Instruct",
            "tensor_parallel_size": 1,
            "load_format": "dummy",
            "dtype": "bfloat16",
            "distributed_executor_backend": "mp",
            "block_size": 128,
            "trust_remote_code": "",
            "disable_log_stats": "",
            "enforce_eager": "",
            "max_num_batched_tokens": 2048,
            "max_num_seqs": 256,
            "dataset": "./ShareGPT_V3_unfiltered_cleaned_split.json",
            "num_prompts": 200,
            "backend": "vllm"
        }
    }
]
@@ -1,27 +0,0 @@
[
    {
        "test_name": "throughput_llama8B_tp2",
        "environment_variables": {
            "VLLM_RPC_TIMEOUT": 100000,
            "VLLM_ALLOW_LONG_MAX_MODEL_LEN": 1,
            "VLLM_ENGINE_ITERATION_TIMEOUT_S": 120,
            "VLLM_CPU_SGL_KERNEL": 1,
            "VLLM_CPU_KVCACHE_SPACE": 40
        },
        "parameters": {
            "model": "meta-llama/Llama-3.1-8B-Instruct",
            "tensor_parallel_size": 2,
            "dtype": "bfloat16",
            "distributed_executor_backend": "mp",
            "block_size": 128,
            "trust_remote_code": "",
            "disable_log_stats": "",
            "enforce_eager": "",
            "max_num_batched_tokens": 2048,
            "max_num_seqs": 256,
            "dataset": "./ShareGPT_V3_unfiltered_cleaned_split.json",
            "num_prompts": 200,
            "backend": "vllm"
        }
    }
]
@@ -1,123 +0,0 @@
[
    {
        "test_name": "throughput_llama8B_tp1",
        "environment_variables": {
            "PT_HPU_LAZY_MODE": 1,
            "VLLM_CONTIGUOUS_PA": 1,
            "VLLM_DEFRAG": 1
        },
        "parameters": {
            "model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
            "tensor_parallel_size": 1,
            "load_format": "dummy",
            "dataset_path": "./ShareGPT_V3_unfiltered_cleaned_split.json",
            "num_prompts": 1000,
            "backend": "vllm",
            "max-model-len": 2048,
            "max-num-seqs": 512,
            "async-scheduling": ""
        }
    },
    {
        "test_name": "throughput_llama70B_tp4",
        "environment_variables": {
            "PT_HPU_LAZY_MODE": 1,
            "PT_HPU_ENABLE_LAZY_COLLECTIVES": 1,
            "VLLM_CONTIGUOUS_PA": 1,
            "VLLM_DEFRAG": 1
        },
        "parameters": {
            "model": "meta-llama/Meta-Llama-3.1-70B-Instruct",
            "tensor_parallel_size": 4,
            "load_format": "dummy",
            "dataset_path": "./ShareGPT_V3_unfiltered_cleaned_split.json",
            "num_prompts": 1000,
            "backend": "vllm",
            "max-model-len": 2048,
            "max-num-seqs": 512,
            "async-scheduling": ""
        }
    },
    {
        "test_name": "throughput_mixtral8x7B_tp2",
        "environment_variables": {
            "PT_HPU_LAZY_MODE": 1,
            "PT_HPU_ENABLE_LAZY_COLLECTIVES": 1,
            "VLLM_CONTIGUOUS_PA": 1,
            "VLLM_DEFRAG": 1
        },
        "parameters": {
            "model": "mistralai/Mixtral-8x7B-Instruct-v0.1",
            "tensor_parallel_size": 2,
            "load_format": "dummy",
            "dataset_path": "./ShareGPT_V3_unfiltered_cleaned_split.json",
            "num_prompts": 1000,
            "backend": "vllm",
            "max-model-len": 2048,
            "max-num-seqs": 512,
            "async-scheduling": ""
        }
    },
    {
        "test_name": "throughput_deepseek_r1",
        "environment_variables": {
            "PT_HPU_LAZY_MODE": 1,
            "PT_HPU_ENABLE_LAZY_COLLECTIVES": 1,
            "VLLM_CONTIGUOUS_PA": 1,
            "VLLM_DEFRAG": 1
        },
        "parameters": {
            "model": "deepseek-ai/DeepSeek-R1",
            "tensor_parallel_size": 8,
            "load_format": "dummy",
            "dataset_path": "./ShareGPT_V3_unfiltered_cleaned_split.json",
            "dataset_name": "sharegpt",
            "num_prompts": 1000,
            "backend": "vllm",
            "max-model-len": 2048,
            "max-num-seqs": 384,
            "async-scheduling": ""
        }
    },
    {
        "test_name": "throughput_llama4_maverick_17b128e_instruct_fp8",
        "environment_variables": {
            "PT_HPU_LAZY_MODE": 1,
            "PT_HPU_ENABLE_LAZY_COLLECTIVES": 1,
            "VLLM_CONTIGUOUS_PA": 1,
            "VLLM_DEFRAG": 1
        },
        "parameters": {
            "model": "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
            "tensor_parallel_size": 8,
            "dataset_path": "./ShareGPT_V3_unfiltered_cleaned_split.json",
            "dataset_name": "sharegpt",
            "num_prompts": 1000,
            "backend": "vllm",
            "max-model-len": 2048,
            "max-num-seqs": 512,
            "async-scheduling": "",
            "enable_expert_parallel": ""
        }
    },
    {
        "test_name": "throughput_qwen3_8b",
        "environment_variables": {
            "PT_HPU_LAZY_MODE": 1,
            "PT_HPU_ENABLE_LAZY_COLLECTIVES": 1,
            "VLLM_CONTIGUOUS_PA": 1,
            "VLLM_DEFRAG": 1
        },
        "parameters": {
            "model": "Qwen/Qwen-3-8B",
            "tensor_parallel_size": 1,
            "load_format": "dummy",
            "dataset_path": "./ShareGPT_V3_unfiltered_cleaned_split.json",
            "dataset_name": "sharegpt",
            "num_prompts": 1000,
            "max-num-seqs": 512,
            "backend": "vllm",
            "async-scheduling": ""
        }
    }
]
@@ -1,713 +1,103 @@
|
|||||||
steps:
|
steps:
|
||||||
|
- label: "Build wheel - CUDA 12.4"
|
||||||
|
agents:
|
||||||
|
queue: cpu_queue_postmerge
|
||||||
|
commands:
|
||||||
|
- "DOCKER_BUILDKIT=1 docker build --build-arg max_jobs=16 --build-arg USE_SCCACHE=1 --build-arg GIT_REPO_CHECK=1 --build-arg CUDA_VERSION=12.4.0 --tag vllm-ci:build-image --target build --progress plain -f docker/Dockerfile ."
|
||||||
|
- "mkdir artifacts"
|
||||||
|
- "docker run --rm -v $(pwd)/artifacts:/artifacts_host vllm-ci:build-image bash -c 'cp -r dist /artifacts_host && chmod -R a+rw /artifacts_host'"
|
||||||
|
- "bash .buildkite/scripts/upload-wheels.sh"
|
||||||
|
env:
|
||||||
|
DOCKER_BUILDKIT: "1"
|
||||||
|
|
||||||
|
- label: "Build wheel - CUDA 12.1"
|
||||||
|
agents:
|
||||||
|
queue: cpu_queue_postmerge
|
||||||
|
commands:
|
||||||
|
- "DOCKER_BUILDKIT=1 docker build --build-arg max_jobs=16 --build-arg USE_SCCACHE=1 --build-arg GIT_REPO_CHECK=1 --build-arg CUDA_VERSION=12.1.0 --tag vllm-ci:build-image --target build --progress plain -f docker/Dockerfile ."
|
||||||
|
- "mkdir artifacts"
|
||||||
|
- "docker run --rm -v $(pwd)/artifacts:/artifacts_host vllm-ci:build-image bash -c 'cp -r dist /artifacts_host && chmod -R a+rw /artifacts_host'"
|
||||||
|
- "bash .buildkite/scripts/upload-wheels.sh"
|
||||||
|
env:
|
||||||
|
DOCKER_BUILDKIT: "1"
|
||||||
|
|
||||||
|
# Note(simon): We can always build CUDA 11.8 wheel to ensure the build is working.
|
||||||
|
# However, this block can be uncommented to save some compute hours.
|
||||||
|
# - block: "Build CUDA 11.8 wheel"
|
||||||
|
# key: block-build-cu118-wheel
|
||||||
|
|
||||||
|
- label: "Build wheel - CUDA 11.8"
|
||||||
|
# depends_on: block-build-cu118-wheel
|
||||||
|
agents:
|
||||||
|
queue: cpu_queue_postmerge
|
||||||
|
commands:
|
||||||
|
- "DOCKER_BUILDKIT=1 docker build --build-arg max_jobs=16 --build-arg USE_SCCACHE=1 --build-arg GIT_REPO_CHECK=1 --build-arg CUDA_VERSION=11.8.0 --tag vllm-ci:build-image --target build --progress plain -f docker/Dockerfile ."
|
||||||
|
- "mkdir artifacts"
|
||||||
|
- "docker run --rm -v $(pwd)/artifacts:/artifacts_host vllm-ci:build-image bash -c 'cp -r dist /artifacts_host && chmod -R a+rw /artifacts_host'"
|
||||||
|
- "bash .buildkite/scripts/upload-wheels.sh"
|
||||||
|
env:
|
||||||
|
DOCKER_BUILDKIT: "1"
|
||||||
|
|
||||||
|
- block: "Build release image"
|
||||||
|
depends_on: ~
|
||||||
|
key: block-release-image-build
|
||||||
|
|
||||||
|
- label: "Build release image"
|
||||||
|
depends_on: block-release-image-build
|
||||||
|
agents:
|
||||||
|
queue: cpu_queue_postmerge
|
||||||
|
commands:
|
||||||
|
- "aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin public.ecr.aws/q9t5s3a7"
|
||||||
|
- "DOCKER_BUILDKIT=1 docker build --build-arg max_jobs=16 --build-arg USE_SCCACHE=1 --build-arg GIT_REPO_CHECK=1 --build-arg CUDA_VERSION=12.4.0 --tag public.ecr.aws/q9t5s3a7/vllm-release-repo:$BUILDKITE_COMMIT --target vllm-openai --progress plain -f docker/Dockerfile ."
|
||||||
|
- "docker push public.ecr.aws/q9t5s3a7/vllm-release-repo:$BUILDKITE_COMMIT"
|
||||||
|
|
||||||
|
- label: "Build and publish TPU release image"
|
||||||
|
depends_on: ~
|
||||||
|
if: build.env("NIGHTLY") == "1"
|
||||||
|
agents:
|
||||||
|
queue: tpu_queue_postmerge
|
||||||
|
commands:
|
||||||
|
- "DOCKER_BUILDKIT=1 docker build --build-arg max_jobs=16 --build-arg USE_SCCACHE=1 --build-arg GIT_REPO_CHECK=1 --tag vllm/vllm-tpu:nightly --tag vllm/vllm-tpu:$BUILDKITE_COMMIT --progress plain -f docker/Dockerfile.tpu ."
|
||||||
|
- "docker push vllm/vllm-tpu:nightly"
|
||||||
|
- "docker push vllm/vllm-tpu:$BUILDKITE_COMMIT"
|
||||||
|
plugins:
|
||||||
|
- docker-login#v3.0.0:
|
||||||
|
username: vllm
|
||||||
|
password-env: DOCKERHUB_TOKEN
|
||||||
|
env:
|
||||||
|
DOCKER_BUILDKIT: "1"
|
||||||
|
|
||||||
- input: "Provide Release version here"
|
- input: "Provide Release version here"
|
||||||
id: input-release-version
|
|
||||||
fields:
|
fields:
|
||||||
- text: "What is the release version?"
|
- text: "What is the release version?"
|
||||||
key: release-version
|
key: "release-version"
|
||||||
|
|
||||||
- group: "Build Python wheels"
|
- block: "Build CPU release image"
|
||||||
key: "build-wheels"
|
key: block-cpu-release-image-build
|
||||||
steps:
|
|
||||||
- label: "Build wheel - aarch64 - CUDA 12.9"
|
|
||||||
depends_on: ~
|
|
||||||
id: build-wheel-arm64-cuda-12-9
|
|
||||||
agents:
|
|
||||||
queue: arm64_cpu_queue_postmerge
|
|
||||||
commands:
|
|
||||||
# #NOTE: torch_cuda_arch_list is derived from upstream PyTorch build files here:
|
|
||||||
# https://github.com/pytorch/pytorch/blob/main/.ci/aarch64_linux/aarch64_ci_build.sh#L7
|
|
||||||
- "DOCKER_BUILDKIT=1 docker build --build-arg max_jobs=16 --build-arg USE_SCCACHE=1 --build-arg GIT_REPO_CHECK=1 --build-arg CUDA_VERSION=12.9.1 --build-arg torch_cuda_arch_list='8.7 8.9 9.0 10.0+PTX 12.0' --tag vllm-ci:build-image --target build --progress plain -f docker/Dockerfile ."
|
|
||||||
- "mkdir artifacts"
|
|
||||||
- "docker run --rm -v $(pwd)/artifacts:/artifacts_host vllm-ci:build-image bash -c 'cp -r dist /artifacts_host && chmod -R a+rw /artifacts_host'"
|
|
||||||
- "bash .buildkite/scripts/upload-nightly-wheels.sh"
|
|
||||||
env:
|
|
||||||
DOCKER_BUILDKIT: "1"
|
|
||||||
|
|
||||||
- label: "Build wheel - aarch64 - CUDA 13.0"
|
|
||||||
depends_on: ~
|
|
||||||
id: build-wheel-arm64-cuda-13-0
|
|
||||||
agents:
|
|
||||||
queue: arm64_cpu_queue_postmerge
|
|
||||||
commands:
|
|
||||||
# #NOTE: torch_cuda_arch_list is derived from upstream PyTorch build files here:
|
|
||||||
# https://github.com/pytorch/pytorch/blob/main/.ci/aarch64_linux/aarch64_ci_build.sh#L7
|
|
||||||
- "DOCKER_BUILDKIT=1 docker build --build-arg max_jobs=16 --build-arg USE_SCCACHE=1 --build-arg GIT_REPO_CHECK=1 --build-arg CUDA_VERSION=13.0.1 --build-arg torch_cuda_arch_list='8.7 8.9 9.0 10.0+PTX 12.0' --build-arg BUILD_BASE_IMAGE=nvidia/cuda:13.0.1-devel-ubuntu22.04 --tag vllm-ci:build-image --target build --progress plain -f docker/Dockerfile ."
|
|
||||||
- "mkdir artifacts"
|
|
||||||
- "docker run --rm -v $(pwd)/artifacts:/artifacts_host vllm-ci:build-image bash -c 'cp -r dist /artifacts_host && chmod -R a+rw /artifacts_host'"
|
|
||||||
- "bash .buildkite/scripts/upload-nightly-wheels.sh manylinux_2_35"
|
|
||||||
env:
|
|
||||||
DOCKER_BUILDKIT: "1"
|
|
||||||
|
|
||||||
- label: "Build wheel - aarch64 - CPU"
|
|
||||||
depends_on: ~
|
|
||||||
id: build-wheel-arm64-cpu
|
|
||||||
agents:
|
|
||||||
queue: arm64_cpu_queue_postmerge
|
|
||||||
commands:
|
|
||||||
- "DOCKER_BUILDKIT=1 docker build --build-arg max_jobs=16 --build-arg GIT_REPO_CHECK=1 --build-arg VLLM_BUILD_ACL=ON --tag vllm-ci:build-image --target vllm-build --progress plain -f docker/Dockerfile.cpu ."
|
|
||||||
- "mkdir artifacts"
|
|
||||||
- "docker run --rm -v $(pwd)/artifacts:/artifacts_host vllm-ci:build-image bash -c 'cp -r dist /artifacts_host && chmod -R a+rw /artifacts_host'"
|
|
||||||
- "bash .buildkite/scripts/upload-nightly-wheels.sh manylinux_2_35"
|
|
||||||
env:
|
|
||||||
DOCKER_BUILDKIT: "1"
|
|
||||||
|
|
||||||
- label: "Build wheel - x86_64 - CUDA 12.9"
|
|
||||||
depends_on: ~
|
|
||||||
id: build-wheel-x86-cuda-12-9
|
|
||||||
agents:
|
|
||||||
queue: cpu_queue_postmerge
|
|
||||||
commands:
|
|
||||||
- "DOCKER_BUILDKIT=1 docker build --build-arg max_jobs=16 --build-arg USE_SCCACHE=1 --build-arg GIT_REPO_CHECK=1 --build-arg CUDA_VERSION=12.9.1 --tag vllm-ci:build-image --target build --progress plain -f docker/Dockerfile ."
|
|
||||||
- "mkdir artifacts"
|
|
||||||
- "docker run --rm -v $(pwd)/artifacts:/artifacts_host vllm-ci:build-image bash -c 'cp -r dist /artifacts_host && chmod -R a+rw /artifacts_host'"
|
|
||||||
- "bash .buildkite/scripts/upload-nightly-wheels.sh manylinux_2_31"
|
|
||||||
env:
|
|
||||||
DOCKER_BUILDKIT: "1"
|
|
||||||
|
|
||||||
- label: "Build wheel - x86_64 - CUDA 13.0"
|
|
||||||
depends_on: ~
|
|
||||||
id: build-wheel-x86-cuda-13-0
|
|
||||||
agents:
|
|
||||||
queue: cpu_queue_postmerge
|
|
||||||
commands:
|
|
||||||
- "DOCKER_BUILDKIT=1 docker build --build-arg max_jobs=16 --build-arg USE_SCCACHE=1 --build-arg GIT_REPO_CHECK=1 --build-arg CUDA_VERSION=13.0.1 --build-arg BUILD_BASE_IMAGE=nvidia/cuda:13.0.1-devel-ubuntu22.04 --tag vllm-ci:build-image --target build --progress plain -f docker/Dockerfile ."
|
|
||||||
- "mkdir artifacts"
|
|
||||||
- "docker run --rm -v $(pwd)/artifacts:/artifacts_host vllm-ci:build-image bash -c 'cp -r dist /artifacts_host && chmod -R a+rw /artifacts_host'"
|
|
||||||
- "bash .buildkite/scripts/upload-nightly-wheels.sh manylinux_2_35"
|
|
||||||
env:
|
|
||||||
DOCKER_BUILDKIT: "1"
|
|
||||||
|
|
||||||
- label: "Build wheel - x86_64 - CPU"
|
|
||||||
depends_on: ~
|
|
||||||
id: build-wheel-x86-cpu
|
|
||||||
agents:
|
|
||||||
queue: cpu_queue_postmerge
|
|
||||||
commands:
|
|
||||||
- "DOCKER_BUILDKIT=1 docker build --build-arg max_jobs=16 --build-arg GIT_REPO_CHECK=1 --build-arg VLLM_CPU_AVX512BF16=true --build-arg VLLM_CPU_AVX512VNNI=true --build-arg VLLM_CPU_AMXBF16=true --tag vllm-ci:build-image --target vllm-build --progress plain -f docker/Dockerfile.cpu ."
|
|
||||||
- "mkdir artifacts"
|
|
||||||
- "docker run --rm -v $(pwd)/artifacts:/artifacts_host vllm-ci:build-image bash -c 'cp -r dist /artifacts_host && chmod -R a+rw /artifacts_host'"
|
|
||||||
- "bash .buildkite/scripts/upload-nightly-wheels.sh manylinux_2_35"
|
|
||||||
env:
|
|
||||||
DOCKER_BUILDKIT: "1"
|
|
||||||
|
|
||||||
- group: "Build release Docker images"
|
|
||||||
key: "build-release-images"
|
|
||||||
steps:
|
|
||||||
- label: "Build release image - x86_64 - CUDA 12.9"
|
|
||||||
depends_on: ~
|
|
||||||
id: build-release-image-x86
|
|
||||||
agents:
|
|
||||||
queue: cpu_queue_postmerge
|
|
||||||
commands:
|
|
||||||
- "aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin public.ecr.aws/q9t5s3a7"
|
|
||||||
- "DOCKER_BUILDKIT=1 docker build --build-arg max_jobs=16 --build-arg USE_SCCACHE=1 --build-arg GIT_REPO_CHECK=1 --build-arg CUDA_VERSION=12.9.1 --build-arg FLASHINFER_AOT_COMPILE=true --build-arg INSTALL_KV_CONNECTORS=true --tag public.ecr.aws/q9t5s3a7/vllm-release-repo:$BUILDKITE_COMMIT-$(uname -m) --target vllm-openai --progress plain -f docker/Dockerfile ."
|
|
||||||
- "docker push public.ecr.aws/q9t5s3a7/vllm-release-repo:$BUILDKITE_COMMIT-$(uname -m)"
|
|
||||||
# re-tag to default image tag and push, just in case arm64 build fails
|
|
||||||
- "docker tag public.ecr.aws/q9t5s3a7/vllm-release-repo:$BUILDKITE_COMMIT-$(uname -m) public.ecr.aws/q9t5s3a7/vllm-release-repo:$BUILDKITE_COMMIT"
|
|
||||||
- "docker push public.ecr.aws/q9t5s3a7/vllm-release-repo:$BUILDKITE_COMMIT"
|
|
||||||
|
|
||||||
- label: "Build release image - aarch64 - CUDA 12.9"
|
|
||||||
depends_on: ~
|
|
||||||
id: build-release-image-arm64
|
|
||||||
agents:
|
|
||||||
queue: arm64_cpu_queue_postmerge
|
|
||||||
commands:
|
|
||||||
- "aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin public.ecr.aws/q9t5s3a7"
|
|
||||||
- "DOCKER_BUILDKIT=1 docker build --build-arg max_jobs=16 --build-arg USE_SCCACHE=1 --build-arg GIT_REPO_CHECK=1 --build-arg CUDA_VERSION=12.9.1 --build-arg FLASHINFER_AOT_COMPILE=true --build-arg torch_cuda_arch_list='8.7 8.9 9.0 10.0+PTX 12.0' --build-arg INSTALL_KV_CONNECTORS=true --tag public.ecr.aws/q9t5s3a7/vllm-release-repo:$BUILDKITE_COMMIT-$(uname -m) --target vllm-openai --progress plain -f docker/Dockerfile ."
|
|
||||||
- "docker push public.ecr.aws/q9t5s3a7/vllm-release-repo:$BUILDKITE_COMMIT-$(uname -m)"
|
|
||||||
|
|
||||||
- label: "Build release image - x86_64 - CUDA 13.0"
|
|
||||||
depends_on: ~
|
|
||||||
id: build-release-image-x86-cuda-13-0
|
|
||||||
agents:
|
|
||||||
queue: cpu_queue_postmerge
|
|
||||||
commands:
|
|
||||||
- "aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin public.ecr.aws/q9t5s3a7"
|
|
||||||
- "DOCKER_BUILDKIT=1 docker build --build-arg max_jobs=16 --build-arg USE_SCCACHE=1 --build-arg GIT_REPO_CHECK=1 --build-arg CUDA_VERSION=13.0.1 --build-arg INSTALL_KV_CONNECTORS=true --build-arg BUILD_BASE_IMAGE=nvidia/cuda:13.0.1-devel-ubuntu22.04 --tag public.ecr.aws/q9t5s3a7/vllm-release-repo:$BUILDKITE_COMMIT-$(uname -m)-cu130 --target vllm-openai --progress plain -f docker/Dockerfile ."
|
|
||||||
- "docker push public.ecr.aws/q9t5s3a7/vllm-release-repo:$BUILDKITE_COMMIT-$(uname -m)-cu130"
|
|
||||||
# re-tag to default image tag and push, just in case arm64 build fails
|
|
||||||
- "docker tag public.ecr.aws/q9t5s3a7/vllm-release-repo:$BUILDKITE_COMMIT-$(uname -m)-cu130 public.ecr.aws/q9t5s3a7/vllm-release-repo:$BUILDKITE_COMMIT-cu130"
|
|
||||||
- "docker push public.ecr.aws/q9t5s3a7/vllm-release-repo:$BUILDKITE_COMMIT-cu130"
|
|
||||||
|
|
||||||
- label: "Build release image - aarch64 - CUDA 13.0"
|
|
||||||
depends_on: ~
|
|
||||||
id: build-release-image-arm64-cuda-13-0
|
|
||||||
agents:
|
|
||||||
queue: arm64_cpu_queue_postmerge
|
|
||||||
commands:
|
|
||||||
- "aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin public.ecr.aws/q9t5s3a7"
|
|
||||||
# compute capability 12.0 for RTX-50 series / RTX PRO 6000 Blackwell, 12.1 for DGX Spark
|
|
||||||
- "DOCKER_BUILDKIT=1 docker build --build-arg max_jobs=16 --build-arg USE_SCCACHE=1 --build-arg GIT_REPO_CHECK=1 --build-arg CUDA_VERSION=13.0.1 --build-arg torch_cuda_arch_list='8.7 8.9 9.0 10.0+PTX 12.0 12.1' --build-arg INSTALL_KV_CONNECTORS=true --build-arg BUILD_BASE_IMAGE=nvidia/cuda:13.0.1-devel-ubuntu22.04 --tag public.ecr.aws/q9t5s3a7/vllm-release-repo:$BUILDKITE_COMMIT-$(uname -m)-cu130 --target vllm-openai --progress plain -f docker/Dockerfile ."
|
|
||||||
- "docker push public.ecr.aws/q9t5s3a7/vllm-release-repo:$BUILDKITE_COMMIT-$(uname -m)-cu130"
|
|
||||||
|
|
||||||
- block: "Build release image for x86_64 CPU"
|
|
||||||
key: block-cpu-release-image-build
|
|
||||||
depends_on: ~
|
|
||||||
|
|
||||||
- label: "Build release image - x86_64 - CPU"
|
|
||||||
depends_on:
|
|
||||||
- block-cpu-release-image-build
|
|
||||||
- input-release-version
|
|
||||||
agents:
|
|
||||||
queue: cpu_queue_postmerge
|
|
||||||
commands:
|
|
||||||
- "aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin public.ecr.aws/q9t5s3a7"
|
|
||||||
- "DOCKER_BUILDKIT=1 docker build --build-arg max_jobs=16 --build-arg GIT_REPO_CHECK=1 --build-arg VLLM_CPU_AVX512BF16=true --build-arg VLLM_CPU_AVX512VNNI=true --build-arg VLLM_CPU_AMXBF16=true --tag public.ecr.aws/q9t5s3a7/vllm-cpu-release-repo:$(buildkite-agent meta-data get release-version) --tag public.ecr.aws/q9t5s3a7/vllm-cpu-release-repo:latest --progress plain --target vllm-openai -f docker/Dockerfile.cpu ."
|
|
||||||
- "docker push public.ecr.aws/q9t5s3a7/vllm-cpu-release-repo:latest"
|
|
||||||
- "docker push public.ecr.aws/q9t5s3a7/vllm-cpu-release-repo:$(buildkite-agent meta-data get release-version)"
|
|
||||||
env:
|
|
||||||
DOCKER_BUILDKIT: "1"
|
|
||||||
|
|
||||||
- block: "Build release image for arm64 CPU"
|
|
||||||
key: block-arm64-cpu-release-image-build
|
|
||||||
depends_on: ~
|
|
||||||
|
|
||||||
- label: "Build release image - arm64 - CPU"
|
|
||||||
depends_on:
|
|
||||||
- block-arm64-cpu-release-image-build
|
|
||||||
- input-release-version
|
|
||||||
agents:
|
|
||||||
queue: arm64_cpu_queue_postmerge
|
|
||||||
commands:
|
|
||||||
- "aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin public.ecr.aws/q9t5s3a7"
|
|
||||||
- "DOCKER_BUILDKIT=1 docker build --build-arg max_jobs=16 --build-arg GIT_REPO_CHECK=1 --tag public.ecr.aws/q9t5s3a7/vllm-arm64-cpu-release-repo:$(buildkite-agent meta-data get release-version) --tag public.ecr.aws/q9t5s3a7/vllm-arm64-cpu-release-repo:latest --progress plain --target vllm-openai -f docker/Dockerfile.cpu ."
|
|
||||||
- "docker push public.ecr.aws/q9t5s3a7/vllm-arm64-cpu-release-repo:latest"
|
|
||||||
- "docker push public.ecr.aws/q9t5s3a7/vllm-arm64-cpu-release-repo:$(buildkite-agent meta-data get release-version)"
|
|
||||||
env:
|
|
||||||
DOCKER_BUILDKIT: "1"
|
|
||||||
|
|
||||||
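# The group above produces the following ECR tags for a given commit
# (illustrative summary, derived from the build commands above):
#   vllm-release-repo:$BUILDKITE_COMMIT-x86_64 / -aarch64                (CUDA 12.9)
#   vllm-release-repo:$BUILDKITE_COMMIT-x86_64-cu130 / -aarch64-cu130    (CUDA 13.0)
#   vllm-release-repo:$BUILDKITE_COMMIT and :$BUILDKITE_COMMIT-cu130     (single-arch
#   re-tag fallbacks, later replaced by the multi-arch manifests created below)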
- group: "Publish release images"
|
|
||||||
key: "publish-release-images"
|
|
||||||
steps:
|
|
||||||
- label: "Create multi-arch manifest - CUDA 12.9"
|
|
||||||
depends_on:
|
|
||||||
- build-release-image-x86
|
|
||||||
- build-release-image-arm64
|
|
||||||
id: create-multi-arch-manifest
|
|
||||||
agents:
|
|
||||||
queue: small_cpu_queue_postmerge
|
|
||||||
commands:
|
|
||||||
- "aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin public.ecr.aws/q9t5s3a7"
|
|
||||||
- "docker manifest create public.ecr.aws/q9t5s3a7/vllm-release-repo:$BUILDKITE_COMMIT public.ecr.aws/q9t5s3a7/vllm-release-repo:$BUILDKITE_COMMIT-x86_64 public.ecr.aws/q9t5s3a7/vllm-release-repo:$BUILDKITE_COMMIT-aarch64 --amend"
|
|
||||||
- "docker manifest push public.ecr.aws/q9t5s3a7/vllm-release-repo:$BUILDKITE_COMMIT"
|
|
||||||
|
|
||||||
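    # A quick sanity check of the pushed manifest (illustrative, not a pipeline
    # step): `docker manifest inspect
    # public.ecr.aws/q9t5s3a7/vllm-release-repo:$BUILDKITE_COMMIT` should list
    # one entry per architecture (x86_64 and aarch64).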
- label: "Annotate release workflow - CUDA 12.9"
|
|
||||||
depends_on:
|
|
||||||
- create-multi-arch-manifest
|
|
||||||
id: annotate-release-workflow
|
|
||||||
agents:
|
|
||||||
queue: small_cpu_queue_postmerge
|
|
||||||
commands:
|
|
||||||
- "bash .buildkite/scripts/annotate-release.sh"
|
|
||||||
|
|
||||||
- label: "Create multi-arch manifest - CUDA 13.0"
|
|
||||||
depends_on:
|
|
||||||
- build-release-image-x86-cuda-13-0
|
|
||||||
- build-release-image-arm64-cuda-13-0
|
|
||||||
id: create-multi-arch-manifest-cuda-13-0
|
|
||||||
agents:
|
|
||||||
queue: small_cpu_queue_postmerge
|
|
||||||
commands:
|
|
||||||
- "aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin public.ecr.aws/q9t5s3a7"
|
|
||||||
- "docker manifest create public.ecr.aws/q9t5s3a7/vllm-release-repo:$BUILDKITE_COMMIT-cu130 public.ecr.aws/q9t5s3a7/vllm-release-repo:$BUILDKITE_COMMIT-x86_64-cu130 public.ecr.aws/q9t5s3a7/vllm-release-repo:$BUILDKITE_COMMIT-aarch64-cu130 --amend"
|
|
||||||
- "docker manifest push public.ecr.aws/q9t5s3a7/vllm-release-repo:$BUILDKITE_COMMIT-cu130"
|
|
||||||
|
|
||||||
- label: "Publish nightly multi-arch image to DockerHub"
|
|
||||||
depends_on:
|
|
||||||
- create-multi-arch-manifest
|
|
||||||
if: build.env("NIGHTLY") == "1"
|
|
||||||
agents:
|
|
||||||
queue: small_cpu_queue_postmerge
|
|
||||||
commands:
|
|
||||||
- "bash .buildkite/scripts/push-nightly-builds.sh"
|
|
||||||
# Clean up old nightly builds (keep only last 14)
|
|
||||||
- "bash .buildkite/scripts/cleanup-nightly-builds.sh"
|
|
||||||
plugins:
|
|
||||||
- docker-login#v3.0.0:
|
|
||||||
username: vllmbot
|
|
||||||
password-env: DOCKERHUB_TOKEN
|
|
||||||
env:
|
|
||||||
DOCKER_BUILDKIT: "1"
|
|
||||||
DOCKERHUB_USERNAME: "vllmbot"
|
|
||||||
|
|
||||||
- label: "Publish nightly multi-arch image to DockerHub - CUDA 13.0"
|
|
||||||
depends_on:
|
|
||||||
- create-multi-arch-manifest-cuda-13-0
|
|
||||||
if: build.env("NIGHTLY") == "1"
|
|
||||||
agents:
|
|
||||||
queue: small_cpu_queue_postmerge
|
|
||||||
commands:
|
|
||||||
- "bash .buildkite/scripts/push-nightly-builds.sh cu130"
|
|
||||||
# Clean up old nightly builds (keep only last 14)
|
|
||||||
- "bash .buildkite/scripts/cleanup-nightly-builds.sh cu130-nightly-"
|
|
||||||
plugins:
|
|
||||||
- docker-login#v3.0.0:
|
|
||||||
username: vllmbot
|
|
||||||
password-env: DOCKERHUB_TOKEN
|
|
||||||
env:
|
|
||||||
DOCKER_BUILDKIT: "1"
|
|
||||||
DOCKERHUB_USERNAME: "vllmbot"
|
|
||||||
|
|
||||||
- group: "Publish wheels"
|
|
||||||
key: "publish-wheels"
|
|
||||||
steps:
|
|
||||||
- block: "Confirm update release wheels to PyPI (experimental, use with caution)?"
|
|
||||||
key: block-upload-release-wheels
|
|
||||||
depends_on:
|
|
||||||
- input-release-version
|
|
||||||
- build-wheels
|
|
||||||
|
|
||||||
- label: "Upload release wheels to PyPI"
|
|
||||||
depends_on:
|
|
||||||
- block-upload-release-wheels
|
|
||||||
id: upload-release-wheels
|
|
||||||
agents:
|
|
||||||
queue: small_cpu_queue_postmerge
|
|
||||||
commands:
|
|
||||||
- "bash .buildkite/scripts/upload-release-wheels-pypi.sh"
|
|
||||||
|
|
||||||
# =============================================================================
# ROCm Release Pipeline (x86_64 only)
# =============================================================================
#
# vLLM version is determined by the Buildkite checkout (like the CUDA pipeline).
# To build a specific version, trigger the build from that branch/tag.
#
# Environment variables for ROCm builds (set via Buildkite UI or schedule):
#   ROCM_PYTHON_VERSION: Python version (default: 3.12)
#   PYTORCH_ROCM_ARCH: GPU architectures (default: gfx90a;gfx942;gfx950;gfx1100;gfx1101;gfx1200;gfx1201;gfx1150;gfx1151)
#   ROCM_UPLOAD_WHEELS: Upload to S3 (default: false for nightly, true for releases)
#   ROCM_FORCE_REBUILD: Force rebuild base wheels, ignore S3 cache (default: false)
#
# Note: ROCm version is determined by BASE_IMAGE in docker/Dockerfile.rocm_base
# (currently rocm/dev-ubuntu-22.04:7.1-complete)
#
# =============================================================================
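# Example (values are illustrative): a manual ROCm release build can be
# configured entirely from the Buildkite "Environment Variables" box when
# creating a new build, e.g.
#   ROCM_PYTHON_VERSION=3.12
#   PYTORCH_ROCM_ARCH="gfx942;gfx950"
#   ROCM_UPLOAD_WHEELS=true
#   ROCM_FORCE_REBUILD=false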
# ROCm Input Step - Collect build configuration (manual trigger only)
- input: "ROCm Wheel Release Build Configuration"
  key: input-rocm-config
  depends_on: ~
  if: build.source == "ui"
  fields:
    - text: "Python Version"
      key: "rocm-python-version"
      default: "3.12"
      hint: "Python version (e.g., 3.12)"
    - text: "GPU Architectures"
      key: "rocm-pytorch-rocm-arch"
      default: "gfx90a;gfx942;gfx950;gfx1100;gfx1101;gfx1200;gfx1201;gfx1150;gfx1151"
      hint: "Semicolon-separated GPU architectures"
    - select: "Upload Wheels to S3"
      key: "rocm-upload-wheels"
      default: "true"
      options:
        - label: "No - Build only (nightly/dev)"
          value: "false"
        - label: "Yes - Upload to S3 (release)"
          value: "true"
    - select: "Force Rebuild Base Wheels"
      key: "rocm-force-rebuild"
      default: "false"
      hint: "Ignore S3 cache and rebuild base wheels from scratch"
      options:
        - label: "No - Use cached wheels if available"
          value: "false"
        - label: "Yes - Rebuild even if cache exists"
          value: "true"
- block: "Build CPU release image"
  key: block-cpu-release-image-build
  depends_on: ~

- label: "Build and publish CPU release image"
  depends_on: block-cpu-release-image-build
  agents:
    queue: cpu_queue_postmerge
  commands:
    - "aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin public.ecr.aws/q9t5s3a7"
    - "DOCKER_BUILDKIT=1 docker build --build-arg max_jobs=16 --build-arg GIT_REPO_CHECK=1 --tag public.ecr.aws/q9t5s3a7/vllm-cpu-release-repo:$(buildkite-agent meta-data get release-version) --tag public.ecr.aws/q9t5s3a7/vllm-cpu-release-repo:latest --progress plain --target vllm-openai -f docker/Dockerfile.cpu ."
    - "docker push public.ecr.aws/q9t5s3a7/vllm-cpu-release-repo:$(buildkite-agent meta-data get release-version)"
  env:
    DOCKER_BUILDKIT: "1"

# ROCm Job 1: Build ROCm Base Wheels (with S3 caching)
- label: ":rocm: Build ROCm Base Wheels"
  id: build-rocm-base-wheels
  depends_on:
    - step: input-rocm-config
      allow_failure: true # Allow failure so non-UI builds can proceed (input step is skipped)
  agents:
    queue: cpu_queue_postmerge
  commands:
    # Set configuration and check cache
    - |
      set -euo pipefail

      # Get values from meta-data (set by input step) or use defaults
      PYTHON_VERSION="$$(buildkite-agent meta-data get rocm-python-version 2>/dev/null || echo '')"
      export PYTHON_VERSION="$${PYTHON_VERSION:-3.12}"

      PYTORCH_ROCM_ARCH="$$(buildkite-agent meta-data get rocm-pytorch-rocm-arch 2>/dev/null || echo '')"
      export PYTORCH_ROCM_ARCH="$${PYTORCH_ROCM_ARCH:-gfx90a;gfx942;gfx950;gfx1100;gfx1101;gfx1200;gfx1201;gfx1150;gfx1151}"

      # Check for force rebuild flag
      ROCM_FORCE_REBUILD="$${ROCM_FORCE_REBUILD:-}"
      if [ -z "$${ROCM_FORCE_REBUILD}" ]; then
          ROCM_FORCE_REBUILD="$$(buildkite-agent meta-data get rocm-force-rebuild 2>/dev/null || echo '')"
      fi

      echo "========================================"
      echo "ROCm Base Wheels Build Configuration"
      echo "========================================"
      echo "  PYTHON_VERSION: $${PYTHON_VERSION}"
      echo "  PYTORCH_ROCM_ARCH: $${PYTORCH_ROCM_ARCH}"
      echo "  ROCM_FORCE_REBUILD: $${ROCM_FORCE_REBUILD:-false}"
      echo "========================================"

      # Save resolved config for later jobs
      buildkite-agent meta-data set "rocm-python-version" "$${PYTHON_VERSION}"
      buildkite-agent meta-data set "rocm-pytorch-rocm-arch" "$${PYTORCH_ROCM_ARCH}"

      # Check S3 cache for pre-built wheels
      CACHE_KEY=$$(.buildkite/scripts/cache-rocm-base-wheels.sh key)
      CACHE_PATH=$$(.buildkite/scripts/cache-rocm-base-wheels.sh path)
      echo ""
      echo "Cache key: $${CACHE_KEY}"
      echo "Cache path: $${CACHE_PATH}"

      # Save cache key for downstream jobs
      buildkite-agent meta-data set "rocm-cache-key" "$${CACHE_KEY}"

      CACHE_STATUS="miss"
      if [ "$${ROCM_FORCE_REBUILD}" != "true" ]; then
          CACHE_STATUS=$$(.buildkite/scripts/cache-rocm-base-wheels.sh check)
      else
          echo "Force rebuild requested, skipping cache check"
      fi

      if [ "$${CACHE_STATUS}" = "hit" ]; then
          echo ""
          echo "CACHE HIT! Downloading pre-built wheels..."
          echo ""
          .buildkite/scripts/cache-rocm-base-wheels.sh download

          # Set the S3 path for the cached Docker image (for Job 2 to download)
          S3_ARTIFACT_PATH="s3://$${S3_BUCKET}/rocm/cache/$${CACHE_KEY}"
          buildkite-agent meta-data set "rocm-docker-image-s3-path" "$${S3_ARTIFACT_PATH}/rocm-base-image.tar.gz"

          # Mark that we used cache (for Docker image handling)
          buildkite-agent meta-data set "rocm-used-cache" "true"

          echo ""
          echo "Cache download complete. Skipping Docker build."
          echo "Docker image will be downloaded from: $${S3_ARTIFACT_PATH}/rocm-base-image.tar.gz"
      else
          echo ""
          echo "CACHE MISS. Building from scratch..."
          echo ""

          # Build full base image (for later vLLM build)
          DOCKER_BUILDKIT=1 docker buildx build \
              --file docker/Dockerfile.rocm_base \
              --tag rocm/vllm-dev:base-$${BUILDKITE_BUILD_NUMBER} \
              --build-arg PYTORCH_ROCM_ARCH="$${PYTORCH_ROCM_ARCH}" \
              --build-arg PYTHON_VERSION="$${PYTHON_VERSION}" \
              --build-arg USE_SCCACHE=1 \
              --build-arg SCCACHE_BUCKET_NAME=vllm-build-sccache \
              --build-arg SCCACHE_REGION_NAME=us-west-2 \
              --build-arg SCCACHE_S3_NO_CREDENTIALS=0 \
              --load \
              .

          # Build debs_wheel_release stage for wheel extraction
          DOCKER_BUILDKIT=1 docker buildx build \
              --file docker/Dockerfile.rocm_base \
              --tag rocm-base-debs:$${BUILDKITE_BUILD_NUMBER} \
              --target debs_wheel_release \
              --build-arg PYTORCH_ROCM_ARCH="$${PYTORCH_ROCM_ARCH}" \
              --build-arg PYTHON_VERSION="$${PYTHON_VERSION}" \
              --build-arg USE_SCCACHE=1 \
              --build-arg SCCACHE_BUCKET_NAME=vllm-build-sccache \
              --build-arg SCCACHE_REGION_NAME=us-west-2 \
              --build-arg SCCACHE_S3_NO_CREDENTIALS=0 \
              --load \
              .

          # Extract wheels from Docker image
          mkdir -p artifacts/rocm-base-wheels
          container_id=$$(docker create rocm-base-debs:$${BUILDKITE_BUILD_NUMBER})
          docker cp $${container_id}:/app/debs/. artifacts/rocm-base-wheels/
          docker rm $${container_id}
          echo "Extracted base wheels:"
          ls -lh artifacts/rocm-base-wheels/

          # Upload wheels to S3 cache for future builds
          echo ""
          echo "Uploading wheels to S3 cache..."
          .buildkite/scripts/cache-rocm-base-wheels.sh upload

          # Export base Docker image for reuse in vLLM build
          mkdir -p artifacts/rocm-docker-image
          docker save rocm/vllm-dev:base-$${BUILDKITE_BUILD_NUMBER} | gzip > artifacts/rocm-docker-image/rocm-base-image.tar.gz
          echo "Docker image size:"
          ls -lh artifacts/rocm-docker-image/

          # Upload large Docker image to S3 (also cached by cache key)
          S3_ARTIFACT_PATH="s3://$${S3_BUCKET}/rocm/cache/$${CACHE_KEY}"
          echo "Uploading Docker image to $${S3_ARTIFACT_PATH}/"
          aws s3 cp artifacts/rocm-docker-image/rocm-base-image.tar.gz "$${S3_ARTIFACT_PATH}/rocm-base-image.tar.gz"

          # Save the S3 path for downstream jobs
          buildkite-agent meta-data set "rocm-docker-image-s3-path" "$${S3_ARTIFACT_PATH}/rocm-base-image.tar.gz"

          # Mark that we did NOT use cache
          buildkite-agent meta-data set "rocm-used-cache" "false"

          echo ""
          echo "Build complete. Wheels cached for future builds."
      fi
  artifact_paths:
    - "artifacts/rocm-base-wheels/*.whl"
  env:
    DOCKER_BUILDKIT: "1"
    S3_BUCKET: "vllm-wheels"
- block: "Build Neuron release image"
  key: block-neuron-release-image-build
  depends_on: ~

- label: "Build and publish Neuron release image"
  depends_on: block-neuron-release-image-build
  agents:
    queue: neuron-postmerge
  commands:
    - "aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin public.ecr.aws/q9t5s3a7"
    - "DOCKER_BUILDKIT=1 docker build --build-arg max_jobs=16 --build-arg GIT_REPO_CHECK=1 --tag public.ecr.aws/q9t5s3a7/vllm-neuron-release-repo:$(buildkite-agent meta-data get release-version) --tag public.ecr.aws/q9t5s3a7/vllm-neuron-release-repo:latest --progress plain -f docker/Dockerfile.neuron ."
    - "docker push public.ecr.aws/q9t5s3a7/vllm-neuron-release-repo:$(buildkite-agent meta-data get release-version)"
  env:
    DOCKER_BUILDKIT: "1"

# ROCm Job 2: Build vLLM ROCm Wheel
- label: ":python: Build vLLM ROCm Wheel - x86_64"
  id: build-rocm-vllm-wheel
  depends_on:
    - step: build-rocm-base-wheels
      allow_failure: false
  agents:
    queue: cpu_queue_postmerge
  timeout_in_minutes: 180
  commands:
    # Download artifacts and prepare Docker image
    - |
      set -euo pipefail

      # Ensure git tags are up-to-date (Buildkite's default fetch doesn't update tags)
      # This fixes version detection when tags are moved/force-pushed
      echo "Fetching latest tags from origin..."
      git fetch --tags --force origin

      # Log tag information for debugging version detection
      echo "========================================"
      echo "Git Tag Verification"
      echo "========================================"
      echo "Current HEAD: $(git rev-parse HEAD)"
      echo "git describe --tags: $(git describe --tags 2>/dev/null || echo 'No tags found')"
      echo ""
      echo "Recent tags (pointing to commits near HEAD):"
      git tag -l --sort=-creatordate | head -5
      echo "setuptools_scm version detection:"
      pip install -q setuptools_scm 2>/dev/null || true
      python3 -c "import setuptools_scm; print(' Detected version:', setuptools_scm.get_version())" 2>/dev/null || echo " (setuptools_scm not available in this environment)"
      echo "========================================"
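      # Illustrative output when HEAD sits a few commits past a tag (tag and
      # hash here are made up): `git describe --tags` prints something like
      # v0.11.0-12-gabc1234, and setuptools_scm's default scheme would report
      # a matching dev version such as 0.11.1.dev12+gabc1234.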
      # Download wheel artifacts from current build
      echo "Downloading wheel artifacts from current build"
      buildkite-agent artifact download "artifacts/rocm-base-wheels/*.whl" .

      # Download Docker image from S3 (too large for Buildkite artifacts)
      DOCKER_IMAGE_S3_PATH="$$(buildkite-agent meta-data get rocm-docker-image-s3-path 2>/dev/null || echo '')"
      if [ -z "$${DOCKER_IMAGE_S3_PATH}" ]; then
          echo "ERROR: rocm-docker-image-s3-path metadata not found"
          echo "This should have been set by the build-rocm-base-wheels job"
          exit 1
      fi
      echo "Downloading Docker image from $${DOCKER_IMAGE_S3_PATH}"
      mkdir -p artifacts/rocm-docker-image
      aws s3 cp "$${DOCKER_IMAGE_S3_PATH}" artifacts/rocm-docker-image/rocm-base-image.tar.gz

      # Load base Docker image and capture the tag
      echo "Loading base Docker image..."
      LOAD_OUTPUT=$$(gunzip -c artifacts/rocm-docker-image/rocm-base-image.tar.gz | docker load)
      echo "$${LOAD_OUTPUT}"
      # Extract the actual loaded image tag from "Loaded image: <tag>" output
      # This avoids picking up stale images (like rocm/vllm-dev:nightly) already on the agent
      BASE_IMAGE_TAG=$$(echo "$${LOAD_OUTPUT}" | grep "Loaded image:" | sed 's/Loaded image: //')
      if [ -z "$${BASE_IMAGE_TAG}" ]; then
          echo "ERROR: Failed to extract image tag from docker load output"
          echo "Load output was: $${LOAD_OUTPUT}"
          exit 1
      fi
      echo "Loaded base image: $${BASE_IMAGE_TAG}"
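      # Illustrative: for build number 12345 the load output is
      # "Loaded image: rocm/vllm-dev:base-12345", so BASE_IMAGE_TAG becomes
      # rocm/vllm-dev:base-12345 (matching the tag used by `docker save` in Job 1).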
      # Prepare base wheels for Docker build context
      mkdir -p docker/context/base-wheels
      touch docker/context/base-wheels/.keep
      cp artifacts/rocm-base-wheels/*.whl docker/context/base-wheels/
      echo "Base wheels for vLLM build:"
      ls -lh docker/context/base-wheels/

      # Get GPU architectures from meta-data
      PYTORCH_ROCM_ARCH="$$(buildkite-agent meta-data get rocm-pytorch-rocm-arch 2>/dev/null || echo '')"
      PYTORCH_ROCM_ARCH="$${PYTORCH_ROCM_ARCH:-gfx90a;gfx942;gfx950;gfx1100;gfx1101;gfx1200;gfx1201;gfx1150;gfx1151}"

      echo "========================================"
      echo "Building vLLM wheel with:"
      echo "  BUILDKITE_COMMIT: $${BUILDKITE_COMMIT}"
      echo "  BUILDKITE_BRANCH: $${BUILDKITE_BRANCH}"
      echo "  PYTORCH_ROCM_ARCH: $${PYTORCH_ROCM_ARCH}"
      echo "  BASE_IMAGE: $${BASE_IMAGE_TAG}"
      echo "========================================"

      # Build vLLM wheel using local checkout (REMOTE_VLLM=0)
      DOCKER_BUILDKIT=1 docker build \
          --file docker/Dockerfile.rocm \
          --target export_vllm_wheel_release \
          --output type=local,dest=rocm-dist \
          --build-arg BASE_IMAGE="$${BASE_IMAGE_TAG}" \
          --build-arg ARG_PYTORCH_ROCM_ARCH="$${PYTORCH_ROCM_ARCH}" \
          --build-arg REMOTE_VLLM=0 \
          --build-arg GIT_REPO_CHECK=1 \
          --build-arg USE_SCCACHE=1 \
          --build-arg SCCACHE_BUCKET_NAME=vllm-build-sccache \
          --build-arg SCCACHE_REGION_NAME=us-west-2 \
          --build-arg SCCACHE_S3_NO_CREDENTIALS=0 \
          .

      echo "Built vLLM wheel:"
      ls -lh rocm-dist/*.whl

      # Copy wheel to artifacts directory
      mkdir -p artifacts/rocm-vllm-wheel
      cp rocm-dist/*.whl artifacts/rocm-vllm-wheel/
      echo "Final vLLM wheel:"
      ls -lh artifacts/rocm-vllm-wheel/
  artifact_paths:
    - "artifacts/rocm-vllm-wheel/*.whl"
  env:
    DOCKER_BUILDKIT: "1"
    S3_BUCKET: "vllm-wheels"
# ROCm Job 3: Upload Wheels to S3
- label: ":s3: Upload ROCm Wheels to S3"
  id: upload-rocm-wheels
  depends_on:
    - step: build-rocm-vllm-wheel
      allow_failure: false
  agents:
    queue: cpu_queue_postmerge
  timeout_in_minutes: 60
  commands:
    # Download all wheel artifacts and run upload
    - |
      set -euo pipefail

      # Check if upload is enabled (from env var, meta-data, or release branch)
      ROCM_UPLOAD_WHEELS="$${ROCM_UPLOAD_WHEELS:-}"
      if [ -z "$${ROCM_UPLOAD_WHEELS}" ]; then
          # Try to get from meta-data (input form)
          ROCM_UPLOAD_WHEELS="$$(buildkite-agent meta-data get rocm-upload-wheels 2>/dev/null || echo '')"
      fi

      echo "========================================"
      echo "Upload check:"
      echo "  ROCM_UPLOAD_WHEELS: $${ROCM_UPLOAD_WHEELS}"
      echo "  BUILDKITE_BRANCH: $${BUILDKITE_BRANCH}"
      echo "========================================"

      # Skip upload if not enabled
      if [ "$${ROCM_UPLOAD_WHEELS}" != "true" ]; then
          echo "Skipping S3 upload (ROCM_UPLOAD_WHEELS != true, NIGHTLY != 1, not a release branch)"
          echo "To enable upload, set 'Upload Wheels to S3' to 'Yes' in the build configuration"
          exit 0
      fi

      echo "Upload enabled, proceeding..."

      # Download artifacts from current build
      echo "Downloading artifacts from current build"
      buildkite-agent artifact download "artifacts/rocm-base-wheels/*.whl" .
      buildkite-agent artifact download "artifacts/rocm-vllm-wheel/*.whl" .

      # Run upload script
      bash .buildkite/scripts/upload-rocm-wheels.sh
  env:
    DOCKER_BUILDKIT: "1"
    S3_BUCKET: "vllm-wheels"
# ROCm Job 4: Annotate ROCm Wheel Release
- label: ":memo: Annotate ROCm wheel release"
  id: annotate-rocm-release
  depends_on:
    - step: upload-rocm-wheels
      allow_failure: true
    - step: input-release-version
      allow_failure: true
  agents:
    queue: cpu_queue_postmerge
  commands:
    - "bash .buildkite/scripts/annotate-rocm-release.sh"
  env:
    S3_BUCKET: "vllm-wheels"
# ROCm Job 5: Generate Root Index for ROCm Wheels (for release only)
# This is the job that creates the https://wheels.vllm.ai/rocm/ index, allowing
# users to install with `uv pip install vllm --extra-index-url https://wheels.vllm.ai/rocm/`
- block: "Generate Root Index for ROCm Wheels for Release"
  key: block-generate-root-index-rocm-wheels
  depends_on: upload-rocm-wheels

- label: ":package: Generate Root Index for ROCm Wheels for Release"
  depends_on: block-generate-root-index-rocm-wheels
  id: generate-root-index-rocm-wheels
  agents:
    queue: cpu_queue_postmerge
  commands:
    - "bash tools/vllm-rocm/generate-rocm-wheels-root-index.sh"
  env:
    S3_BUCKET: "vllm-wheels"
    VARIANT: "rocm700"
# ROCm Job 6: Build ROCm Release Docker Image
- label: ":docker: Build release image - x86_64 - ROCm"
  id: build-rocm-release-image
  depends_on:
    - step: build-rocm-base-wheels
      allow_failure: false
  agents:
    queue: cpu_queue_postmerge
  timeout_in_minutes: 60
  commands:
    - |
      set -euo pipefail

      # Login to ECR
      aws ecr-public get-login-password --region us-east-1 | \
          docker login --username AWS --password-stdin public.ecr.aws/q9t5s3a7

      # Download Docker image from S3 (set by build-rocm-base-wheels)
      DOCKER_IMAGE_S3_PATH="$$(buildkite-agent meta-data get rocm-docker-image-s3-path 2>/dev/null || echo '')"
      if [ -z "$${DOCKER_IMAGE_S3_PATH}" ]; then
          echo "ERROR: rocm-docker-image-s3-path metadata not found"
          exit 1
      fi

      echo "Downloading base image from $${DOCKER_IMAGE_S3_PATH}"
      mkdir -p artifacts/rocm-docker-image
      aws s3 cp "$${DOCKER_IMAGE_S3_PATH}" artifacts/rocm-docker-image/rocm-base-image.tar.gz

      # Load base Docker image
      echo "Loading base Docker image..."
      LOAD_OUTPUT=$$(gunzip -c artifacts/rocm-docker-image/rocm-base-image.tar.gz | docker load)
      BASE_IMAGE_TAG=$$(echo "$${LOAD_OUTPUT}" | grep "Loaded image:" | sed 's/Loaded image: //')
      echo "Loaded base image: $${BASE_IMAGE_TAG}"

      # Tag and push the base image to ECR
      docker tag "$${BASE_IMAGE_TAG}" public.ecr.aws/q9t5s3a7/vllm-release-repo:$${BUILDKITE_COMMIT}-rocm-base
      docker push public.ecr.aws/q9t5s3a7/vllm-release-repo:$${BUILDKITE_COMMIT}-rocm-base
      echo "Pushed base image: public.ecr.aws/q9t5s3a7/vllm-release-repo:$${BUILDKITE_COMMIT}-rocm-base"

      # Get GPU architectures from meta-data
      PYTORCH_ROCM_ARCH="$$(buildkite-agent meta-data get rocm-pytorch-rocm-arch 2>/dev/null || echo '')"
      PYTORCH_ROCM_ARCH="$${PYTORCH_ROCM_ARCH:-gfx90a;gfx942;gfx950;gfx1100;gfx1101;gfx1200;gfx1201;gfx1150;gfx1151}"

      # Build vLLM ROCm release image using cached base
      DOCKER_BUILDKIT=1 docker build \
          --build-arg max_jobs=16 \
          --build-arg BASE_IMAGE="$${BASE_IMAGE_TAG}" \
          --build-arg ARG_PYTORCH_ROCM_ARCH="$${PYTORCH_ROCM_ARCH}" \
          --build-arg USE_SCCACHE=1 \
          --build-arg SCCACHE_BUCKET_NAME=vllm-build-sccache \
          --build-arg SCCACHE_REGION_NAME=us-west-2 \
          --build-arg SCCACHE_S3_NO_CREDENTIALS=0 \
          --tag public.ecr.aws/q9t5s3a7/vllm-release-repo:$${BUILDKITE_COMMIT}-rocm \
          --target vllm-openai \
          --progress plain \
          -f docker/Dockerfile.rocm .

      # Push to ECR
      docker push public.ecr.aws/q9t5s3a7/vllm-release-repo:$${BUILDKITE_COMMIT}-rocm
      echo "Pushed: public.ecr.aws/q9t5s3a7/vllm-release-repo:$${BUILDKITE_COMMIT}-rocm"
  env:
    DOCKER_BUILDKIT: "1"
    S3_BUCKET: "vllm-wheels"
@@ -1,117 +0,0 @@
#!/bin/bash

set -ex

# Get release version, default to 1.0.0.dev for nightly/per-commit builds
RELEASE_VERSION=$(buildkite-agent meta-data get release-version 2>/dev/null | sed 's/^v//')
if [ -z "${RELEASE_VERSION}" ]; then
    RELEASE_VERSION="1.0.0.dev"
fi
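# Illustrative: a meta-data value of "v0.11.0" becomes "0.11.0" after the sed
# above; when no release-version was provided (nightly/per-commit builds), the
# "1.0.0.dev" fallback is used instead.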
buildkite-agent annotate --style 'info' --context 'release-workflow' << EOF
To download the wheel (by commit):
\`\`\`
aws s3 cp s3://vllm-wheels/${BUILDKITE_COMMIT}/vllm-${RELEASE_VERSION}-cp38-abi3-manylinux_2_31_x86_64.whl .
aws s3 cp s3://vllm-wheels/${BUILDKITE_COMMIT}/vllm-${RELEASE_VERSION}-cp38-abi3-manylinux_2_31_aarch64.whl .

(Optional) For CUDA 13.0:
aws s3 cp s3://vllm-wheels/${BUILDKITE_COMMIT}/vllm-${RELEASE_VERSION}+cu130-cp38-abi3-manylinux_2_35_x86_64.whl .
aws s3 cp s3://vllm-wheels/${BUILDKITE_COMMIT}/vllm-${RELEASE_VERSION}+cu130-cp38-abi3-manylinux_2_35_aarch64.whl .

(Optional) For CPU:
aws s3 cp s3://vllm-wheels/${BUILDKITE_COMMIT}/vllm-${RELEASE_VERSION}+cpu-cp38-abi3-manylinux_2_35_x86_64.whl .
aws s3 cp s3://vllm-wheels/${BUILDKITE_COMMIT}/vllm-${RELEASE_VERSION}+cpu-cp38-abi3-manylinux_2_35_aarch64.whl .
\`\`\`

To download and upload the image:

\`\`\`
# Download images:

docker pull public.ecr.aws/q9t5s3a7/vllm-release-repo:${BUILDKITE_COMMIT}-x86_64
docker pull public.ecr.aws/q9t5s3a7/vllm-release-repo:${BUILDKITE_COMMIT}-aarch64
docker pull public.ecr.aws/q9t5s3a7/vllm-release-repo:${BUILDKITE_COMMIT}-x86_64-cu130
docker pull public.ecr.aws/q9t5s3a7/vllm-release-repo:${BUILDKITE_COMMIT}-aarch64-cu130
docker pull public.ecr.aws/q9t5s3a7/vllm-release-repo:${BUILDKITE_COMMIT}-rocm-base
docker pull public.ecr.aws/q9t5s3a7/vllm-release-repo:${BUILDKITE_COMMIT}-rocm
docker pull public.ecr.aws/q9t5s3a7/vllm-cpu-release-repo:v${RELEASE_VERSION}
docker pull public.ecr.aws/q9t5s3a7/vllm-arm64-cpu-release-repo:v${RELEASE_VERSION}

# Tag and push images:

## CUDA

docker tag public.ecr.aws/q9t5s3a7/vllm-release-repo:${BUILDKITE_COMMIT}-x86_64 vllm/vllm-openai:x86_64
docker tag vllm/vllm-openai:x86_64 vllm/vllm-openai:latest-x86_64
docker tag vllm/vllm-openai:x86_64 vllm/vllm-openai:v${RELEASE_VERSION}-x86_64
docker push vllm/vllm-openai:latest-x86_64
docker push vllm/vllm-openai:v${RELEASE_VERSION}-x86_64

docker tag public.ecr.aws/q9t5s3a7/vllm-release-repo:${BUILDKITE_COMMIT}-x86_64-cu130 vllm/vllm-openai:x86_64-cu130
docker tag vllm/vllm-openai:x86_64-cu130 vllm/vllm-openai:latest-x86_64-cu130
docker tag vllm/vllm-openai:x86_64-cu130 vllm/vllm-openai:v${RELEASE_VERSION}-x86_64-cu130
docker push vllm/vllm-openai:latest-x86_64-cu130
docker push vllm/vllm-openai:v${RELEASE_VERSION}-x86_64-cu130

docker tag public.ecr.aws/q9t5s3a7/vllm-release-repo:${BUILDKITE_COMMIT}-aarch64 vllm/vllm-openai:aarch64
docker tag vllm/vllm-openai:aarch64 vllm/vllm-openai:latest-aarch64
docker tag vllm/vllm-openai:aarch64 vllm/vllm-openai:v${RELEASE_VERSION}-aarch64
docker push vllm/vllm-openai:latest-aarch64
docker push vllm/vllm-openai:v${RELEASE_VERSION}-aarch64

docker tag public.ecr.aws/q9t5s3a7/vllm-release-repo:${BUILDKITE_COMMIT}-aarch64-cu130 vllm/vllm-openai:aarch64-cu130
docker tag vllm/vllm-openai:aarch64-cu130 vllm/vllm-openai:latest-aarch64-cu130
docker tag vllm/vllm-openai:aarch64-cu130 vllm/vllm-openai:v${RELEASE_VERSION}-aarch64-cu130
docker push vllm/vllm-openai:latest-aarch64-cu130
docker push vllm/vllm-openai:v${RELEASE_VERSION}-aarch64-cu130

## ROCm

docker tag public.ecr.aws/q9t5s3a7/vllm-release-repo:${BUILDKITE_COMMIT}-rocm vllm/vllm-openai-rocm:${BUILDKITE_COMMIT}
docker tag vllm/vllm-openai-rocm:${BUILDKITE_COMMIT} vllm/vllm-openai-rocm:latest
docker tag vllm/vllm-openai-rocm:${BUILDKITE_COMMIT} vllm/vllm-openai-rocm:v${RELEASE_VERSION}
docker push vllm/vllm-openai-rocm:latest
docker push vllm/vllm-openai-rocm:v${RELEASE_VERSION}

docker tag public.ecr.aws/q9t5s3a7/vllm-release-repo:${BUILDKITE_COMMIT}-rocm-base vllm/vllm-openai-rocm:${BUILDKITE_COMMIT}-base
docker tag vllm/vllm-openai-rocm:${BUILDKITE_COMMIT}-base vllm/vllm-openai-rocm:latest-base
docker tag vllm/vllm-openai-rocm:${BUILDKITE_COMMIT}-base vllm/vllm-openai-rocm:v${RELEASE_VERSION}-base
docker push vllm/vllm-openai-rocm:latest-base
docker push vllm/vllm-openai-rocm:v${RELEASE_VERSION}-base

## CPU

docker tag public.ecr.aws/q9t5s3a7/vllm-cpu-release-repo:v${RELEASE_VERSION} vllm/vllm-openai-cpu:x86_64
docker tag vllm/vllm-openai-cpu:x86_64 vllm/vllm-openai-cpu:latest-x86_64
docker tag vllm/vllm-openai-cpu:x86_64 vllm/vllm-openai-cpu:v${RELEASE_VERSION}-x86_64
docker push vllm/vllm-openai-cpu:latest-x86_64
docker push vllm/vllm-openai-cpu:v${RELEASE_VERSION}-x86_64

docker tag public.ecr.aws/q9t5s3a7/vllm-arm64-cpu-release-repo:v${RELEASE_VERSION} vllm/vllm-openai-cpu:arm64
docker tag vllm/vllm-openai-cpu:arm64 vllm/vllm-openai-cpu:latest-arm64
docker tag vllm/vllm-openai-cpu:arm64 vllm/vllm-openai-cpu:v${RELEASE_VERSION}-arm64
docker push vllm/vllm-openai-cpu:latest-arm64
docker push vllm/vllm-openai-cpu:v${RELEASE_VERSION}-arm64

# Create multi-arch manifest:

docker manifest rm vllm/vllm-openai:latest
docker manifest create vllm/vllm-openai:latest vllm/vllm-openai:latest-x86_64 vllm/vllm-openai:latest-aarch64
docker manifest create vllm/vllm-openai:v${RELEASE_VERSION} vllm/vllm-openai:v${RELEASE_VERSION}-x86_64 vllm/vllm-openai:v${RELEASE_VERSION}-aarch64
docker manifest push vllm/vllm-openai:latest
docker manifest push vllm/vllm-openai:v${RELEASE_VERSION}

docker manifest rm vllm/vllm-openai:latest-cu130
docker manifest create vllm/vllm-openai:latest-cu130 vllm/vllm-openai:latest-x86_64-cu130 vllm/vllm-openai:latest-aarch64-cu130
docker manifest create vllm/vllm-openai:v${RELEASE_VERSION}-cu130 vllm/vllm-openai:v${RELEASE_VERSION}-x86_64-cu130 vllm/vllm-openai:v${RELEASE_VERSION}-aarch64-cu130
docker manifest push vllm/vllm-openai:latest-cu130
docker manifest push vllm/vllm-openai:v${RELEASE_VERSION}-cu130

docker manifest rm vllm/vllm-openai-cpu:latest || true
docker manifest create vllm/vllm-openai-cpu:latest vllm/vllm-openai-cpu:latest-x86_64 vllm/vllm-openai-cpu:latest-arm64
docker manifest create vllm/vllm-openai-cpu:v${RELEASE_VERSION} vllm/vllm-openai-cpu:v${RELEASE_VERSION}-x86_64 vllm/vllm-openai-cpu:v${RELEASE_VERSION}-arm64
docker manifest push vllm/vllm-openai-cpu:latest
docker manifest push vllm/vllm-openai-cpu:v${RELEASE_VERSION}
\`\`\`
EOF
@@ -1,112 +0,0 @@
#!/bin/bash
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
#
# Generate Buildkite annotation for ROCm wheel release
set -ex

# Get build configuration from meta-data
# Extract ROCm version dynamically from Dockerfile.rocm_base
# BASE_IMAGE format: rocm/dev-ubuntu-22.04:7.0-complete -> extracts "7.0"
ROCM_VERSION=$(grep -E '^ARG BASE_IMAGE=' docker/Dockerfile.rocm_base | sed -E 's/.*:([0-9]+\.[0-9]+).*/\1/' || echo "unknown")
PYTHON_VERSION=$(buildkite-agent meta-data get rocm-python-version 2>/dev/null || echo "3.12")
PYTORCH_ROCM_ARCH=$(buildkite-agent meta-data get rocm-pytorch-rocm-arch 2>/dev/null || echo "gfx90a;gfx942;gfx950;gfx1100;gfx1101;gfx1200;gfx1201;gfx1150;gfx1151")

# TODO: Enable the nightly build for ROCm
# Get release version, default to 1.0.0.dev for nightly/per-commit builds
RELEASE_VERSION=$(buildkite-agent meta-data get release-version 2>/dev/null || echo "")
if [ -z "${RELEASE_VERSION}" ]; then
    RELEASE_VERSION="1.0.0.dev"
fi

# S3 URLs
S3_BUCKET="${S3_BUCKET:-vllm-wheels}"
S3_REGION="${AWS_DEFAULT_REGION:-us-west-2}"
S3_URL="http://${S3_BUCKET}.s3-website-${S3_REGION}.amazonaws.com"

# Format ROCm version for path (e.g., "7.1" -> "rocm71")
ROCM_VERSION_PATH="rocm$(echo "${ROCM_VERSION}" | tr -d '.')"
ROCM_PATH="rocm/${BUILDKITE_COMMIT}/${ROCM_VERSION_PATH}"
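# Illustrative result for ROCM_VERSION="7.1" and a commit abc123 (values made up):
#   ROCM_VERSION_PATH="rocm71"
#   ROCM_PATH="rocm/abc123/rocm71"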
buildkite-agent annotate --style 'success' --context 'rocm-release-workflow' << EOF
## ROCm Wheel and Docker Image Releases

### Build Configuration

| Setting | Value |
|---------|-------|
| **ROCm Version** | ${ROCM_VERSION} |
| **Python Version** | ${PYTHON_VERSION} |
| **GPU Architectures** | ${PYTORCH_ROCM_ARCH} |
| **Branch** | \`${BUILDKITE_BRANCH}\` |
| **Commit** | \`${BUILDKITE_COMMIT}\` |

### :package: Installation

**Install from this build (by commit):**

\`\`\`bash
pip install vllm --extra-index-url ${S3_URL}/${ROCM_PATH}/ --trusted-host ${S3_BUCKET}.s3-website-${S3_REGION}.amazonaws.com

# Example for ROCm ${ROCM_VERSION}:
pip install vllm --extra-index-url ${S3_URL}/rocm/${BUILDKITE_COMMIT}/${ROCM_VERSION_PATH}/ --trusted-host ${S3_BUCKET}.s3-website-${S3_REGION}.amazonaws.com
\`\`\`

**Install from nightly (if published):**

\`\`\`bash
pip install vllm --extra-index-url ${S3_URL}/rocm/nightly/ --trusted-host ${S3_BUCKET}.s3-website-${S3_REGION}.amazonaws.com
\`\`\`

### :floppy_disk: Download Wheels Directly

\`\`\`bash
# List all ROCm wheels
aws s3 ls s3://${S3_BUCKET}/rocm/${BUILDKITE_COMMIT}/${ROCM_VERSION_PATH}/
# Download specific wheels
aws s3 cp s3://${S3_BUCKET}/rocm/${BUILDKITE_COMMIT}/${ROCM_VERSION_PATH}/vllm-*.whl .
aws s3 cp s3://${S3_BUCKET}/rocm/${BUILDKITE_COMMIT}/${ROCM_VERSION_PATH}/torch-*.whl .
aws s3 cp s3://${S3_BUCKET}/rocm/${BUILDKITE_COMMIT}/${ROCM_VERSION_PATH}/triton-*.whl .
aws s3 cp s3://${S3_BUCKET}/rocm/${BUILDKITE_COMMIT}/${ROCM_VERSION_PATH}/triton-kernels-*.whl .
aws s3 cp s3://${S3_BUCKET}/rocm/${BUILDKITE_COMMIT}/${ROCM_VERSION_PATH}/torchvision-*.whl .
aws s3 cp s3://${S3_BUCKET}/rocm/${BUILDKITE_COMMIT}/${ROCM_VERSION_PATH}/torchaudio-*.whl .
aws s3 cp s3://${S3_BUCKET}/rocm/${BUILDKITE_COMMIT}/${ROCM_VERSION_PATH}/amdsmi-*.whl .
aws s3 cp s3://${S3_BUCKET}/rocm/${BUILDKITE_COMMIT}/${ROCM_VERSION_PATH}/amd_aiter-*.whl .
aws s3 cp s3://${S3_BUCKET}/rocm/${BUILDKITE_COMMIT}/${ROCM_VERSION_PATH}/flash-attn-*.whl .
\`\`\`

### :gear: Included Packages
- **vllm**: vLLM with ROCm support
- **torch**: PyTorch built for ROCm ${ROCM_VERSION}
- **triton**: Triton
- **triton-kernels**: Triton kernels
- **torchvision**: TorchVision for ROCm PyTorch
- **torchaudio**: Torchaudio for ROCm PyTorch
- **amdsmi**: AMD SMI Python bindings
- **amd_aiter**: Aiter for ROCm
- **flash-attn**: Flash Attention for ROCm

### :warning: Notes
- These wheels are built for **ROCm ${ROCM_VERSION}** and will NOT work with CUDA GPUs
- Supported GPU architectures: ${PYTORCH_ROCM_ARCH}
- Platform: Linux x86_64 only

### :package: Docker Image Release

To download and upload the image:

\`\`\`
docker pull public.ecr.aws/q9t5s3a7/vllm-release-repo:${BUILDKITE_COMMIT}-rocm-base
docker pull public.ecr.aws/q9t5s3a7/vllm-release-repo:${BUILDKITE_COMMIT}-rocm

docker tag public.ecr.aws/q9t5s3a7/vllm-release-repo:${BUILDKITE_COMMIT}-rocm-base vllm/vllm-openai-rocm:${BUILDKITE_COMMIT}-base
docker tag vllm/vllm-openai-rocm:${BUILDKITE_COMMIT}-base vllm/vllm-openai-rocm:latest-base
docker tag vllm/vllm-openai-rocm:${BUILDKITE_COMMIT}-base vllm/vllm-openai-rocm:v${RELEASE_VERSION}-base
docker push vllm/vllm-openai-rocm:latest-base
docker push vllm/vllm-openai-rocm:v${RELEASE_VERSION}-base

docker tag public.ecr.aws/q9t5s3a7/vllm-release-repo:${BUILDKITE_COMMIT}-rocm vllm/vllm-openai-rocm:${BUILDKITE_COMMIT}
docker tag vllm/vllm-openai-rocm:${BUILDKITE_COMMIT} vllm/vllm-openai-rocm:latest
docker tag vllm/vllm-openai-rocm:${BUILDKITE_COMMIT} vllm/vllm-openai-rocm:v${RELEASE_VERSION}
docker push vllm/vllm-openai-rocm:latest
docker push vllm/vllm-openai-rocm:v${RELEASE_VERSION}
\`\`\`

EOF
@@ -1,140 +0,0 @@
#!/usr/bin/env bash
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
#
# Cache helper for ROCm base wheels
#
# This script manages caching of pre-built ROCm base wheels (torch, triton, etc.)
# to avoid rebuilding them when Dockerfile.rocm_base hasn't changed.
#
# Usage:
#   cache-rocm-base-wheels.sh check    - Check if cache exists, outputs "hit" or "miss"
#   cache-rocm-base-wheels.sh upload   - Upload wheels to cache
#   cache-rocm-base-wheels.sh download - Download wheels from cache
#   cache-rocm-base-wheels.sh key      - Output the cache key
#   cache-rocm-base-wheels.sh path     - Output the full S3 cache path
#
# Environment variables:
#   S3_BUCKET         - S3 bucket name (default: vllm-wheels)
#   PYTHON_VERSION    - Python version (affects cache key)
#   PYTORCH_ROCM_ARCH - GPU architectures (affects cache key)
#
# Note: ROCm version is determined by BASE_IMAGE in Dockerfile.rocm_base,
# so changes to ROCm version are captured by the Dockerfile hash.
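# Example wiring (this mirrors how the release pipeline above invokes the
# helper from its in-repo path):
#   CACHE_KEY=$(.buildkite/scripts/cache-rocm-base-wheels.sh key)
#   if [ "$(.buildkite/scripts/cache-rocm-base-wheels.sh check)" = "hit" ]; then
#       .buildkite/scripts/cache-rocm-base-wheels.sh download
#   else
#       # ... build the wheels, then populate the cache:
#       .buildkite/scripts/cache-rocm-base-wheels.sh upload
#   fi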
set -euo pipefail

BUCKET="${S3_BUCKET:-vllm-wheels}"
DOCKERFILE="docker/Dockerfile.rocm_base"
CACHE_PREFIX="rocm/cache"

# Generate hash from Dockerfile content + build args
generate_cache_key() {
    # Include Dockerfile content
    if [[ ! -f "$DOCKERFILE" ]]; then
        echo "ERROR: Dockerfile not found: $DOCKERFILE" >&2
        exit 1
    fi
    local dockerfile_hash=$(sha256sum "$DOCKERFILE" | cut -c1-16)

    # Include key build args that affect the output
    # These should match the ARGs in Dockerfile.rocm_base that change the build output
    # Note: ROCm version is determined by BASE_IMAGE in the Dockerfile, so it's captured by dockerfile_hash
    local args_string="${PYTHON_VERSION:-}|${PYTORCH_ROCM_ARCH:-}"
    local args_hash=$(echo "$args_string" | sha256sum | cut -c1-8)

    echo "${dockerfile_hash}-${args_hash}"
}
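# Illustrative key shape (hex values made up): "a1b2c3d4e5f60718-9a0b1c2d",
# i.e. the first 16 characters of the Dockerfile hash plus the first 8
# characters of the PYTHON_VERSION|PYTORCH_ROCM_ARCH hash.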
CACHE_KEY=$(generate_cache_key)
CACHE_PATH="s3://${BUCKET}/${CACHE_PREFIX}/${CACHE_KEY}/"

case "${1:-}" in
    check)
        echo "Checking cache for key: ${CACHE_KEY}" >&2
        echo "Cache path: ${CACHE_PATH}" >&2
        echo "Variables used in cache key:" >&2
        echo "  PYTHON_VERSION: ${PYTHON_VERSION:-<not set>}" >&2
        echo "  PYTORCH_ROCM_ARCH: ${PYTORCH_ROCM_ARCH:-<not set>}" >&2

        # Check if cache exists by listing objects
        # We look for at least one .whl file
        echo "Running: aws s3 ls ${CACHE_PATH}" >&2
        S3_OUTPUT=$(aws s3 ls "${CACHE_PATH}" 2>&1) || true
        echo "S3 ls output:" >&2
        echo "$S3_OUTPUT" | head -5 >&2

        if echo "$S3_OUTPUT" | grep -q "\.whl"; then
            echo "hit"
        else
            echo "miss"
        fi
        ;;

    upload)
        echo "========================================"
        echo "Uploading wheels to cache"
        echo "========================================"
        echo "Cache key: ${CACHE_KEY}"
        echo "Cache path: ${CACHE_PATH}"
        echo ""

        if [[ ! -d "artifacts/rocm-base-wheels" ]]; then
            echo "ERROR: artifacts/rocm-base-wheels directory not found" >&2
            exit 1
        fi

        WHEEL_COUNT=$(find artifacts/rocm-base-wheels -maxdepth 1 -name '*.whl' 2>/dev/null | wc -l)
        if [[ "$WHEEL_COUNT" -eq 0 ]]; then
            echo "ERROR: No wheels found in artifacts/rocm-base-wheels/" >&2
            exit 1
        fi

        echo "Uploading $WHEEL_COUNT wheels..."
        aws s3 cp --recursive artifacts/rocm-base-wheels/ "${CACHE_PATH}"

        echo ""
        echo "Cache upload complete!"
        echo "========================================"
        ;;

    download)
        echo "========================================"
        echo "Downloading wheels from cache"
        echo "========================================"
        echo "Cache key: ${CACHE_KEY}"
        echo "Cache path: ${CACHE_PATH}"
        echo ""

        mkdir -p artifacts/rocm-base-wheels
        aws s3 cp --recursive "${CACHE_PATH}" artifacts/rocm-base-wheels/

        echo ""
        echo "Downloaded wheels:"
        find artifacts/rocm-base-wheels -maxdepth 1 -name '*.whl' -exec ls -lh {} \;

        WHEEL_COUNT=$(find artifacts/rocm-base-wheels -maxdepth 1 -name '*.whl' 2>/dev/null | wc -l)
        echo ""
        echo "Total: $WHEEL_COUNT wheels"
        echo "========================================"
        ;;

    key)
        echo "${CACHE_KEY}"
        ;;

    path)
        echo "${CACHE_PATH}"
        ;;

    *)
        echo "Usage: $0 {check|upload|download|key|path}" >&2
        echo "" >&2
        echo "Commands:" >&2
        echo "  check    - Check if cache exists, outputs 'hit' or 'miss'" >&2
        echo "  upload   - Upload wheels from artifacts/rocm-base-wheels/ to cache" >&2
        echo "  download - Download wheels from cache to artifacts/rocm-base-wheels/" >&2
        echo "  key      - Output the cache key" >&2
        echo "  path     - Output the full S3 cache path" >&2
        exit 1
        ;;
esac
@@ -1,205 +0,0 @@
#!/bin/bash
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
#
# Check if Ray LLM can generate lock files that are compatible with this
# version of vllm. Downloads Ray's requirement files and runs a full
# dependency resolution with the installed vllm's constraints to see if
# a valid lock file can be produced.
#
# See: https://github.com/vllm-project/vllm/issues/33599

set -eo pipefail

RAY_BASE_URL="https://raw.githubusercontent.com/ray-project/ray/master/python"

WORK_DIR=$(mktemp -d)
trap 'rm -rf "$WORK_DIR"' EXIT

# Fetch all Ray requirement files used in the LLM depset pipeline
echo ">>> Fetching Ray requirement files"
RAY_FILES=(
    "requirements.txt"
    "requirements/cloud-requirements.txt"
    "requirements/base-test-requirements.txt"
    "requirements/llm/llm-requirements.txt"
    "requirements/llm/llm-test-requirements.txt"
)
for FILE in "${RAY_FILES[@]}"; do
    LOCAL_PATH="${WORK_DIR}/$(basename "$FILE")"
    echo "  ${FILE}"
    curl -fsSL -o "$LOCAL_PATH" "${RAY_BASE_URL}/${FILE}"
done

# Extract installed vllm deps
echo ">>> Extracting installed vllm dependency constraints"
python3 - "${WORK_DIR}/vllm-constraints.txt" <<'PYEOF'
"""Write out the installed vllm's dependencies as pip constraint lines.

Ray uses vllm[audio], so audio-extra deps are included with their extra
markers stripped. The resolver cannot evaluate extra markers for a
package that is not itself being resolved from an index, so we activate
them manually here.
"""
import importlib.metadata
import re
import sys

out_path = sys.argv[1]
raw_reqs = importlib.metadata.requires("vllm") or []

# Ray uses vllm[audio] – activate that extra.
ACTIVE_EXTRAS = {"audio"}
EXTRA_RE = re.compile(r"""extra\s*==\s*['"]([^'"]+)['"]""")

lines = []
for r in raw_reqs:
    if ";" not in r:
        # Unconditional dep — always include.
        lines.append(r.strip())
        continue

    req_part, _, marker_part = r.partition(";")
    marker_part = marker_part.strip()

    extra_matches = EXTRA_RE.findall(marker_part)
    if not extra_matches:
        # Non-extra marker (python_version, etc.) — keep as-is.
        lines.append(r.strip())
        continue

    if not ACTIVE_EXTRAS.intersection(extra_matches):
        continue  # Skip inactive extras (tensorizer, bench, …).

    # Strip the extra== conditions but keep any remaining markers
    # (e.g. python_version).
    cleaned = EXTRA_RE.sub("", marker_part)
    cleaned = re.sub(r"\band\b\s*\band\b", "and", cleaned)
    cleaned = re.sub(r"^\s*and\s+|\s+and\s*$", "", cleaned).strip()

    if cleaned:
        lines.append(f"{req_part.strip()} ; {cleaned}")
    else:
        lines.append(req_part.strip())

with open(out_path, "w") as f:
    for line in lines:
        f.write(line + "\n")

print(f"Wrote {len(lines)} constraints to {out_path}")
PYEOF
|
|
||||||
echo ">>> Installed vllm deps (first 20 lines):"
|
|
||||||
head -20 "${WORK_DIR}/vllm-constraints.txt"
|
|
||||||
|
|
||||||
# Remove Ray's vllm pin — the installed vllm's transitive deps
|
|
||||||
# (written above) replace it in the resolution. vllm itself cannot
|
|
||||||
# be resolved from PyPI for in-development versions, so we test
|
|
||||||
# whether Ray's requirements can coexist with vllm's dependency
|
|
||||||
# constraints instead.
|
|
||||||
sed -i '/^vllm/d' "${WORK_DIR}/llm-requirements.txt"
|
|
||||||
|
|
||||||
# Install uv if needed
|
|
||||||
if ! command -v uv &>/dev/null; then
|
|
||||||
echo ">>> Installing uv"
|
|
||||||
pip install uv -q
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Resolve: given vllm's constraints, can Ray compile a lock file?
|
|
||||||
#
|
|
||||||
# vllm's dependency constraints are the fixed side — Ray is flexible and
|
|
||||||
# can regenerate its lock files. We pass vllm's constraints via -c so
|
|
||||||
# the resolver treats them as non-negotiable bounds, then check whether
|
|
||||||
# Ray's own requirements can still be satisfied within those bounds.
|
|
||||||
echo ""
|
|
||||||
echo "============================================================"
|
|
||||||
echo ">>> Resolving: Can Ray generate compatible lock files?"
|
|
||||||
echo "============================================================"
|
|
||||||
|
|
||||||
set +e
|
|
||||||
uv pip compile \
|
|
||||||
"${WORK_DIR}/requirements.txt" \
|
|
||||||
"${WORK_DIR}/cloud-requirements.txt" \
|
|
||||||
"${WORK_DIR}/base-test-requirements.txt" \
|
|
||||||
"${WORK_DIR}/llm-requirements.txt" \
|
|
||||||
"${WORK_DIR}/llm-test-requirements.txt" \
|
|
||||||
-c "${WORK_DIR}/vllm-constraints.txt" \
|
|
||||||
--python-version 3.12 \
|
|
||||||
--python-platform x86_64-manylinux_2_31 \
|
|
||||||
--extra-index-url https://download.pytorch.org/whl/cu129 \
|
|
||||||
--index-strategy unsafe-best-match \
|
|
||||||
--unsafe-package setuptools \
|
|
||||||
--unsafe-package ray \
|
|
||||||
--no-header \
|
|
||||||
-o "${WORK_DIR}/resolved.txt" \
|
|
||||||
2>&1
|
|
||||||
EXIT_CODE=$?
|
|
||||||
set -e
|
|
||||||
|
|
||||||
echo ""
|
|
||||||
echo "=========================================="
|
|
||||||
if [ $EXIT_CODE -eq 0 ]; then
|
|
||||||
echo "SUCCESS: Ray can generate lock files compatible with this vllm."
|
|
||||||
echo ""
|
|
||||||
echo "Key resolved versions:"
|
|
||||||
grep -E '^(protobuf|torch|numpy|transformers)==' \
|
|
||||||
"${WORK_DIR}/resolved.txt" | sort || true
|
|
||||||
echo "=========================================="
|
|
||||||
exit 0
|
|
||||||
fi
|
|
||||||
|
|
||||||
echo "FAILURE: Ray cannot generate lock files compatible with this vllm."
|
|
||||||
echo "This means a fundamental dependency conflict exists that Ray"
|
|
||||||
echo "cannot resolve by regenerating its lock files."
|
|
||||||
echo "See: https://github.com/vllm-project/vllm/issues/33599"
|
|
||||||
echo "=========================================="
|
|
||||||
|
|
||||||
# Buildkite annotation
|
|
||||||
if [ -f /usr/bin/buildkite-agent ]; then
|
|
||||||
buildkite-agent annotate --style 'warning' --context 'ray-compat' << EOF
|
|
||||||
### :warning: Ray Dependency Compatibility Warning
|
|
||||||
This PR introduces dependencies that **cannot** be resolved with Ray's requirements.
|
|
||||||
Ray would not be able to regenerate its lock files to accommodate this vllm version.
|
|
||||||
|
|
||||||
Please check the **Ray Dependency Compatibility Check** step logs for details.
|
|
||||||
See [issue #33599](https://github.com/vllm-project/vllm/issues/33599) for context.
|
|
||||||
EOF
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Notify Slack if webhook is configured.
|
|
||||||
if [ -n "$RAY_COMPAT_SLACK_WEBHOOK_URL" ]; then
|
|
||||||
echo ">>> Sending Slack notification"
|
|
||||||
# Single quotes are intentional: the f-string expressions are Python, not shell.
|
|
||||||
# shellcheck disable=SC2016
|
|
||||||
PAYLOAD=$(python3 -c '
|
|
||||||
import json, os, sys
|
|
||||||
pr = os.getenv("BUILDKITE_PULL_REQUEST", "N/A")
|
|
||||||
branch = os.getenv("BUILDKITE_BRANCH", "unknown")
|
|
||||||
url = os.getenv("BUILDKITE_BUILD_URL", "#")
|
|
||||||
data = {
|
|
||||||
"text": ":warning: Ray Dependency Compatibility Check Failed",
|
|
||||||
"blocks": [{
|
|
||||||
"type": "section",
|
|
||||||
"text": {
|
|
||||||
"type": "mrkdwn",
|
|
||||||
"text": (
|
|
||||||
"*:warning: Ray Dependency Compatibility Check Failed*\n"
|
|
||||||
f"PR #{pr} on branch `{branch}` introduces dependencies "
|
|
||||||
f"that cannot be resolved with Ray'\''s requirements.\n"
|
|
||||||
f"<{url}|View Build>"
|
|
||||||
),
|
|
||||||
},
|
|
||||||
}],
|
|
||||||
}
|
|
||||||
print(json.dumps(data))
|
|
||||||
')
|
|
||||||
|
|
||||||
HTTP_CODE=$(curl -s -o /dev/null -w "%{http_code}" -X POST "$RAY_COMPAT_SLACK_WEBHOOK_URL" \
|
|
||||||
-H 'Content-type: application/json' \
|
|
||||||
-d "$PAYLOAD")
|
|
||||||
echo " Slack webhook response: $HTTP_CODE"
|
|
||||||
else
|
|
||||||
echo ">>> Skipping Slack notification (RAY_COMPAT_SLACK_WEBHOOK_URL not set)"
|
|
||||||
fi
|
|
||||||
|
|
||||||
exit 1
|
|
||||||
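To make the marker-stripping step in the heredoc concrete: an extra == "audio" clause is deleted from the environment marker while unrelated markers such as python_version survive. A standalone sketch with a made-up requirement string:

    import re

    EXTRA_RE = re.compile(r"""extra\s*==\s*['"]([^'"]+)['"]""")

    req = 'librosa>=0.10 ; python_version >= "3.10" and extra == "audio"'
    req_part, _, marker = req.partition(";")
    cleaned = EXTRA_RE.sub("", marker.strip())
    # Drop the dangling "and" left behind by the removed clause.
    cleaned = re.sub(r"^\s*and\s+|\s+and\s*$", "", cleaned).strip()
    print(f"{req_part.strip()} ; {cleaned}" if cleaned else req_part.strip())
    # -> librosa>=0.10 ; python_version >= "3.10"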
@@ -1,242 +0,0 @@
#!/bin/bash
#
# cherry-pick-from-milestone.sh
# Find commits from a GitHub milestone that are missing from the current branch
# and output them in chronological order for cherry-picking.
#
# Usage: ./cherry-pick-from-milestone.sh <milestone> [--dry-run] [--execute]
#

set -euo pipefail

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

usage() {
    cat <<EOF
Usage: $(basename "$0") <milestone> [options]

Find commits from a GitHub milestone that need to be cherry-picked into the current branch.

Arguments:
  milestone        The GitHub milestone name (e.g., v0.14.0)

Options:
  --dry-run        Show the cherry-pick commands without executing (default)
  --execute        Actually execute the cherry-picks
  --main-branch    Specify the main branch name (default: main)
  --help           Show this help message

Examples:
  $(basename "$0") v0.14.0
  $(basename "$0") v0.14.0 --dry-run
  $(basename "$0") v0.14.0 --execute
  $(basename "$0") v0.14.0 --main-branch master
EOF
    exit 1
}

log_info() {
    echo -e "${BLUE}[INFO]${NC} $1"
}

log_success() {
    echo -e "${GREEN}[OK]${NC} $1"
}

log_warn() {
    echo -e "${YELLOW}[WARN]${NC} $1"
}

log_error() {
    echo -e "${RED}[ERROR]${NC} $1" >&2
}

# Default values
MILESTONE=""
DRY_RUN=true
MAIN_BRANCH="main"

# Parse arguments
while [[ $# -gt 0 ]]; do
    case $1 in
        --dry-run)
            DRY_RUN=true
            shift
            ;;
        --execute)
            DRY_RUN=false
            shift
            ;;
        --main-branch)
            MAIN_BRANCH="$2"
            shift 2
            ;;
        --help|-h)
            usage
            ;;
        -*)
            log_error "Unknown option: $1"
            usage
            ;;
        *)
            if [[ -z "$MILESTONE" ]]; then
                MILESTONE="$1"
            else
                log_error "Unexpected argument: $1"
                usage
            fi
            shift
            ;;
    esac
done

# Validate milestone argument
if [[ -z "$MILESTONE" ]]; then
    log_error "Milestone is required"
    usage
fi

# Check if we're in a git repository
if ! git rev-parse --is-inside-work-tree &>/dev/null; then
    log_error "Not in a git repository"
    exit 1
fi

# Check if gh CLI is available
if ! command -v gh &>/dev/null; then
    log_error "GitHub CLI (gh) is not installed"
    exit 1
fi

# Check if authenticated with gh
if ! gh auth status &>/dev/null; then
    log_error "Not authenticated with GitHub CLI. Run 'gh auth login' first."
    exit 1
fi

CURRENT_BRANCH=$(git branch --show-current)
log_info "Current branch: ${CURRENT_BRANCH}"
log_info "Main branch: ${MAIN_BRANCH}"
log_info "Milestone: ${MILESTONE}"
echo ""

# Fetch latest from remote
log_info "Fetching latest from remote..."
git fetch origin "$MAIN_BRANCH" --quiet

# Get merged PRs from the milestone, sorted by merge date
log_info "Fetching merged PRs from milestone '${MILESTONE}'..."

# Store PR data in a temp file
PR_DATA=$(mktemp)
trap 'rm -f "$PR_DATA"' EXIT

if ! gh pr list --state merged --search "milestone:${MILESTONE}" \
    --limit 1000 \
    --json number,title,mergeCommit,mergedAt \
    --jq 'sort_by(.mergedAt) | .[] | "\(.mergeCommit.oid)\t\(.number)\t\(.title)"' > "$PR_DATA" 2>/dev/null; then
    log_error "Failed to fetch PRs from milestone '${MILESTONE}'"
    log_error "This could be due to:"
    log_error "  - Milestone does not exist"
    log_error "  - Network/authentication issues"
    log_error "  - Invalid milestone name format"
    exit 1
fi

if [[ ! -s "$PR_DATA" ]]; then
    log_warn "No merged PRs found for milestone '${MILESTONE}'"
    exit 0
fi

TOTAL_PRS=$(wc -l < "$PR_DATA")
log_info "Found ${TOTAL_PRS} merged PR(s) in milestone"
echo ""

# Find commits that are missing from current branch
MISSING_COMMITS=()
MISSING_INFO=()

while IFS=$'\t' read -r sha pr_number title; do
    # Skip if SHA is empty or null
    if [[ -z "$sha" || "$sha" == "null" ]]; then
        log_warn "PR #${pr_number} has no merge commit SHA, skipping"
        continue
    fi

    # Check if this commit is already in the current branch
    if git merge-base --is-ancestor "$sha" HEAD 2>/dev/null; then
        log_success "PR #${pr_number} already in branch: ${title:0:60}"
    else
        log_warn "PR #${pr_number} MISSING: ${title:0:60}"
        MISSING_COMMITS+=("$sha")
        MISSING_INFO+=("$sha PR #${pr_number}: ${title}")
    fi
done < "$PR_DATA"

echo ""

if [[ ${#MISSING_COMMITS[@]} -eq 0 ]]; then
    log_success "All PRs from milestone '${MILESTONE}' are already in the current branch!"
    exit 0
fi

log_info "Found ${#MISSING_COMMITS[@]} missing commit(s) to cherry-pick"
echo ""

# Output the cherry-pick commands
echo "=========================================="
echo "Cherry-pick commands (in chronological order):"
echo "=========================================="
echo ""

for info in "${MISSING_INFO[@]}"; do
    echo "# $info"
done
echo ""

echo "# Run these commands to cherry-pick all missing commits:"
echo "git cherry-pick ${MISSING_COMMITS[*]}"
echo ""

# Or one by one
echo "# Or cherry-pick one at a time:"
for sha in "${MISSING_COMMITS[@]}"; do
    echo "git cherry-pick $sha"
done
echo ""

# Execute if requested
if [[ "$DRY_RUN" == false ]]; then
    echo "=========================================="
    log_info "Executing cherry-picks..."
    echo "=========================================="

    for i in "${!MISSING_COMMITS[@]}"; do
        sha="${MISSING_COMMITS[$i]}"
        info="${MISSING_INFO[$i]}"

        echo ""
        log_info "Cherry-picking: $info"

        if git cherry-pick "$sha"; then
            log_success "Successfully cherry-picked $sha"
        else
            log_error "Failed to cherry-pick $sha"
            log_error "Resolve conflicts and run 'git cherry-pick --continue', or 'git cherry-pick --abort' to cancel"
            exit 1
        fi
    done

    echo ""
    log_success "All cherry-picks completed successfully!"
else
    echo "=========================================="
    echo -e "${YELLOW}Dry run mode - no changes made${NC}"
    echo "Run with --execute to perform the cherry-picks"
    echo "=========================================="
fi
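The missing-commit detection above hinges on one git primitive: git merge-base --is-ancestor exits 0 when the commit is already reachable from the branch tip. A hedged Python equivalent, with a placeholder SHA:

    import subprocess

    def is_in_branch(sha: str, ref: str = "HEAD") -> bool:
        # Exit code 0 means `sha` is an ancestor of `ref`,
        # i.e. the commit is already in the branch.
        result = subprocess.run(
            ["git", "merge-base", "--is-ancestor", sha, ref],
            capture_output=True,
        )
        return result.returncode == 0

    print(is_in_branch("0123abc"))  # placeholder SHA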
@@ -1,17 +0,0 @@
#!/bin/bash
# Usage: ./ci_clean_log.sh ci.log
# This script strips timestamps and color codes from CI log files.

# Check if argument is given
if [ $# -lt 1 ]; then
    echo "Usage: $0 ci.log"
    exit 1
fi

INPUT_FILE="$1"

# Strip timestamps
sed -i 's/^\[[0-9]\{4\}-[0-9]\{2\}-[0-9]\{2\}T[0-9]\{2\}:[0-9]\{2\}:[0-9]\{2\}Z\] //' "$INPUT_FILE"

# Strip colorization
sed -i -r 's/\x1B\[[0-9;]*[mK]//g' "$INPUT_FILE"
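The same two-pass cleanup translates directly to Python regexes; a sketch for processing one log line at a time:

    import re

    TIMESTAMP = re.compile(r"^\[\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z\] ")
    ANSI = re.compile(r"\x1B\[[0-9;]*[mK]")

    def clean_line(line: str) -> str:
        # Strip the leading CI timestamp first, then any color codes.
        return ANSI.sub("", TIMESTAMP.sub("", line))

    print(clean_line("[2025-01-01T00:00:00Z] \x1b[32mPASS\x1b[0m"))  # -> PASS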
@@ -1,127 +0,0 @@
#!/bin/bash

set -ex

# Clean up old nightly builds from DockerHub, keeping only the last 14 builds
# This script uses DockerHub API to list and delete old tags with specified prefix
# Usage: cleanup-nightly-builds.sh [TAG_PREFIX]
# Example: cleanup-nightly-builds.sh "nightly-" or cleanup-nightly-builds.sh "cu130-nightly-"

# Get tag prefix from argument, default to "nightly-" if not provided
TAG_PREFIX="${1:-nightly-}"

echo "Cleaning up tags with prefix: $TAG_PREFIX"

# DockerHub API endpoint for vllm/vllm-openai repository
REPO_API_URL="https://hub.docker.com/v2/repositories/vllm/vllm-openai/tags"

# Get DockerHub credentials from environment
if [ -z "$DOCKERHUB_TOKEN" ]; then
    echo "Error: DOCKERHUB_TOKEN environment variable is not set"
    exit 1
fi

if [ -z "$DOCKERHUB_USERNAME" ]; then
    echo "Error: DOCKERHUB_USERNAME environment variable is not set"
    exit 1
fi

# Get DockerHub bearer token
echo "Getting DockerHub bearer token..."
set +x
BEARER_TOKEN=$(curl -s -X POST \
    -H "Content-Type: application/json" \
    -d "{\"username\": \"$DOCKERHUB_USERNAME\", \"password\": \"$DOCKERHUB_TOKEN\"}" \
    "https://hub.docker.com/v2/users/login" | jq -r '.token')
set -x

if [ -z "$BEARER_TOKEN" ] || [ "$BEARER_TOKEN" = "null" ]; then
    echo "Error: Failed to get DockerHub bearer token"
    exit 1
fi

# Function to get all tags from DockerHub
get_all_tags() {
    local page=1
    local all_tags=""

    while true; do
        set +x
        local response=$(curl -s -H "Authorization: Bearer $BEARER_TOKEN" \
            "$REPO_API_URL?page=$page&page_size=100")
        set -x

        # Get both last_updated timestamp and tag name, separated by |
        local tags=$(echo "$response" | jq -r --arg prefix "$TAG_PREFIX" '.results[] | select(.name | startswith($prefix)) | "\(.last_updated)|\(.name)"')

        if [ -z "$tags" ]; then
            break
        fi

        all_tags="$all_tags$tags"$'\n'
        page=$((page + 1))
    done

    # Sort by timestamp (newest first) and extract just the tag names
    echo "$all_tags" | sort -r | cut -d'|' -f2
}

delete_tag() {
    local tag_name="$1"
    echo "Deleting tag: $tag_name"

    local delete_url="https://hub.docker.com/v2/repositories/vllm/vllm-openai/tags/$tag_name"
    set +x
    local response=$(curl -s -X DELETE -H "Authorization: Bearer $BEARER_TOKEN" "$delete_url")
    set -x

    if echo "$response" | jq -e '.detail' > /dev/null 2>&1; then
        echo "Warning: Failed to delete tag $tag_name: $(echo "$response" | jq -r '.detail')"
    else
        echo "Successfully deleted tag: $tag_name"
    fi
}

# Get all nightly- prefixed tags, sorted by last_updated timestamp (newest first)
echo "Fetching all tags from DockerHub..."
all_tags=$(get_all_tags)

if [ -z "$all_tags" ]; then
    echo "No tags found to clean up"
    exit 0
fi

# Count total tags
total_tags=$(echo "$all_tags" | wc -l)
echo "Found $total_tags tags"

# Keep only the last 14 builds (including the current one)
tags_to_keep=14
tags_to_delete=$((total_tags - tags_to_keep))

if [ $tags_to_delete -le 0 ]; then
    echo "No tags need to be deleted (only $total_tags tags found, keeping $tags_to_keep)"
    exit 0
fi

echo "Will delete $tags_to_delete old tags, keeping the newest $tags_to_keep"

# Get tags to delete (skip the first $tags_to_keep tags)
tags_to_delete_list=$(echo "$all_tags" | tail -n +$((tags_to_keep + 1)))

if [ -z "$tags_to_delete_list" ]; then
    echo "No tags to delete"
    exit 0
fi

# Delete old tags
echo "Deleting old tags..."
while IFS= read -r tag; do
    if [ -n "$tag" ]; then
        delete_tag "$tag"
        # Add a small delay to avoid rate limiting
        sleep 1
    fi
done <<< "$tags_to_delete_list"

echo "Cleanup completed successfully"
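The keep-newest-14 selection reduces to sorting (last_updated, name) pairs and slicing, mirroring the sort -r | cut pipeline in get_all_tags. A sketch with made-up tag data:

    tags = [
        ("2025-11-28T02:00:00Z", "nightly-aaa"),
        ("2025-11-27T02:00:00Z", "nightly-bbb"),
        ("2025-11-26T02:00:00Z", "nightly-ccc"),
    ]
    tags_to_keep = 2  # the real script keeps 14
    ordered = [name for _, name in sorted(tags, reverse=True)]  # newest first
    to_delete = ordered[tags_to_keep:]
    print(to_delete)  # ['nightly-ccc']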
@@ -1,468 +0,0 @@
#!/usr/bin/env python3
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

# do not complain about line length (for docstring)
# ruff: noqa: E501

import argparse
import json
import sys
from dataclasses import asdict, dataclass
from datetime import datetime
from pathlib import Path
from typing import Any
from urllib.parse import quote

import regex as re


def normalize_package_name(name: str) -> str:
    """
    Normalize package name according to PEP 503.
    https://peps.python.org/pep-0503/#normalized-names

    Replace runs of underscores, hyphens, and periods with a single hyphen,
    and lowercase the result.
    """
    return re.sub(r"[-_.]+", "-", name).lower()


if not sys.version_info >= (3, 12):
    raise RuntimeError("This script requires Python 3.12 or higher.")

INDEX_HTML_TEMPLATE = """<!DOCTYPE html>
<html>
<!-- {comment} -->
<meta name="pypi:repository-version" content="1.0">
<body>
{items}
</body>
</html>
"""


@dataclass
class WheelFileInfo:
    package_name: str
    version: str
    build_tag: str | None
    python_tag: str
    abi_tag: str
    platform_tag: str
    variant: str | None
    filename: str


def parse_from_filename(file: str) -> WheelFileInfo:
    """
    Parse wheel file name to extract metadata.

    The format of wheel names:
    {package_name}-{version}(-{build_tag})?-{python_tag}-{abi_tag}-{platform_tag}.whl
    All versions could contain a variant like '+cu129' or '.cpu' or `.rocm` (or not).
    Example:
        vllm-0.11.0-cp38-abi3-manylinux1_x86_64.whl
        vllm-0.10.2rc2+cu129-cp38-abi3-manylinux2014_aarch64.whl
        vllm-0.11.1rc8.dev14+gaa384b3c0-cp38-abi3-manylinux2014_aarch64.whl
        vllm-0.11.1rc8.dev14+gaa384b3c0.cu130-cp38-abi3-manylinux1_x86_64.whl
    """
    wheel_file_re = re.compile(
        r"^(?P<package_name>.+)-(?P<version>[^-]+?)(-(?P<build_tag>[^-]+))?-(?P<python_tag>[^-]+)-(?P<abi_tag>[^-]+)-(?P<platform_tag>[^-]+)\.whl$"
    )
    match = wheel_file_re.match(file)
    if not match:
        raise ValueError(f"Invalid wheel file name: {file}")

    package_name = match.group("package_name")
    version = match.group("version")
    build_tag = match.group("build_tag")
    python_tag = match.group("python_tag")
    abi_tag = match.group("abi_tag")
    platform_tag = match.group("platform_tag")

    # extract variant from version
    variant = None
    if "dev" in version:
        ver_after_dev = version.split("dev")[-1]
        if "." in ver_after_dev:
            variant = ver_after_dev.split(".")[-1]
            version = version.removesuffix("." + variant)
    else:
        if "+" in version:
            version_part, suffix = version.split("+", 1)
            # Only treat known patterns as variants (rocmXXX, cuXXX, cpu)
            # Git hashes and other suffixes are NOT variants
            if suffix.startswith(("rocm", "cu", "cpu")):
                variant = suffix
                version = version_part
            # Otherwise keep the full version string (variant stays None)

    return WheelFileInfo(
        package_name=package_name,
        version=version,
        build_tag=build_tag,
        python_tag=python_tag,
        abi_tag=abi_tag,
        platform_tag=platform_tag,
        variant=variant,
        filename=file,
    )


def generate_project_list(subdir_names: list[str], comment: str = "") -> str:
    """
    Generate project list HTML content linking to each project & variant subdirectory.
    """
    href_tags = []
    for name in sorted(subdir_names):
        name = name.strip("/").strip(".")
        href_tags.append(f'    <a href="{name}/">{name}/</a><br/>')
    return INDEX_HTML_TEMPLATE.format(items="\n".join(href_tags), comment=comment)


def generate_package_index_and_metadata(
    wheel_files: list[WheelFileInfo],
    wheel_base_dir: Path,
    index_base_dir: Path,
    comment: str = "",
) -> tuple[str, str]:
    """
    Generate package index HTML content for a specific package, linking to actual wheel files.
    """
    href_tags = []
    metadata = []
    for file in sorted(wheel_files, key=lambda x: x.filename):
        relative_path = (
            wheel_base_dir.relative_to(index_base_dir, walk_up=True) / file.filename
        )
        # handle with '+' in URL, and avoid double-encoding '/' and already-encoded '%2B'
        # NOTE: this is AWS S3 specific behavior!
        file_path_quoted = quote(relative_path.as_posix(), safe=":%/")
        href_tags.append(f'    <a href="{file_path_quoted}">{file.filename}</a><br/>')
        file_meta = asdict(file)
        file_meta["path"] = file_path_quoted
        metadata.append(file_meta)
    index_str = INDEX_HTML_TEMPLATE.format(items="\n".join(href_tags), comment=comment)
    metadata_str = json.dumps(metadata, indent=2)
    return index_str, metadata_str


def generate_index_and_metadata(
    whl_files: list[str],
    wheel_base_dir: Path,
    index_base_dir: Path,
    default_variant: str | None = None,
    alias_to_default: str | None = None,
    comment: str = "",
):
    """
    Generate index for all wheel files.

    Args:
        whl_files (list[str]): List of wheel files (must be directly under `wheel_base_dir`).
        wheel_base_dir (Path): Base directory for wheel files.
        index_base_dir (Path): Base directory to store index files.
        default_variant (str | None): The default variant name, if any.
        alias_to_default (str | None): Alias variant name for the default variant, if any.
        comment (str | None): Optional comment to include in the generated HTML files.

    First, parse all wheel files to extract metadata.
    We need to collect all wheel files for each variant, and generate an index for it (in a subdirectory).
    The index for the default variant (if any) is generated in the root index directory.

    If `default_variant` is provided, all wheels must have variant suffixes, and the default variant index
    is purely a copy of the corresponding variant index, with only the links adjusted.
    Otherwise, all wheels without variant suffixes are treated as the default variant.

    If `alias_to_default` is provided, an additional alias subdirectory is created, it has the same content
    as the default variant index, but the links are adjusted accordingly.

    Index directory structure:
    index_base_dir/          (hosted at wheels.vllm.ai/{nightly,$commit,$version}/)
        index.html           # project list, linking to "vllm/" and other packages, and all variant subdirectories
        vllm/
            index.html       # package index, pointing to actual files in wheel_base_dir (relative path)
            metadata.json    # machine-readable metadata for all wheels in this package
        cpu/                 # cpu variant subdirectory
            index.html
            vllm/
                index.html
                metadata.json
        cu129/               # cu129 is actually the alias to default variant
            index.html
            vllm/
                index.html
                metadata.json
        cu130/               # cu130 variant subdirectory
            index.html
            vllm/
                index.html
                metadata.json
        ...

    metadata.json stores a dump of all wheel files' metadata in a machine-readable format:
    [
        {
            "package_name": "vllm",
            "version": "0.10.2rc2",
            "build_tag": null,
            "python_tag": "cp38",
            "abi_tag": "abi3",
            "platform_tag": "manylinux2014_aarch64",
            "variant": "cu129",
            "filename": "vllm-0.10.2rc2+cu129-cp38-abi3-manylinux2014_aarch64.whl",
            "path": "../vllm-0.10.2rc2%2Bcu129-cp38-abi3-manylinux2014_aarch64.whl"  # to be concatenated with the directory URL and URL-encoded
        },
        ...
    ]
    """

    parsed_files = [parse_from_filename(f) for f in whl_files]

    if not parsed_files:
        print("No wheel files found, skipping index generation.")
        return

    # For ROCm builds: inherit variant from vllm wheel
    # All ROCm wheels should share the same variant as vllm
    rocm_variant = None
    for file in parsed_files:
        if (
            file.package_name == "vllm"
            and file.variant
            and file.variant.startswith("rocm")
        ):
            rocm_variant = file.variant
            print(f"Detected ROCm variant from vllm: {rocm_variant}")
            break

    # Apply ROCm variant to all wheels without a variant
    if rocm_variant:
        for file in parsed_files:
            if file.variant is None:
                file.variant = rocm_variant
                print(f"Inherited variant '{rocm_variant}' for {file.filename}")

    # Group by variant
    variant_to_files: dict[str, list[WheelFileInfo]] = {}
    for file in parsed_files:
        variant = file.variant or "default"
        if variant not in variant_to_files:
            variant_to_files[variant] = []
        variant_to_files[variant].append(file)

    print(f"Found variants: {list(variant_to_files.keys())}")

    # sanity check for default variant
    if default_variant:
        if "default" in variant_to_files:
            raise ValueError(
                "All wheel files must have variant suffixes when `default_variant` is specified."
            )
        if default_variant not in variant_to_files:
            raise ValueError(
                f"Default variant '{default_variant}' not found among wheel files."
            )

    if alias_to_default:
        if "default" not in variant_to_files:
            # e.g. only some wheels are uploaded to S3 currently
            print(
                "[WARN] Alias to default variant specified, but no default variant found."
            )
        elif alias_to_default in variant_to_files:
            raise ValueError(
                f"Alias variant name '{alias_to_default}' already exists among wheel files."
            )
        else:
            variant_to_files[alias_to_default] = variant_to_files["default"].copy()
            print(f"Alias variant '{alias_to_default}' created for default variant.")

    # Generate comment in HTML header
    comment_str = f" ({comment})" if comment else ""
    comment_tmpl = f"Generated on {datetime.now().isoformat()}{comment_str}"

    # Generate index for each variant
    subdir_names = set()
    for variant, files in variant_to_files.items():
        if variant == "default":
            variant_dir = index_base_dir
        else:
            variant_dir = index_base_dir / variant
            subdir_names.add(variant)

        variant_dir.mkdir(parents=True, exist_ok=True)

        # gather all package names in this variant (normalized per PEP 503)
        packages = set(normalize_package_name(f.package_name) for f in files)
        if variant == "default":
            # these packages should also appear in the "project list"
            # generate after all variants are processed
            subdir_names = subdir_names.union(packages)
        else:
            # generate project list for this variant directly
            project_list_str = generate_project_list(sorted(packages), comment_tmpl)
            with open(variant_dir / "index.html", "w") as f:
                f.write(project_list_str)

        for package in packages:
            # filter files belonging to this package only (compare normalized names)
            package_files = [
                f for f in files if normalize_package_name(f.package_name) == package
            ]
            package_dir = variant_dir / package
            package_dir.mkdir(parents=True, exist_ok=True)
            index_str, metadata_str = generate_package_index_and_metadata(
                package_files, wheel_base_dir, package_dir, comment
            )
            with open(package_dir / "index.html", "w") as f:
                f.write(index_str)
            with open(package_dir / "metadata.json", "w") as f:
                f.write(metadata_str)

    # Generate top-level project list index
    project_list_str = generate_project_list(sorted(subdir_names), comment_tmpl)
    with open(index_base_dir / "index.html", "w") as f:
        f.write(project_list_str)


if __name__ == "__main__":
    """
    Arguments:
    --version <version> : version string for the current build (e.g., commit hash)
    --wheel-dir <wheel_directory> : directory containing wheel files (default to be same as `version`)
    --current-objects <path_to_json> : path to JSON file containing current S3 objects listing in this version directory
    --output-dir <output_directory> : directory to store generated index files
    --alias-to-default <alias_variant_name> : (optional) alias variant name for the default variant
    --comment <comment_string> : (optional) comment string to include in generated HTML files
    """

    parser = argparse.ArgumentParser(
        description="Process nightly build wheel files to generate indices."
    )
    parser.add_argument(
        "--version",
        type=str,
        required=True,
        help="Version string for the current build (e.g., commit hash)",
    )
    parser.add_argument(
        "--current-objects",
        type=str,
        required=True,
        help="Path to JSON file containing current S3 objects listing in this version directory",
    )
    parser.add_argument(
        "--output-dir",
        type=str,
        required=True,
        help="Directory to store generated index files",
    )
    parser.add_argument(
        "--wheel-dir",
        type=str,
        default=None,
        help="Directory containing wheel files (default to be same as `version`)",
    )
    parser.add_argument(
        "--alias-to-default",
        type=str,
        default=None,
        help="Alias variant name for the default variant",
    )
    parser.add_argument(
        "--comment",
        type=str,
        default="",
        help="Optional comment string to include in generated HTML files",
    )

    args = parser.parse_args()

    version = args.version
    # Allow rocm/ prefix, reject other slashes and all backslashes
    if "\\" in version:
        raise ValueError("Version string must not contain backslashes.")
    if "/" in version and not version.startswith("rocm/"):
        raise ValueError(
            "Version string must not contain slashes (except for 'rocm/' prefix)."
        )
    current_objects_path = Path(args.current_objects)
    output_dir = Path(args.output_dir)
    if not output_dir.exists():
        output_dir.mkdir(parents=True, exist_ok=True)

    # Read current objects JSON
    with open(current_objects_path) as f:
        current_objects: dict[str, list[dict[str, Any]]] = json.load(f)

    # current_objects looks like from list_objects_v2 S3 API:
    """
    "Contents": [
        {
            "Key": "e2f56c309d2a28899c68975a7e104502d56deb8f/vllm-0.11.2.dev363+ge2f56c309-cp38-abi3-manylinux1_x86_64.whl",
            "LastModified": "2025-11-28T14:00:32+00:00",
            "ETag": "\"37a38339c7cdb61ca737021b968075df-52\"",
            "ChecksumAlgorithm": [
                "CRC64NVME"
            ],
            "ChecksumType": "FULL_OBJECT",
            "Size": 435649349,
            "StorageClass": "STANDARD"
        },
        ...
    ]
    """

    # Extract wheel file keys
    wheel_files = []
    for item in current_objects.get("Contents", []):
        key: str = item["Key"]
        if key.endswith(".whl"):
            wheel_files.append(key.split("/")[-1])  # only the filename is used

    print(f"Found {len(wheel_files)} wheel files for version {version}: {wheel_files}")

    # keep only "official" files for a non-nightly version (specified by cli args)
    PY_VERSION_RE = re.compile(r"^\d+\.\d+\.\d+([a-zA-Z0-9.+-]*)?$")
    if PY_VERSION_RE.match(version):
        # upload-wheels.sh ensures no "dev" is in args.version
        wheel_files = list(
            filter(lambda x: version in x and "dev" not in x, wheel_files)
        )
        print(f"Non-nightly version detected, wheel files used: {wheel_files}")
    else:
        print("Nightly version detected, keeping all wheel files.")

    # Generate index and metadata, assuming wheels and indices are stored as:
    #   s3://vllm-wheels/{wheel_dir}/<wheel files>
    #   s3://vllm-wheels/<anything>/<index files>
    #
    # For ROCm builds, version is "rocm/{commit}" and indices are uploaded to:
    #   - rocm/{commit}/   (same as wheels)
    #   - rocm/nightly/
    #   - rocm/{version}/
    # All these are under the "rocm/" prefix, so relative paths should be
    # relative to "rocm/", not the bucket root.
    if args.wheel_dir:
        # Explicit wheel-dir provided (e.g., for version-specific indices pointing to commit dir)
        wheel_dir = args.wheel_dir.strip().rstrip("/")
    elif version.startswith("rocm/"):
        # For rocm/commit, wheel_base_dir should be just the commit part
        # so relative path from rocm/0.12.0/rocm710/vllm/ -> ../../../{commit}/
        wheel_dir = version.split("/", 1)[1]
    else:
        wheel_dir = version
    wheel_base_dir = Path(output_dir).parent / wheel_dir
    index_base_dir = Path(output_dir)

    generate_index_and_metadata(
        whl_files=wheel_files,
        wheel_base_dir=wheel_base_dir,
        index_base_dir=index_base_dir,
        default_variant=None,
        alias_to_default=args.alias_to_default,
        comment=args.comment.strip(),
    )
    print(f"Successfully generated index and metadata in {output_dir}")
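As a usage example for the index generator above, its wheel-name parser splits a variant-suffixed nightly wheel like this (values shown as comments; the filename is taken from the script's own docstring examples):

    info = parse_from_filename(
        "vllm-0.11.1rc8.dev14+gaa384b3c0.cu130-cp38-abi3-manylinux1_x86_64.whl"
    )
    # info.version      -> "0.11.1rc8.dev14+gaa384b3c0"
    # info.variant      -> "cu130"  (the ".cu130" suffix after the dev segment)
    # info.platform_tag -> "manylinux1_x86_64"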
@@ -1,57 +1,22 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
|
|
||||||
# This script runs tests inside the corresponding ROCm docker container.
|
# This script runs test inside the corresponding ROCm docker container.
|
||||||
# It handles both single-node and multi-node test configurations.
|
|
||||||
#
|
|
||||||
# Multi-node detection: Instead of matching on fragile group names, we detect
|
|
||||||
# multi-node jobs structurally by looking for the bracket command syntax
|
|
||||||
# "[node0_cmds] && [node1_cmds]" or via the NUM_NODES environment variable.
|
|
||||||
#
|
|
||||||
###############################################################################
|
|
||||||
# QUOTING / COMMAND PASSING
|
|
||||||
#
|
|
||||||
# Passing commands as positional arguments ($*) is fragile when the command
|
|
||||||
# string itself contains double quotes, e.g.:
|
|
||||||
#
|
|
||||||
# bash run-amd-test.sh "export FLAGS="value" && pytest -m "not slow""
|
|
||||||
#
|
|
||||||
# The outer shell resolves the nested quotes *before* this script runs, so
|
|
||||||
# the script receives mangled input it cannot fully recover.
|
|
||||||
#
|
|
||||||
# Preferred: pass commands via the VLLM_TEST_COMMANDS environment variable:
|
|
||||||
#
|
|
||||||
# export VLLM_TEST_COMMANDS='export FLAGS="value" && pytest -m "not slow"'
|
|
||||||
# bash run-amd-test.sh
|
|
||||||
#
|
|
||||||
# Single-quoted assignment preserves all inner double quotes verbatim.
|
|
||||||
# The $* path is kept for backward compatibility but callers should migrate.
|
|
||||||
###############################################################################
|
|
||||||
set -o pipefail
|
set -o pipefail
|
||||||
|
|
||||||
# Export Python path
|
# Print ROCm version
|
||||||
export PYTHONPATH=".."
|
echo "--- Confirming Clean Initial State"
|
||||||
|
while true; do
|
||||||
|
sleep 3
|
||||||
|
if grep -q clean /opt/amdgpu/etc/gpu_state; then
|
||||||
|
echo "GPUs state is \"clean\""
|
||||||
|
break
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
###############################################################################
|
echo "--- ROCm info"
|
||||||
# Helper Functions
|
rocminfo
|
||||||
###############################################################################
|
|
||||||
|
|
||||||
wait_for_clean_gpus() {
|
|
||||||
local timeout=${1:-300}
|
|
||||||
local start=$SECONDS
|
|
||||||
echo "--- Waiting for clean GPU state (timeout: ${timeout}s)"
|
|
||||||
while true; do
|
|
||||||
if grep -q clean /opt/amdgpu/etc/gpu_state; then
|
|
||||||
echo "GPUs state is \"clean\""
|
|
||||||
return
|
|
||||||
fi
|
|
||||||
if (( SECONDS - start >= timeout )); then
|
|
||||||
echo "Error: GPUs did not reach clean state within ${timeout}s" >&2
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
sleep 3
|
|
||||||
done
|
|
||||||
}
|
|
||||||
|
|
||||||
|
# cleanup older docker images
|
||||||
cleanup_docker() {
|
cleanup_docker() {
|
||||||
# Get Docker's root directory
|
# Get Docker's root directory
|
||||||
docker_root=$(docker info -f '{{.DockerRootDir}}')
|
docker_root=$(docker info -f '{{.DockerRootDir}}')
|
||||||
@@ -60,12 +25,15 @@ cleanup_docker() {
|
|||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
echo "Docker root directory: $docker_root"
|
echo "Docker root directory: $docker_root"
|
||||||
|
# Check disk usage of the filesystem where Docker's root directory is located
|
||||||
disk_usage=$(df "$docker_root" | tail -1 | awk '{print $5}' | sed 's/%//')
|
disk_usage=$(df "$docker_root" | tail -1 | awk '{print $5}' | sed 's/%//')
|
||||||
|
# Define the threshold
|
||||||
threshold=70
|
threshold=70
|
||||||
if [ "$disk_usage" -gt "$threshold" ]; then
|
if [ "$disk_usage" -gt "$threshold" ]; then
|
||||||
echo "Disk usage is above $threshold%. Cleaning up Docker images and volumes..."
|
echo "Disk usage is above $threshold%. Cleaning up Docker images and volumes..."
|
||||||
|
# Remove dangling images (those that are not tagged and not used by any container)
|
||||||
docker image prune -f
|
docker image prune -f
|
||||||
|
# Remove unused volumes / force the system prune for old images as well.
|
||||||
docker volume prune -f && docker system prune --force --filter "until=72h" --all
|
docker volume prune -f && docker system prune --force --filter "until=72h" --all
|
||||||
echo "Docker images and volumes cleanup completed."
|
echo "Docker images and volumes cleanup completed."
|
||||||
else
|
else
|
||||||
@@ -73,432 +41,163 @@ cleanup_docker() {
|
|||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
cleanup_network() {
|
# Call the cleanup docker function
|
||||||
local max_nodes=${NUM_NODES:-2}
|
|
||||||
for node in $(seq 0 $((max_nodes - 1))); do
|
|
||||||
if docker ps -a -q -f name="node${node}" | grep -q .; then
|
|
||||||
docker stop "node${node}" || true
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
if docker network ls | grep -q docker-net; then
|
|
||||||
docker network rm docker-net || true
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
is_multi_node() {
|
|
||||||
local cmds="$1"
|
|
||||||
# Primary signal: NUM_NODES environment variable set by the pipeline
|
|
||||||
if [[ "${NUM_NODES:-1}" -gt 1 ]]; then
|
|
||||||
return 0
|
|
||||||
fi
|
|
||||||
# Fallback: detect the bracket syntax structurally
|
|
||||||
# Pattern: [...] && [...] (per-node command arrays)
|
|
||||||
if [[ "$cmds" =~ \[.*\].*\&\&.*\[.*\] ]]; then
|
|
||||||
return 0
|
|
||||||
fi
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
|
|
||||||
handle_pytest_exit() {
|
|
||||||
local exit_code=$1
|
|
||||||
if [ "$exit_code" -eq 5 ]; then
|
|
||||||
echo "Pytest exit code 5 (no tests collected) - treating as success."
|
|
||||||
exit 0
|
|
||||||
fi
|
|
||||||
exit "$exit_code"
|
|
||||||
}
|
|
||||||
|
|
||||||
###############################################################################
|
|
||||||
# Pytest marker/keyword re-quoting
|
|
||||||
#
|
|
||||||
# When commands are passed through Buildkite -> shell -> $* -> bash -c,
|
|
||||||
# quotes around multi-word pytest -m/-k expressions get stripped:
|
|
||||||
# pytest -v -s -m 'not cpu_test' v1/core
|
|
||||||
# becomes:
|
|
||||||
# pytest -v -s -m not cpu_test v1/core
|
|
||||||
#
|
|
||||||
# pytest then interprets "cpu_test" as a file path, not part of the marker.
|
|
||||||
#
|
|
||||||
# This function detects unquoted expressions after -m/-k and re-quotes them
|
|
||||||
# by collecting tokens until a recognizable boundary is reached:
|
|
||||||
# - test path (contains '/')
|
|
||||||
# - test file (ends with '.py')
|
|
||||||
# - another pytest flag (--xxx or -x single-char flags)
|
|
||||||
# - command separator (&& || ; |)
|
|
||||||
# - environment variable assignment (FOO=bar)
|
|
||||||
#
|
|
||||||
# Single-word markers (e.g. -m cpu_test, -m hybrid_model) pass through
|
|
||||||
# unquoted since they have no spaces and work fine.
|
|
||||||
#
|
|
||||||
# Already-quoted expressions (containing literal single quotes) are passed
|
|
||||||
# through untouched to avoid double-quoting values injected by
|
|
||||||
# apply_rocm_test_overrides.
|
|
||||||
#
|
|
||||||
# NOTE: This ONLY fixes -m/-k flags. It cannot recover arbitrary inner
|
|
||||||
# double-quotes stripped by the calling shell (see header comment).
|
|
||||||
# Use VLLM_TEST_COMMANDS to avoid the problem entirely.
|
|
||||||
###############################################################################
|
|
||||||
re_quote_pytest_markers() {
|
|
||||||
local input="$1"
|
|
||||||
local output=""
|
|
||||||
local collecting=false
|
|
||||||
local marker_buf=""
|
|
||||||
|
|
||||||
# Strip backslash-newline continuations, then flatten remaining newlines
|
|
||||||
local flat="${input//$'\\\n'/ }"
|
|
||||||
flat="${flat//$'\n'/ }"
|
|
||||||
|
|
||||||
# Disable globbing to prevent *.py etc. from expanding during read -ra
|
|
||||||
local restore_glob
|
|
||||||
restore_glob="$(shopt -p -o noglob 2>/dev/null || true)"
|
|
||||||
set -o noglob
|
|
||||||
local -a words
|
|
||||||
read -ra words <<< "$flat"
|
|
||||||
eval "$restore_glob"
|
|
||||||
|
|
||||||
for word in "${words[@]}"; do
|
|
||||||
if $collecting; then
|
|
||||||
# If the token we're about to collect already contains a literal
|
|
||||||
# single quote, the expression was already quoted upstream.
|
|
||||||
# Flush and stop collecting.
|
|
||||||
if [[ "$word" == *"'"* ]]; then
|
|
||||||
if [[ -n "$marker_buf" ]]; then
|
|
||||||
# Should not normally happen (partial buf + quote), flush raw
|
|
||||||
output+="${marker_buf} "
|
|
||||||
marker_buf=""
|
|
||||||
fi
|
|
||||||
output+="${word} "
|
|
||||||
collecting=false
|
|
||||||
continue
|
|
||||||
fi
|
|
||||||
|
|
||||||
local is_boundary=false
|
|
||||||
case "$word" in
|
|
||||||
# Line-continuation artifact
|
|
||||||
"\\")
|
|
||||||
is_boundary=true ;;
|
|
||||||
# Command separators
|
|
||||||
"&&"|"||"|";"|"|")
|
|
||||||
is_boundary=true ;;
|
|
||||||
# Long flags (--ignore, --shard-id, etc.)
|
|
||||||
--*)
|
|
||||||
is_boundary=true ;;
|
|
||||||
# Short flags (-v, -s, -x, etc.) but NOT negative marker tokens
|
|
||||||
# like "not" which don't start with "-". Also skip -k/-m which
|
|
||||||
# would start a new marker (handled below).
|
|
||||||
-[a-zA-Z])
|
|
||||||
is_boundary=true ;;
|
|
||||||
# Test path (contains /)
|
|
||||||
*/*)
|
|
||||||
is_boundary=true ;;
|
|
||||||
# Test file (ends with .py, possibly with ::method)
|
|
||||||
*.py|*.py::*)
|
|
||||||
is_boundary=true ;;
|
|
||||||
# Environment variable assignment preceding a command (FOO=bar)
|
|
||||||
*=*)
|
|
||||||
# Only treat as boundary if it looks like VAR=value, not
|
|
||||||
# pytest filter expressions like num_gpus=2 inside markers
|
|
||||||
if [[ "$word" =~ ^[A-Z_][A-Z0-9_]*= ]]; then
|
|
||||||
is_boundary=true
|
|
||||||
fi
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
|
|
||||||
if $is_boundary; then
|
|
||||||
# Flush the collected marker expression
|
|
||||||
if [[ "$marker_buf" == *" "* || "$marker_buf" == *"("* ]]; then
|
|
||||||
output+="'${marker_buf}' "
|
|
||||||
else
|
|
||||||
output+="${marker_buf} "
|
|
||||||
fi
|
|
||||||
collecting=false
|
|
||||||
marker_buf=""
|
|
||||||
# Check if this boundary word itself starts a new -m/-k
|
|
||||||
if [[ "$word" == "-m" || "$word" == "-k" ]]; then
|
|
||||||
output+="${word} "
|
|
||||||
collecting=true
|
|
||||||
# Drop stray backslash tokens silently
|
|
||||||
elif [[ "$word" == "\\" ]]; then
|
|
||||||
:
|
|
||||||
else
|
|
||||||
output+="${word} "
|
|
||||||
fi
|
|
||||||
else
|
|
||||||
# Accumulate into marker buffer
|
|
||||||
if [[ -n "$marker_buf" ]]; then
|
|
||||||
marker_buf+=" ${word}"
|
|
||||||
else
|
|
||||||
marker_buf="${word}"
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
elif [[ "$word" == "-m" || "$word" == "-k" ]]; then
|
|
||||||
output+="${word} "
|
|
||||||
collecting=true
|
|
||||||
marker_buf=""
|
|
||||||
else
|
|
||||||
output+="${word} "
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
|
|
||||||
# Flush any trailing marker expression (marker at end of command)
|
|
||||||
if $collecting && [[ -n "$marker_buf" ]]; then
|
|
||||||
if [[ "$marker_buf" == *" "* || "$marker_buf" == *"("* ]]; then
|
|
||||||
output+="'${marker_buf}'"
|
|
||||||
else
|
|
||||||
output+="${marker_buf}"
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
echo "${output% }"
|
|
||||||
}
|
|
||||||
|
|
||||||
###############################################################################
# ROCm-specific pytest command rewrites
#
# These apply ignore flags and environment overrides for tests that are not
# yet supported or behave differently on ROCm hardware. Kept as a single
# function so new exclusions are easy to add in one place.
###############################################################################

apply_rocm_test_overrides() {
    local cmds="$1"

    # --- Model registry filter ---
    if [[ $cmds == *"pytest -v -s models/test_registry.py"* ]]; then
        cmds=${cmds//"pytest -v -s models/test_registry.py"/"pytest -v -s models/test_registry.py -k 'not BambaForCausalLM and not GritLM and not Mamba2ForCausalLM and not Zamba2ForCausalLM'"}
    fi

    # --- LoRA: disable custom paged attention ---
    if [[ $cmds == *"pytest -v -s lora"* ]]; then
        cmds=${cmds//"pytest -v -s lora"/"VLLM_ROCM_CUSTOM_PAGED_ATTN=0 pytest -v -s lora"}
    fi

    # --- Kernel ignores ---
    if [[ $cmds == *" kernels/core"* ]]; then
        cmds="${cmds} \
            --ignore=kernels/core/test_fused_quant_layernorm.py \
            --ignore=kernels/core/test_permute_cols.py"
    fi

    if [[ $cmds == *" kernels/attention"* ]]; then
        cmds="${cmds} \
            --ignore=kernels/attention/test_attention_selector.py \
            --ignore=kernels/attention/test_encoder_decoder_attn.py \
            --ignore=kernels/attention/test_flash_attn.py \
            --ignore=kernels/attention/test_flashinfer.py \
            --ignore=kernels/attention/test_prefix_prefill.py \
            --ignore=kernels/attention/test_cascade_flash_attn.py \
            --ignore=kernels/attention/test_mha_attn.py \
            --ignore=kernels/attention/test_lightning_attn.py \
            --ignore=kernels/attention/test_attention.py"
    fi

    if [[ $cmds == *" kernels/quantization"* ]]; then
        cmds="${cmds} \
            --ignore=kernels/quantization/test_int8_quant.py \
            --ignore=kernels/quantization/test_machete_mm.py \
            --ignore=kernels/quantization/test_block_fp8.py \
            --ignore=kernels/quantization/test_block_int8.py \
            --ignore=kernels/quantization/test_marlin_gemm.py \
            --ignore=kernels/quantization/test_cutlass_scaled_mm.py \
            --ignore=kernels/quantization/test_int8_kernel.py"
    fi

    if [[ $cmds == *" kernels/mamba"* ]]; then
        cmds="${cmds} \
            --ignore=kernels/mamba/test_mamba_mixer2.py \
            --ignore=kernels/mamba/test_causal_conv1d.py \
            --ignore=kernels/mamba/test_mamba_ssm_ssd.py"
    fi

    if [[ $cmds == *" kernels/moe"* ]]; then
        cmds="${cmds} \
            --ignore=kernels/moe/test_moe.py \
            --ignore=kernels/moe/test_cutlass_moe.py \
            --ignore=kernels/moe/test_triton_moe_ptpc_fp8.py"
    fi

    # --- Entrypoint ignores ---
    if [[ $cmds == *" entrypoints/openai "* ]]; then
        cmds=${cmds//" entrypoints/openai "/" entrypoints/openai \
            --ignore=entrypoints/openai/test_audio.py \
            --ignore=entrypoints/openai/test_shutdown.py \
            --ignore=entrypoints/openai/test_completion.py \
            --ignore=entrypoints/openai/test_models.py \
            --ignore=entrypoints/openai/test_lora_adapters.py \
            --ignore=entrypoints/openai/test_return_tokens_as_ids.py \
            --ignore=entrypoints/openai/test_root_path.py \
            --ignore=entrypoints/openai/test_tokenization.py \
            --ignore=entrypoints/openai/test_prompt_validation.py "}
    fi

    if [[ $cmds == *" entrypoints/llm "* ]]; then
        cmds=${cmds//" entrypoints/llm "/" entrypoints/llm \
            --ignore=entrypoints/llm/test_chat.py \
            --ignore=entrypoints/llm/test_accuracy.py \
            --ignore=entrypoints/llm/test_init.py \
            --ignore=entrypoints/llm/test_prompt_validation.py "}
    fi

    # Clean up escaped newlines from --ignore appends
    cmds=$(echo "$cmds" | sed 's/ \\ / /g')

    echo "$cmds"
}
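# A minimal usage sketch of the helper above (the input command is illustrative,
# not taken from a real pipeline definition):
#   cmds=$(apply_rocm_test_overrides "cd /vllm-workspace/tests && pytest -v -s lora")
#   echo "$cmds"
#   # -> cd /vllm-workspace/tests && VLLM_ROCM_CUSTOM_PAGED_ATTN=0 pytest -v -s lora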
###############################################################################
# Main
###############################################################################

# --- GPU initialization ---
echo "--- Confirming Clean Initial State"
wait_for_clean_gpus

echo "--- ROCm info"
rocminfo

# --- Docker housekeeping ---
cleanup_docker

echo "--- Resetting GPUs"
echo "reset" > /opt/amdgpu/etc/gpu_state
wait_for_clean_gpus

# --- Pull test image ---
echo "--- Pulling container"
image_name="rocm/vllm-ci:${BUILDKITE_COMMIT}"
container_name="rocm_${BUILDKITE_COMMIT}_$(tr -dc A-Za-z0-9 < /dev/urandom | head -c 10; echo)"
docker pull "${image_name}"

remove_docker_container() {
    docker rm -f "${container_name}" || docker image rm -f "${image_name}" || true
}
trap remove_docker_container EXIT

# --- Prepare commands ---
echo "--- Running container"

HF_CACHE="$(realpath ~)/huggingface"
mkdir -p "${HF_CACHE}"
HF_MOUNT="/root/.cache/huggingface"
# ---- Command source selection ----
# Prefer VLLM_TEST_COMMANDS (preserves all inner quoting intact).
# Fall back to $* for backward compatibility, but warn that inner
# double-quotes will have been stripped by the calling shell.
if [[ -n "${VLLM_TEST_COMMANDS:-}" ]]; then
    commands="${VLLM_TEST_COMMANDS}"
    echo "Commands sourced from VLLM_TEST_COMMANDS (quoting preserved)"
else
    commands="$*"
    if [[ -z "$commands" ]]; then
        echo "Error: No test commands provided." >&2
        echo "Usage:" >&2
        echo "  Preferred: VLLM_TEST_COMMANDS='...' bash $0" >&2
        echo "  Legacy:    bash $0 \"commands here\"" >&2
        exit 1
    fi
    echo "Commands sourced from positional args (legacy mode)"
    echo "WARNING: Inner double-quotes in the command string may have been"
    echo "  stripped by the calling shell. If you see syntax errors, switch to:"
    echo "  export VLLM_TEST_COMMANDS='your commands here'"
    echo "  bash $0"
fi

echo "Raw commands: $commands"

# Fix quoting before ROCm overrides (so overrides see correct structure)
commands=$(re_quote_pytest_markers "$commands")
echo "After re-quoting: $commands"

commands=$(apply_rocm_test_overrides "$commands")
echo "Final commands: $commands"

MYPYTHONPATH=".."

# Verify GPU access
render_gid=$(getent group render | cut -d: -f3)
if [[ -z "$render_gid" ]]; then
    echo "Error: 'render' group not found. This is required for GPU access." >&2
    exit 1
fi
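# Illustrative invocations of this script (the script name is assumed here,
# not taken from this file; the pytest selection is also illustrative):
#   Preferred: VLLM_TEST_COMMANDS='pytest -v -s lora' bash run-amd-test.sh
#   Legacy:    bash run-amd-test.sh "pytest -v -s lora"   # inner double-quotes may be stripped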
# --- RDMA device passthrough (conditional) ---
# If the host has RDMA devices, pass them through so tests like
# test_moriio_connector can access ibverbs. On hosts without RDMA
# hardware the tests will gracefully skip via _rdma_available().
RDMA_FLAGS=""
if [ -d /dev/infiniband ]; then
    echo "RDMA devices detected on host, enabling passthrough"
    RDMA_FLAGS="--device /dev/infiniband --cap-add=IPC_LOCK"
else
    echo "No RDMA devices found on host, RDMA tests will be skipped"
fi
# --- Route: multi-node vs single-node ---
if is_multi_node "$commands"; then
    echo "--- Multi-node job detected"
    export DCKR_VER=$(docker --version | sed 's/Docker version \(.*\), build .*/\1/')

    # Parse the bracket syntax: prefix ; [node0_cmds] && [node1_cmds]
    # BASH_REMATCH[1] = prefix (everything before first bracket)
    # BASH_REMATCH[2] = comma-separated node0 commands
    # BASH_REMATCH[3] = comma-separated node1 commands
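    # Illustrative example (hypothetical commands): for
    #   commands='prefix ; [pytest -s a.py, pytest -s b.py] && [pytest -s c.py, pytest -s d.py]'
    # BASH_REMATCH[2] is "pytest -s a.py, pytest -s b.py" and BASH_REMATCH[3] is
    # "pytest -s c.py, pytest -s d.py"; node0[0] is then paired with node1[0],
    # node0[1] with node1[1], and so on.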
if [[ "$commands" =~ ^(.*)\[(.*)"] && ["(.*)\]$ ]]; then
|
# --ignore=entrypoints/openai/test_models.py <= Fails on MI250 but passes on MI300 as of 2025-03-13
|
||||||
prefix=$(echo "${BASH_REMATCH[1]}" | sed 's/;//g')
|
|
||||||
echo "PREFIX: ${prefix}"
|
|
||||||
|
|
||||||
export composite_command="(command rocm-smi || true)"
|
|
||||||
saved_IFS=$IFS
|
|
||||||
IFS=','
|
|
||||||
read -ra node0 <<< "${BASH_REMATCH[2]}"
|
|
||||||
read -ra node1 <<< "${BASH_REMATCH[3]}"
|
|
||||||
IFS=$saved_IFS
|
|
||||||
|
|
||||||
if [[ ${#node0[@]} -ne ${#node1[@]} ]]; then
|
PARALLEL_JOB_COUNT=8
|
||||||
echo "Warning: node0 has ${#node0[@]} commands, node1 has ${#node1[@]}. They will be paired by index."
|
# check if the command contains shard flag, we will run all shards in parallel because the host have 8 GPUs.
|
||||||
|
if [[ $commands == *"--shard-id="* ]]; then
|
||||||
|
# assign job count as the number of shards used
|
||||||
|
commands=${commands//"--num-shards= "/"--num-shards=${PARALLEL_JOB_COUNT} "}
|
||||||
|
for GPU in $(seq 0 $(($PARALLEL_JOB_COUNT-1))); do
|
||||||
|
# assign shard-id for each shard
|
||||||
|
commands_gpu=${commands//"--shard-id= "/"--shard-id=${GPU} "}
|
||||||
|
echo "Shard ${GPU} commands:$commands_gpu"
|
||||||
|
echo "Render devices: $BUILDKITE_AGENT_META_DATA_RENDER_DEVICES"
|
||||||
|
docker run \
|
||||||
|
--device /dev/kfd $BUILDKITE_AGENT_META_DATA_RENDER_DEVICES \
|
||||||
|
--network=host \
|
||||||
|
--shm-size=16gb \
|
||||||
|
--rm \
|
||||||
|
-e HIP_VISIBLE_DEVICES="${GPU}" \
|
||||||
|
-e HF_TOKEN \
|
||||||
|
-e AWS_ACCESS_KEY_ID \
|
||||||
|
-e AWS_SECRET_ACCESS_KEY \
|
||||||
|
-v "${HF_CACHE}:${HF_MOUNT}" \
|
||||||
|
-e "HF_HOME=${HF_MOUNT}" \
|
||||||
|
--name "${container_name}_${GPU}" \
|
||||||
|
"${image_name}" \
|
||||||
|
/bin/bash -c "${commands_gpu}" \
|
||||||
|
|& while read -r line; do echo ">>Shard $GPU: $line"; done &
|
||||||
|
PIDS+=($!)
|
||||||
|
done
|
||||||
|
#wait for all processes to finish and collect exit codes
|
||||||
|
for pid in "${PIDS[@]}"; do
|
||||||
|
wait "${pid}"
|
||||||
|
STATUS+=($?)
|
||||||
|
done
|
||||||
|
for st in "${STATUS[@]}"; do
|
||||||
|
if [[ ${st} -ne 0 ]]; then
|
||||||
|
echo "One of the processes failed with $st"
|
||||||
|
exit "${st}"
|
||||||
fi
|
fi
|
||||||
|
done
|
||||||
for i in "${!node0[@]}"; do
|
|
||||||
command_node_0=$(echo "${node0[i]}" | sed 's/\"//g')
|
|
||||||
command_node_1=$(echo "${node1[i]}" | sed 's/\"//g')
|
|
||||||
|
|
||||||
step_cmd="./.buildkite/scripts/run-multi-node-test.sh /vllm-workspace/tests 2 2 ${image_name} '${command_node_0}' '${command_node_1}'"
|
|
||||||
echo "COMMANDS: ${step_cmd}"
|
|
||||||
composite_command="${composite_command} && ${step_cmd}"
|
|
||||||
done
|
|
||||||
|
|
||||||
/bin/bash -c "${composite_command}"
|
|
||||||
exit_code=$?
|
|
||||||
cleanup_network
|
|
||||||
handle_pytest_exit "$exit_code"
|
|
||||||
else
|
|
||||||
echo "Multi-node job detected but failed to parse bracket command syntax."
|
|
||||||
echo "Expected format: prefix ; [node0_cmd1, node0_cmd2] && [node1_cmd1, node1_cmd2]"
|
|
||||||
echo "Got: $commands"
|
|
||||||
cleanup_network
|
|
||||||
exit 111
|
|
||||||
fi
|
|
||||||
else
    echo "--- Single-node job"
    echo "Render devices: $BUILDKITE_AGENT_META_DATA_RENDER_DEVICES"
    docker run \
        --device /dev/kfd $BUILDKITE_AGENT_META_DATA_RENDER_DEVICES \
        $RDMA_FLAGS \
        --network=host \
        --shm-size=16gb \
        --group-add "$render_gid" \
        --rm \
        -e HF_TOKEN \
        -e AWS_ACCESS_KEY_ID \
        -e AWS_SECRET_ACCESS_KEY \
        -v "${HF_CACHE}:${HF_MOUNT}" \
        -e "HF_HOME=${HF_MOUNT}" \
        -e "PYTHONPATH=${MYPYTHONPATH}" \
        --name "${container_name}" \
        "${image_name}" \
        /bin/bash -c "${commands}"

    exit_code=$?
    handle_pytest_exit "$exit_code"
fi
@@ -1,43 +0,0 @@
#!/bin/bash
set -euox pipefail
export VLLM_CPU_CI_ENV=0

echo "--- PP+TP"
vllm serve meta-llama/Llama-3.2-3B-Instruct -tp=2 -pp=2 &
server_pid=$!
timeout 600 bash -c "until curl localhost:8000/v1/models > /dev/null 2>&1; do sleep 1; done" || exit 1
vllm bench serve \
    --backend vllm \
    --dataset-name random \
    --model meta-llama/Llama-3.2-3B-Instruct \
    --num-prompts 20 \
    --result-dir ./test_results \
    --result-filename tp_pp.json \
    --save-result \
    --endpoint /v1/completions
kill -s SIGTERM $server_pid; wait $server_pid || true
failed_req=$(jq '.failed' ./test_results/tp_pp.json)
if [ "$failed_req" -ne 0 ]; then
    echo "Some requests failed!"
    exit 1
fi

echo "--- DP+TP"
vllm serve meta-llama/Llama-3.2-3B-Instruct -tp=2 -dp=2 &
server_pid=$!
timeout 600 bash -c "until curl localhost:8000/v1/models > /dev/null 2>&1; do sleep 1; done" || exit 1
vllm bench serve \
    --backend vllm \
    --dataset-name random \
    --model meta-llama/Llama-3.2-3B-Instruct \
    --num-prompts 20 \
    --result-dir ./test_results \
    --result-filename dp_pp.json \
    --save-result \
    --endpoint /v1/completions
kill -s SIGTERM $server_pid; wait $server_pid || true
failed_req=$(jq '.failed' ./test_results/dp_pp.json)
if [ "$failed_req" -ne 0 ]; then
    echo "Some requests failed!"
    exit 1
fi
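For reference, a sketch of the benchmark result file the check above reads; the `failed` field is the one queried, the rest of the JSON is illustrative:
# $ cat ./test_results/tp_pp.json    (fields illustrative)
# {"backend": "vllm", "completed": 20, "failed": 0, ...}
# $ jq '.failed' ./test_results/tp_pp.json
# 0    <- any non-zero count makes the script exit 1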
@@ -1,68 +0,0 @@
#!/bin/bash

# This script builds the CPU docker image and runs the offline inference inside the container.
# It serves as a sanity check for compilation and basic model usage.
set -ex

# allow binding to different cores
CORE_RANGE=${CORE_RANGE:-0-16}
OMP_CORE_RANGE=${OMP_CORE_RANGE:-0-16}

export CMAKE_BUILD_PARALLEL_LEVEL=16

# Setup cleanup
remove_docker_container() {
    set -e;
    docker rm -f cpu-test || true;
}
trap remove_docker_container EXIT
remove_docker_container

# Try building the docker image
docker build --tag cpu-test --target vllm-test -f docker/Dockerfile.cpu .

# Run the image
docker run -itd --cpuset-cpus="$CORE_RANGE" --entrypoint /bin/bash -v ~/.cache/huggingface:/root/.cache/huggingface -e HF_TOKEN --env VLLM_CPU_KVCACHE_SPACE=16 --env VLLM_CPU_CI_ENV=1 -e E2E_OMP_THREADS="$OMP_CORE_RANGE" --shm-size=4g --name cpu-test cpu-test

function cpu_tests() {
    set -e

    docker exec cpu-test bash -c "
        set -e
        pip list"

    # offline inference
    docker exec cpu-test bash -c "
        set -e
        python3 examples/offline_inference/basic/generate.py --model facebook/opt-125m"

    # Run model tests
    docker exec cpu-test bash -c "
        set -e
        pytest -x -v -s tests/models/multimodal/generation/test_whisper.py -m cpu_model"

    # Run kernel tests
    docker exec cpu-test bash -c "
        set -e
        pytest -x -v -s tests/kernels/test_onednn.py
        pytest -x -v -s tests/kernels/attention/test_cpu_attn.py
        pytest -x -v -s tests/kernels/moe/test_moe.py -k test_cpu_fused_moe_basic"

    # basic online serving
    docker exec cpu-test bash -c '
        set -e
        VLLM_CPU_OMP_THREADS_BIND=$E2E_OMP_THREADS vllm serve Qwen/Qwen3-0.6B --max-model-len 2048 &
        server_pid=$!
        timeout 600 bash -c "until curl localhost:8000/v1/models; do sleep 1; done" || exit 1
        vllm bench serve \
            --backend vllm \
            --dataset-name random \
            --model Qwen/Qwen3-0.6B \
            --num-prompts 20 \
            --endpoint /v1/completions
        kill -s SIGTERM $server_pid &'
}

# All CPU tests are expected to finish in less than 40 minutes.
export -f cpu_tests
timeout 2h bash -c cpu_tests
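The core-binding defaults above can be overridden from the environment; a hypothetical invocation (core ranges and script name illustrative):
# CORE_RANGE=32-47 OMP_CORE_RANGE=32-47 bash run-cpu-test.sh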
@@ -7,7 +7,6 @@ set -ex
# Setup cleanup
remove_docker_container() {
    if [[ -n "$container_id" ]]; then
        podman stop --all -t0
        podman rm -f "$container_id" || true
    fi
    podman system prune -f

@@ -25,30 +24,22 @@ function cpu_tests() {

    # offline inference
    podman exec -it "$container_id" bash -c "
        export TORCH_COMPILE_DISABLE=1
        set -xve
        python3 examples/offline_inference/basic/generate.py --model facebook/opt-125m" >> "$HOME"/test_basic.log

    # Run basic model test
    podman exec -it "$container_id" bash -c "
        export TORCH_COMPILE_DISABLE=1
        set -evx
        pip install pytest pytest-asyncio einops peft Pillow soundfile transformers_stream_generator matplotlib
        pip install sentence-transformers datamodel_code_generator tblib
        # Note: disable Bart until it supports V1
        # pytest -v -s tests/models/language/generation/test_bart.py -m cpu_model
        pytest -v -s tests/models/language/generation/test_common.py::test_models[False-False-5-32-openai-community/gpt2]
        pytest -v -s tests/models/language/generation/test_common.py::test_models[False-False-5-32-facebook/opt-125m]
        pytest -v -s tests/models/language/generation/test_common.py::test_models[False-False-5-32-google/gemma-1.1-2b-it]
        pytest -v -s tests/models/language/pooling/test_classification.py::test_models[float-jason9693/Qwen2.5-1.5B-apeach]
        # TODO: tests/models/language/pooling/test_embedding.py::test_models[True-ssmits/Qwen2-7B-Instruct-embed-base] fails on ppc64le; disabled for the time being.
        # pytest -v -s tests/models/language/pooling/test_embedding.py -m cpu_model" >> "$HOME"/test_rest.log
}

# All CPU tests are expected to finish in less than 40 minutes.
export container_id
export -f cpu_tests
timeout 120m bash -c cpu_tests
@@ -2,19 +2,93 @@

# This script builds the CPU docker image and runs the offline inference inside the container.
# It serves as a sanity check for compilation and basic model usage.
set -euox pipefail

# allow binding to different cores
CORE_RANGE=${CORE_RANGE:-48-95}
NUMA_NODE=${NUMA_NODE:-1}
IMAGE_NAME="cpu-test-$NUMA_NODE"
TIMEOUT_VAL=$1
TEST_COMMAND=$2

# building the docker image
echo "--- :docker: Building Docker image"
docker build --progress plain --tag "$IMAGE_NAME" --target vllm-test -f docker/Dockerfile.cpu .

# Run the image, setting --shm-size=4g for tensor parallel.
docker run --rm --cpuset-cpus="$CORE_RANGE" --cpuset-mems="$NUMA_NODE" -v ~/.cache/huggingface:/root/.cache/huggingface --privileged=true -e HF_TOKEN -e VLLM_CPU_KVCACHE_SPACE=16 -e VLLM_CPU_CI_ENV=1 -e VLLM_CPU_SIM_MULTI_NUMA=1 --shm-size=4g "$IMAGE_NAME" \
    timeout "$TIMEOUT_VAL" bash -c "set -euox pipefail; echo \"--- Print packages\"; pip list; echo \"--- Running tests\"; ${TEST_COMMAND}"

set -ex

# Setup cleanup
remove_docker_container() {
    set -e;
    docker rm -f cpu-test-"$BUILDKITE_BUILD_NUMBER"-"$NUMA_NODE" cpu-test-"$BUILDKITE_BUILD_NUMBER"-avx2-"$NUMA_NODE" || true;
    docker image rm cpu-test-"$BUILDKITE_BUILD_NUMBER" cpu-test-"$BUILDKITE_BUILD_NUMBER"-avx2 || true;
}
trap remove_docker_container EXIT
remove_docker_container

# Try building the docker image
numactl -C "$CORE_RANGE" -N "$NUMA_NODE" docker build --tag cpu-test-"$BUILDKITE_BUILD_NUMBER" --target vllm-test -f docker/Dockerfile.cpu .
numactl -C "$CORE_RANGE" -N "$NUMA_NODE" docker build --build-arg VLLM_CPU_DISABLE_AVX512="true" --tag cpu-test-"$BUILDKITE_BUILD_NUMBER"-avx2 --target vllm-test -f docker/Dockerfile.cpu .

# Run the image, setting --shm-size=4g for tensor parallel.
docker run -itd --entrypoint /bin/bash -v ~/.cache/huggingface:/root/.cache/huggingface --cpuset-cpus="$CORE_RANGE" \
    --cpuset-mems="$NUMA_NODE" --privileged=true -e HF_TOKEN --env VLLM_CPU_KVCACHE_SPACE=4 --shm-size=4g --name cpu-test-"$BUILDKITE_BUILD_NUMBER"-"$NUMA_NODE" cpu-test-"$BUILDKITE_BUILD_NUMBER"
docker run -itd --entrypoint /bin/bash -v ~/.cache/huggingface:/root/.cache/huggingface --cpuset-cpus="$CORE_RANGE" \
    --cpuset-mems="$NUMA_NODE" --privileged=true -e HF_TOKEN --env VLLM_CPU_KVCACHE_SPACE=4 --shm-size=4g --name cpu-test-"$BUILDKITE_BUILD_NUMBER"-avx2-"$NUMA_NODE" cpu-test-"$BUILDKITE_BUILD_NUMBER"-avx2

function cpu_tests() {
    set -e
    export NUMA_NODE=$2
    export BUILDKITE_BUILD_NUMBER=$3

    # offline inference
    docker exec cpu-test-"$BUILDKITE_BUILD_NUMBER"-avx2-"$NUMA_NODE" bash -c "
        set -e
        python3 examples/offline_inference/basic/generate.py --model facebook/opt-125m"

    # Run basic model test
    docker exec cpu-test-"$BUILDKITE_BUILD_NUMBER"-"$NUMA_NODE" bash -c "
        set -e
        pytest -v -s tests/kernels/test_cache.py -m cpu_model
        pytest -v -s tests/kernels/test_mla_decode_cpu.py -m cpu_model
        pytest -v -s tests/models/decoder_only/language -m cpu_model
        pytest -v -s tests/models/embedding/language -m cpu_model
        pytest -v -s tests/models/encoder_decoder/language -m cpu_model
        pytest -v -s tests/models/decoder_only/audio_language -m cpu_model
        pytest -v -s tests/models/decoder_only/vision_language -m cpu_model"

    # Run compressed-tensor test
    docker exec cpu-test-"$BUILDKITE_BUILD_NUMBER"-"$NUMA_NODE" bash -c "
        set -e
        pytest -s -v \
            tests/quantization/test_compressed_tensors.py::test_compressed_tensors_w8a8_static_setup \
            tests/quantization/test_compressed_tensors.py::test_compressed_tensors_w8a8_dynamic_per_token"

    # Run AWQ test
    docker exec cpu-test-"$BUILDKITE_BUILD_NUMBER"-"$NUMA_NODE" bash -c "
        set -e
        pytest -s -v \
            tests/quantization/test_ipex_quant.py"

    # Run chunked-prefill and prefix-cache test
    docker exec cpu-test-"$BUILDKITE_BUILD_NUMBER"-"$NUMA_NODE" bash -c "
        set -e
        pytest -s -v -k cpu_model \
            tests/basic_correctness/test_chunked_prefill.py"

    # online serving
    docker exec cpu-test-"$BUILDKITE_BUILD_NUMBER"-"$NUMA_NODE" bash -c "
        set -e
        export VLLM_CPU_KVCACHE_SPACE=10
        export VLLM_CPU_OMP_THREADS_BIND=$1
        python3 -m vllm.entrypoints.openai.api_server --model facebook/opt-125m --dtype half &
        timeout 600 bash -c 'until curl localhost:8000/v1/models; do sleep 1; done' || exit 1
        python3 benchmarks/benchmark_serving.py \
            --backend vllm \
            --dataset-name random \
            --model facebook/opt-125m \
            --num-prompts 20 \
            --endpoint /v1/completions \
            --tokenizer facebook/opt-125m"

    # Run multi-lora tests
    docker exec cpu-test-"$BUILDKITE_BUILD_NUMBER"-"$NUMA_NODE" bash -c "
        set -e
        pytest -s -v \
            tests/lora/test_qwen2vl.py"
}

# All CPU tests are expected to finish in less than 40 minutes.
export -f cpu_tests
timeout 40m bash -c "cpu_tests $CORE_RANGE $NUMA_NODE $BUILDKITE_BUILD_NUMBER"
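A hypothetical invocation of the parameterized form above, where $1 is the timeout and $2 the test command (all values illustrative):
# NUMA_NODE=0 CORE_RANGE=0-47 bash run-cpu-test.sh 1h 'pytest -v -s tests/kernels/core'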
@@ -16,7 +16,8 @@ DOCKER_BUILDKIT=1 docker build . \
--build-arg max_jobs=66 \
--build-arg nvcc_threads=2 \
--build-arg RUN_WHEEL_CHECK=false \
--build-arg torch_cuda_arch_list="9.0+PTX"
--build-arg torch_cuda_arch_list="9.0+PTX" \
--build-arg vllm_fa_cmake_gpu_arches="90-real"

# Setup cleanup
remove_docker_container() { docker rm -f gh200-test || true; }
@@ -1,89 +1,24 @@
#!/bin/bash

# This script builds the HPU docker image and runs the offline inference inside the container.
# It serves as a sanity check for compilation and basic model usage.
#
# vllm-gaudi compatibility pinning:
# The vllm-gaudi plugin is installed on top of the vllm upstream checkout used by this CI job.
# When upstream vllm changes its API, the plugin may break before it has been updated.
# To handle this, the vllm-gaudi repository maintains a file:
#   vllm/last-good-commit-for-vllm-gaudi/VLLM_COMMUNITY_COMMIT
# The first line of that file controls what version of vllm is used inside the Docker image:
#   - "latest"       : no checkout override; the current Buildkite CI commit is used as-is.
#   - "<commit SHA>" : vllm is checked out to that specific commit before building, pinning
#                      the test to a known-compatible baseline.
# To unpin (resume testing against the live vllm tip), set the file content back to "latest".
set -exuo pipefail

# Fetch the vllm community commit reference from vllm-gaudi (first line only).
VLLM_COMMUNITY_COMMIT=$(curl -s \
    https://raw.githubusercontent.com/vllm-project/vllm-gaudi/vllm/last-good-commit-for-vllm-gaudi/VLLM_COMMUNITY_COMMIT \
    | head -1 | tr -d '\n')

echo "Using vllm community commit: ${VLLM_COMMUNITY_COMMIT}"

# Try building the docker image
image_name="hpu/upstream-vllm-ci:${BUILDKITE_COMMIT}"
container_name="hpu-upstream-vllm-ci-${BUILDKITE_COMMIT}-container"
cat <<EOF | docker build -t "${image_name}" -f - .
FROM gaudi-base-image:latest

COPY ./ /workspace/vllm

# If VLLM_COMMUNITY_COMMIT is a specific commit (not "latest"), check it out to pin vllm
# to the version known to be compatible with vllm-gaudi. When the value is "latest",
# the current checkout (the Buildkite CI commit) is used unchanged.
RUN if [ "${VLLM_COMMUNITY_COMMIT}" != "latest" ]; then \
        cd /workspace/vllm && git fetch --unshallow 2>/dev/null || true && git checkout ${VLLM_COMMUNITY_COMMIT}; \
    fi

WORKDIR /workspace/vllm

ENV no_proxy=localhost,127.0.0.1
ENV PT_HPU_ENABLE_LAZY_COLLECTIVES=true

RUN bash -c 'pip install -r <(sed "/^torch/d" requirements/build.txt)'
RUN VLLM_TARGET_DEVICE=empty pip install --no-build-isolation -e .
RUN pip install git+https://github.com/vllm-project/vllm-gaudi.git

# install development dependencies (for testing)
RUN python3 -m pip install -e tests/vllm_test_utils

WORKDIR /workspace/

RUN git clone https://github.com/vllm-project/vllm-gaudi.git

RUN ln -s /workspace/vllm/tests && ln -s /workspace/vllm/examples && ln -s /workspace/vllm/benchmarks

EOF

# Setup cleanup
# certain versions of HPU software stack have a bug that can
# override the exit code of the script, so we need to use
# separate remove_docker_containers and remove_docker_containers_and_exit
# functions, while other platforms only need one remove_docker_container
# function.
EXITCODE=1
remove_docker_containers() { docker rm -f "${container_name}" || true; }
trap 'remove_docker_containers; exit $EXITCODE;' EXIT
remove_docker_containers

echo "Running HPU plugin v1 test"
docker run --rm --runtime=habana --name="${container_name}" --network=host \
    -e HABANA_VISIBLE_DEVICES=all \
    -e VLLM_SKIP_WARMUP=true \
    -e PT_HPU_ENABLE_LAZY_COLLECTIVES=true \
    -e PT_HPU_LAZY_MODE=1 \
    "${image_name}" \
    /bin/bash -c '
    cd vllm; timeout 120s python -u examples/offline_inference/basic/generate.py --model facebook/opt-125m
    '

EXITCODE=$?
if [ $EXITCODE -eq 0 ]; then
    echo "Test with basic model passed"
else
    echo "Test with basic model FAILED with exit code: $EXITCODE" >&2
fi

# The trap will handle the container removal and final exit.

#!/bin/bash

# This script builds the HPU docker image and runs the offline inference inside the container.
# It serves as a sanity check for compilation and basic model usage.
set -ex

# Try building the docker image
docker build -t hpu-test-env -f docker/Dockerfile.hpu .

# Setup cleanup
# certain versions of HPU software stack have a bug that can
# override the exit code of the script, so we need to use
# separate remove_docker_container and remove_docker_container_and_exit
# functions, while other platforms only need one remove_docker_container
# function.
EXITCODE=1
remove_docker_container() { docker rm -f hpu-test || true; }
remove_docker_container_and_exit() { remove_docker_container; exit $EXITCODE; }
trap remove_docker_container_and_exit EXIT
remove_docker_container

# Run the image and launch offline inference
docker run --runtime=habana --name=hpu-test --network=host -e HABANA_VISIBLE_DEVICES=all -e VLLM_SKIP_WARMUP=true --entrypoint="" hpu-test-env python3 examples/offline_inference/basic/generate.py --model facebook/opt-125m
EXITCODE=$?
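For reference, a sketch of the two states the first line of the VLLM_COMMUNITY_COMMIT file can hold (the SHA is illustrative):
# latest    -> build against the current Buildkite CI commit unchanged
# abc1234   -> pin: check /workspace/vllm out to commit abc1234 before building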
.buildkite/scripts/hardware_ci/run-neuron-test.sh (new file, 54 lines)
@@ -0,0 +1,54 @@
#!/bin/bash

# This script builds the Neuron docker image and runs the API server inside the container.
# It serves as a sanity check for compilation and basic model usage.
set -e
set -v

image_name="neuron/vllm-ci"
container_name="neuron_$(tr -dc A-Za-z0-9 < /dev/urandom | head -c 10; echo)"

HF_CACHE="$(realpath ~)/huggingface"
mkdir -p "${HF_CACHE}"
HF_MOUNT="/root/.cache/huggingface"

NEURON_COMPILE_CACHE_URL="$(realpath ~)/neuron_compile_cache"
mkdir -p "${NEURON_COMPILE_CACHE_URL}"
NEURON_COMPILE_CACHE_MOUNT="/root/.cache/neuron_compile_cache"

# Try building the docker image
aws ecr get-login-password --region us-west-2 | docker login --username AWS --password-stdin 763104351884.dkr.ecr.us-west-2.amazonaws.com

# Prune old images and containers to save disk space, at most once a day,
# by using a timestamp file in tmp.
if [ -f /tmp/neuron-docker-build-timestamp ]; then
    last_build=$(cat /tmp/neuron-docker-build-timestamp)
    current_time=$(date +%s)
    if [ $((current_time - last_build)) -gt 86400 ]; then
        # Remove dangling images (those that are not tagged and not used by any container)
        docker image prune -f
        # Remove unused volumes / force the system prune for old images as well.
        docker volume prune -f && docker system prune -f
        echo "$current_time" > /tmp/neuron-docker-build-timestamp
    fi
else
    date "+%s" > /tmp/neuron-docker-build-timestamp
fi

docker build -t "${image_name}" -f docker/Dockerfile.neuron .

# Setup cleanup
remove_docker_container() {
    docker image rm -f "${image_name}" || true;
}
trap remove_docker_container EXIT

# Run the image
docker run --rm -it --device=/dev/neuron0 --network bridge \
    -v "${HF_CACHE}:${HF_MOUNT}" \
    -e "HF_HOME=${HF_MOUNT}" \
    -v "${NEURON_COMPILE_CACHE_URL}:${NEURON_COMPILE_CACHE_MOUNT}" \
    -e "NEURON_COMPILE_CACHE_URL=${NEURON_COMPILE_CACHE_MOUNT}" \
    --name "${container_name}" \
    ${image_name} \
    /bin/bash -c "python3 /workspace/vllm/examples/offline_inference/neuron.py && python3 -m pytest /workspace/vllm/tests/neuron/1_core/ -v --capture=tee-sys && python3 -m pytest /workspace/vllm/tests/neuron/2_core/ -v --capture=tee-sys"
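A worked sketch of the once-a-day guard above (timestamps illustrative): with last_build=1700000000 and current_time=1700090000, the difference is 90000 s, which exceeds 86400 s (24 h), so the prune branch runs and the timestamp file is refreshed.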
@@ -1,187 +0,0 @@
#!/bin/bash

# This script builds the Ascend NPU docker image and runs the offline inference inside the container.
# It serves as a sanity check for compilation and basic model usage.
set -ex

# Base ubuntu image with basic ascend development libraries and python installed
VLLM_ASCEND_REPO="https://github.com/vllm-project/vllm-ascend.git"
CONFIG_FILE_REMOTE_PATH="tests/e2e/vllm_interface/vllm_test.cfg"
TEST_RUN_CONFIG_FILE="vllm_test.cfg"
VLLM_ASCEND_TMP_DIR=
# Get the test run configuration file from the vllm-ascend repository
fetch_vllm_test_cfg() {
    VLLM_ASCEND_TMP_DIR=$(mktemp -d)
    # Ensure that the temporary directory is cleaned up when an exception occurs during configuration file retrieval
    cleanup() {
        rm -rf "${VLLM_ASCEND_TMP_DIR}"
    }
    trap cleanup EXIT

    GIT_TRACE=1 git clone -v --depth 1 "${VLLM_ASCEND_REPO}" "${VLLM_ASCEND_TMP_DIR}"
    if [ ! -f "${VLLM_ASCEND_TMP_DIR}/${CONFIG_FILE_REMOTE_PATH}" ]; then
        echo "Error: file '${CONFIG_FILE_REMOTE_PATH}' does not exist in the repository" >&2
        exit 1
    fi

    # If the file already exists locally, just overwrite it
    cp "${VLLM_ASCEND_TMP_DIR}/${CONFIG_FILE_REMOTE_PATH}" "${TEST_RUN_CONFIG_FILE}"
    echo "Copied ${CONFIG_FILE_REMOTE_PATH} to ${TEST_RUN_CONFIG_FILE}"

    # The trap set above is overwritten later; by the time execution reaches this point, its
    # cleanup duty for the abnormal-exit case is already done, so the temporary resources are
    # deleted manually here.
    rm -rf "${VLLM_ASCEND_TMP_DIR}"
    trap - EXIT
}

# Loads the test run configuration file fetched above into the current script environment.
get_config() {
    if [ ! -f "${TEST_RUN_CONFIG_FILE}" ]; then
        echo "Error: file '${TEST_RUN_CONFIG_FILE}' does not exist locally" >&2
        exit 1
    fi
    # shellcheck source=/dev/null
    source "${TEST_RUN_CONFIG_FILE}"
    echo "Base docker image name from configuration: ${BASE_IMAGE_NAME}"
    return 0
}

# Get the test run configuration.
fetch_vllm_test_cfg
# Check if the function call was successful. If not, exit the script.
if ! get_config; then
    exit 1
fi

image_name="npu/vllm-ci:${BUILDKITE_COMMIT}_${EPOCHSECONDS}"
container_name="npu_${BUILDKITE_COMMIT}_$(tr -dc A-Za-z0-9 < /dev/urandom | head -c 10; echo)"

# BUILDKITE_AGENT_NAME format is {hostname}-{agent_idx}-{npu_card_num}cards
agent_idx=$(echo "${BUILDKITE_AGENT_NAME}" | awk -F'-' '{print $(NF-1)}')
echo "agent_idx: ${agent_idx}"
builder_name="cachebuilder${agent_idx}"
builder_cache_dir="/mnt/docker-cache${agent_idx}"
mkdir -p "${builder_cache_dir}"

# Try building the docker image
cat <<EOF | DOCKER_BUILDKIT=1 docker build \
    --add-host cache-service-vllm.nginx-pypi-cache.svc.cluster.local:"${PYPI_CACHE_HOST}" \
    --builder "${builder_name}" --cache-from type=local,src="${builder_cache_dir}" \
    --cache-to type=local,dest="${builder_cache_dir}",mode=max \
    --progress=plain --load -t "${image_name}" -f - .
FROM ${BASE_IMAGE_NAME}

# Define environments
ENV DEBIAN_FRONTEND=noninteractive
ENV SOC_VERSION="ascend910b1"

RUN pip config set global.index-url http://cache-service-vllm.nginx-pypi-cache.svc.cluster.local:${PYPI_CACHE_PORT}/pypi/simple && \
    pip config set global.trusted-host cache-service-vllm.nginx-pypi-cache.svc.cluster.local && \
    apt-get update -y && \
    apt-get install -y python3-pip git vim wget net-tools gcc g++ cmake libnuma-dev && \
    rm -rf /var/cache/apt/* && \
    rm -rf /var/lib/apt/lists/*

# Install pytest up front so the docker build cache layer stays valid
RUN --mount=type=cache,target=/root/.cache/pip \
    pip install pytest>=6.0 modelscope

WORKDIR /workspace/vllm

# Install vLLM dependencies in advance. Effect: as long as common.txt remains unchanged, the docker cache layer stays valid.
COPY requirements/common.txt /workspace/vllm/requirements/common.txt
RUN --mount=type=cache,target=/root/.cache/pip \
    pip install -r requirements/common.txt

COPY . .

# Install vLLM
RUN --mount=type=cache,target=/root/.cache/pip \
    VLLM_TARGET_DEVICE="empty" python3 -m pip install -v -e /workspace/vllm/ --extra-index https://download.pytorch.org/whl/cpu/ && \
    python3 -m pip uninstall -y triton

# Install vllm-ascend
WORKDIR /workspace
ARG VLLM_ASCEND_REPO=https://github.com/vllm-project/vllm-ascend.git
ARG VLLM_ASCEND_TAG=main
RUN git config --global url."https://gh-proxy.test.osinfra.cn/https://github.com/".insteadOf "https://github.com/" && \
    git clone --depth 1 \$VLLM_ASCEND_REPO --branch \$VLLM_ASCEND_TAG /workspace/vllm-ascend

# Install vllm-ascend dependencies in advance. Effect: as long as requirements.txt remains unchanged, the docker cache layer stays valid.
RUN --mount=type=cache,target=/root/.cache/pip \
    pip install -r /workspace/vllm-ascend/requirements.txt

RUN --mount=type=cache,target=/root/.cache/pip \
    export PIP_EXTRA_INDEX_URL=https://mirrors.huaweicloud.com/ascend/repos/pypi && \
    source /usr/local/Ascend/ascend-toolkit/set_env.sh && \
    source /usr/local/Ascend/nnal/atb/set_env.sh && \
    export LD_LIBRARY_PATH=\$LD_LIBRARY_PATH:/usr/local/Ascend/ascend-toolkit/latest/$(uname -i)-linux/devlib && \
    python3 -m pip install -v -e /workspace/vllm-ascend/ --extra-index https://download.pytorch.org/whl/cpu/

ENV VLLM_WORKER_MULTIPROC_METHOD=spawn
ENV VLLM_USE_MODELSCOPE=True

WORKDIR /workspace/vllm-ascend

CMD ["/bin/bash"]

EOF

# Setup cleanup
remove_docker_container() {
    docker rm -f "${container_name}" || true;
    docker image rm -f "${image_name}" || true;
    docker system prune -f || true;
}
trap remove_docker_container EXIT

# Generate corresponding --device args based on BUILDKITE_AGENT_NAME
# Ascend NPU BUILDKITE_AGENT_NAME format is {hostname}-{agent_idx}-{npu_card_num}cards, and agent_idx starts from 1.
# e.g. atlas-a2-001-1-2cards means this is the 1st agent on the atlas-a2-001 host, and it has 2 NPU cards.
# returns one argument per line: --device, /dev/davinciX, ...
parse_and_gen_devices() {
    local input="$1"
    local index cards_num
    if [[ "$input" =~ ([0-9]+)-([0-9]+)cards$ ]]; then
        index="${BASH_REMATCH[1]}"
        cards_num="${BASH_REMATCH[2]}"
    else
        echo "parse error" >&2
        return 1
    fi

    local i=0
    while (( i < cards_num )); do
        local dev_idx=$(((index - 1)*cards_num + i ))
        printf '%s\n' "--device"
        printf '%s\n' "/dev/davinci${dev_idx}"
        ((i++))
    done
}
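# Example from the naming rule above: for BUILDKITE_AGENT_NAME="atlas-a2-001-1-2cards",
# index=1 and cards_num=2, so the function emits --device /dev/davinci0 --device /dev/davinci1
# (one argument per line, ready for the mapfile call below).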
mapfile -t device_args < <(parse_and_gen_devices "${BUILDKITE_AGENT_NAME}") || exit 1

# Run the image and execute the Out-Of-Tree (OOT) platform interface test case on Ascend NPU hardware.
# This test checks whether the OOT platform interface is functioning properly in conjunction with
# the hardware plugin vllm-ascend.
model_cache_dir=/mnt/modelscope${agent_idx}
mkdir -p "${model_cache_dir}"
docker run \
    "${device_args[@]}" \
    --device /dev/davinci_manager \
    --device /dev/devmm_svm \
    --device /dev/hisi_hdc \
    -v /usr/local/dcmi:/usr/local/dcmi \
    -v /usr/local/bin/npu-smi:/usr/local/bin/npu-smi \
    -v /usr/local/Ascend/driver/lib64/:/usr/local/Ascend/driver/lib64/ \
    -v /usr/local/Ascend/driver/version.info:/usr/local/Ascend/driver/version.info \
    -v /etc/ascend_install.info:/etc/ascend_install.info \
    -v "${model_cache_dir}":/root/.cache/modelscope \
    --entrypoint="" \
    --name "${container_name}" \
    "${image_name}" \
    bash -c '
    set -e
    pytest -v -s tests/e2e/vllm_interface/
    '
@@ -1,166 +0,0 @@
#!/bin/bash

set -xu

remove_docker_container() {
    docker rm -f tpu-test || true;
}

trap remove_docker_container EXIT

# Remove the container that might not be cleaned up in the previous run.
remove_docker_container

# Build the docker image.
docker build -f docker/Dockerfile.tpu -t vllm-tpu .

# Set up cleanup.
cleanup_docker() {
    # Get Docker's root directory
    docker_root=$(docker info -f '{{.DockerRootDir}}')
    if [ -z "$docker_root" ]; then
        echo "Failed to determine Docker root directory."
        exit 1
    fi
    echo "Docker root directory: $docker_root"
    # Check disk usage of the filesystem where Docker's root directory is located
    disk_usage=$(df "$docker_root" | tail -1 | awk '{print $5}' | sed 's/%//')
    # Define the threshold
    threshold=70
    if [ "$disk_usage" -gt "$threshold" ]; then
        echo "Disk usage is above $threshold%. Cleaning up Docker images and volumes..."
        # Remove dangling images (those that are not tagged and not used by any container)
        docker image prune -f
        # Remove unused volumes / force the system prune for old images as well.
        docker volume prune -f && docker system prune --force --filter "until=72h" --all
        echo "Docker images and volumes cleanup completed."
    else
        echo "Disk usage is below $threshold%. No cleanup needed."
    fi
}
cleanup_docker

# For HF_TOKEN.
source /etc/environment

docker run --privileged --net host --shm-size=16G -it \
    -e "HF_TOKEN=$HF_TOKEN" --name tpu-test \
    vllm-tpu /bin/bash -c '
set -e # Exit immediately if a command exits with a non-zero status.
set -u # Treat unset variables as an error.

echo "--- Starting script inside Docker container ---"

# Create results directory
RESULTS_DIR=$(mktemp -d)
# If mktemp fails, set -e will cause the script to exit.
echo "Results will be stored in: $RESULTS_DIR"

# Install dependencies
echo "--- Installing Python dependencies ---"
python3 -m pip install --progress-bar off git+https://github.com/thuml/depyf.git \
    && python3 -m pip install --progress-bar off pytest pytest-asyncio tpu-info \
    && python3 -m pip install --progress-bar off "lm-eval[api]>=0.4.11" \
    && python3 -m pip install --progress-bar off hf-transfer tblib==3.1.0
echo "--- Python dependencies installed ---"

export VLLM_XLA_CHECK_RECOMPILATION=1
export VLLM_XLA_CACHE_PATH=

echo "--- Hardware Information ---"
# tpu-info
echo "--- Starting Tests ---"
set +e
overall_script_exit_code=0

# --- Test Definitions ---
# If a test fails, this function will print logs and will not cause the main script to exit.
run_test() {
    local test_num=$1
    local test_name=$2
    local test_command=$3
    local log_file="$RESULTS_DIR/test_${test_num}.log"
    local actual_exit_code

    echo "--- TEST_$test_num: Running $test_name ---"

    # Execute the test command.
    eval "$test_command" > >(tee -a "$log_file") 2> >(tee -a "$log_file" >&2)
    actual_exit_code=$?

    echo "TEST_${test_num}_COMMAND_EXIT_CODE: $actual_exit_code" # This goes to main log
    echo "TEST_${test_num}_COMMAND_EXIT_CODE: $actual_exit_code" >> "$log_file" # Also to per-test log

    if [ "$actual_exit_code" -ne 0 ]; then
        echo "TEST_$test_num ($test_name) FAILED with exit code $actual_exit_code." >&2
        echo "--- Log for failed TEST_$test_num ($test_name) ---" >&2
        if [ -f "$log_file" ]; then
            cat "$log_file" >&2
        else
            echo "Log file $log_file not found for TEST_$test_num ($test_name)." >&2
        fi
        echo "--- End of log for TEST_$test_num ($test_name) ---" >&2
        return "$actual_exit_code" # Return the failure code
    else
        echo "TEST_$test_num ($test_name) PASSED."
        return 0 # Return success
    fi
}

# Helper function to call run_test and update the overall script exit code
run_and_track_test() {
    local test_num_arg="$1"
    local test_name_arg="$2"
    local test_command_arg="$3"

    # Run the test
    run_test "$test_num_arg" "$test_name_arg" "$test_command_arg"
    local test_specific_exit_code=$?

    # If the test failed, set the overall script exit code to 1
    if [ "$test_specific_exit_code" -ne 0 ]; then
        # No need for extra echo here, run_test already logged the failure.
        overall_script_exit_code=1
    fi
}
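# A hypothetical example of registering one more test through the helper above
# (the test number, name, and path are illustrative, not part of the suite):
#   run_and_track_test 8 "test_my_feature.py" \
#       "python3 -m pytest -s -v /workspace/vllm/tests/v1/tpu/test_my_feature.py"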
# --- Actual Test Execution ---
run_and_track_test 1 "test_struct_output_generate.py" \
    "python3 -m pytest -s -v /workspace/vllm/tests/v1/entrypoints/llm/test_struct_output_generate.py -k \"not test_structured_output_with_reasoning_matrices\""
run_and_track_test 2 "test_moe_pallas.py" \
    "python3 -m pytest -s -v /workspace/vllm/tests/tpu/test_moe_pallas.py"
run_and_track_test 3 "test_lora.py" \
    "VLLM_XLA_CHECK_RECOMPILATION=0 python3 -m pytest -s -v /workspace/vllm/tests/tpu/lora/test_lora.py"
run_and_track_test 4 "test_tpu_qkv_linear.py" \
    "python3 -m pytest -s -v /workspace/vllm/tests/v1/tpu/test_tpu_qkv_linear.py"
run_and_track_test 5 "test_spmd_model_weight_loading.py" \
    "python3 -m pytest -s -v /workspace/vllm/tests/v1/tpu/test_spmd_model_weight_loading.py"
run_and_track_test 6 "test_kv_cache_update_kernel.py" \
    "python3 -m pytest -s -v /workspace/vllm/tests/v1/tpu/test_kv_cache_update_kernel.py"
run_and_track_test 7 "test_tpu_int8.py" \
    "python3 -m pytest -s -v /workspace/vllm/tests/v1/tpu/test_tpu_int8.py"

# After all tests have been attempted, exit with the overall status.
if [ "$overall_script_exit_code" -ne 0 ]; then
    echo "--- One or more tests FAILED. Overall script exiting with failure code 1. ---"
else
    echo "--- All tests have completed and PASSED. Overall script exiting with success code 0. ---"
fi
exit "$overall_script_exit_code"
' # IMPORTANT: This is the closing single quote for the bash -c "..." command. Ensure it is present and correct.

# Capture the exit code of the docker run command
DOCKER_RUN_EXIT_CODE=$?

# The trap will run for cleanup.
# Exit the main script with the Docker run command's exit code.
if [ "$DOCKER_RUN_EXIT_CODE" -ne 0 ]; then
    echo "Docker run command failed with exit code $DOCKER_RUN_EXIT_CODE."
    exit "$DOCKER_RUN_EXIT_CODE"
else
    echo "Docker run command completed successfully."
    exit 0
fi
# TODO: This test fails because it uses RANDOM_SEED sampling
# pytest -v -s /workspace/vllm/tests/tpu/test_custom_dispatcher.py \
@@ -1,174 +1,54 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
|
|
||||||
set -xu
|
set -xue
|
||||||
|
|
||||||
|
|
||||||
remove_docker_container() {
|
|
||||||
docker rm -f tpu-test || true;
|
|
||||||
}
|
|
||||||
|
|
||||||
trap remove_docker_container EXIT
|
|
||||||
|
|
||||||
# Remove the container that might not be cleaned up in the previous run.
|
|
||||||
remove_docker_container
|
|
||||||
|
|
||||||
# Build the docker image.
|
# Build the docker image.
|
||||||
docker build -f docker/Dockerfile.tpu -t vllm-tpu .
|
docker build -f docker/Dockerfile.tpu -t vllm-tpu .
|
||||||
|
|
||||||
# Set up cleanup.
|
# Set up cleanup.
|
||||||
cleanup_docker() {
|
remove_docker_container() { docker rm -f tpu-test || true; }
|
||||||
# Get Docker's root directory
|
trap remove_docker_container EXIT
|
||||||
docker_root=$(docker info -f '{{.DockerRootDir}}')
|
# Remove the container that might not be cleaned up in the previous run.
|
||||||
if [ -z "$docker_root" ]; then
|
remove_docker_container
|
||||||
echo "Failed to determine Docker root directory."
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
echo "Docker root directory: $docker_root"
|
|
||||||
# Check disk usage of the filesystem where Docker's root directory is located
|
|
||||||
disk_usage=$(df "$docker_root" | tail -1 | awk '{print $5}' | sed 's/%//')
|
|
||||||
# Define the threshold
|
|
||||||
threshold=70
|
|
||||||
if [ "$disk_usage" -gt "$threshold" ]; then
|
|
||||||
echo "Disk usage is above $threshold%. Cleaning up Docker images and volumes..."
|
|
||||||
# Remove dangling images (those that are not tagged and not used by any container)
|
|
||||||
docker image prune -f
|
|
||||||
# Remove unused volumes / force the system prune for old images as well.
|
|
||||||
docker volume prune -f && docker system prune --force --filter "until=72h" --all
|
|
||||||
echo "Docker images and volumes cleanup completed."
|
|
||||||
else
|
|
||||||
echo "Disk usage is below $threshold%. No cleanup needed."
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
cleanup_docker
|
|
||||||
|
|
||||||
# For HF_TOKEN.
|
# For HF_TOKEN.
|
||||||
source /etc/environment
|
source /etc/environment
|
+# Run a simple end-to-end example.
docker run --privileged --net host --shm-size=16G -it \
    -e "HF_TOKEN=$HF_TOKEN" --name tpu-test \
-    vllm-tpu /bin/bash -c '
-set -e # Exit immediately if a command exits with a non-zero status.
-set -u # Treat unset variables as an error.
-
-echo "--- Starting script inside Docker container ---"
-
-# Create results directory
-RESULTS_DIR=$(mktemp -d)
-# If mktemp fails, set -e will cause the script to exit.
-echo "Results will be stored in: $RESULTS_DIR"
-
-# Install dependencies
-echo "--- Installing Python dependencies ---"
-python3 -m pip install --progress-bar off git+https://github.com/thuml/depyf.git \
-    && python3 -m pip install --progress-bar off pytest pytest-asyncio tpu-info \
-    && python3 -m pip install --progress-bar off "lm-eval[api]>=0.4.11" \
-    && python3 -m pip install --progress-bar off hf-transfer tblib==3.1.0
-echo "--- Python dependencies installed ---"
-
-export VLLM_XLA_CHECK_RECOMPILATION=1
-export VLLM_XLA_CACHE_PATH=
-
-echo "--- Hardware Information ---"
-# tpu-info
-echo "--- Starting Tests ---"
-set +e
-overall_script_exit_code=0
-
-# --- Test Definitions ---
-# If a test fails, this function will print logs and will not cause the main script to exit.
-run_test() {
-    local test_num=$1
-    local test_name=$2
-    local test_command=$3
-    local log_file="$RESULTS_DIR/test_${test_num}.log"
-    local actual_exit_code
-
-    echo "--- TEST_$test_num: Running $test_name ---"
-
-    # Execute the test command.
-    eval "$test_command" > >(tee -a "$log_file") 2> >(tee -a "$log_file" >&2)
-    actual_exit_code=$?
-
-    echo "TEST_${test_num}_COMMAND_EXIT_CODE: $actual_exit_code" # This goes to main log
-    echo "TEST_${test_num}_COMMAND_EXIT_CODE: $actual_exit_code" >> "$log_file" # Also to per-test log
-
-    if [ "$actual_exit_code" -ne 0 ]; then
-        echo "TEST_$test_num ($test_name) FAILED with exit code $actual_exit_code." >&2
-        echo "--- Log for failed TEST_$test_num ($test_name) ---" >&2
-        if [ -f "$log_file" ]; then
-            cat "$log_file" >&2
-        else
-            echo "Log file $log_file not found for TEST_$test_num ($test_name)." >&2
-        fi
-        echo "--- End of log for TEST_$test_num ($test_name) ---" >&2
-        return "$actual_exit_code" # Return the failure code
-    else
-        echo "TEST_$test_num ($test_name) PASSED."
-        return 0 # Return success
-    fi
-}
-
-# Helper function to call run_test and update the overall script exit code
-run_and_track_test() {
-    local test_num_arg="$1"
-    local test_name_arg="$2"
-    local test_command_arg="$3"
-
-    # Run the test
-    run_test "$test_num_arg" "$test_name_arg" "$test_command_arg"
-    local test_specific_exit_code=$?
-
-    # If the test failed, set the overall script exit code to 1
-    if [ "$test_specific_exit_code" -ne 0 ]; then
-        # No need for extra echo here, run_test already logged the failure.
-        overall_script_exit_code=1
-    fi
-}
-
-# --- Actual Test Execution ---
-run_and_track_test 0 "test_perf.py" \
-    "python3 -m pytest -s -v /workspace/vllm/tests/v1/tpu/test_perf.py"
-run_and_track_test 1 "test_compilation.py" \
-    "python3 -m pytest -s -v /workspace/vllm/tests/tpu/test_compilation.py"
-run_and_track_test 2 "test_basic.py" \
-    "python3 -m pytest -s -v /workspace/vllm/tests/v1/tpu/test_basic.py"
-run_and_track_test 3 "test_accuracy.py::test_lm_eval_accuracy_v1_engine" \
-    "python3 -m pytest -s -v /workspace/vllm/tests/entrypoints/llm/test_accuracy.py::test_lm_eval_accuracy_v1_engine"
-run_and_track_test 4 "test_quantization_accuracy.py" \
-    "python3 -m pytest -s -v /workspace/vllm/tests/tpu/test_quantization_accuracy.py"
-run_and_track_test 5 "examples/offline_inference/tpu.py" \
-    "python3 /workspace/vllm/examples/offline_inference/tpu.py"
-run_and_track_test 6 "test_tpu_model_runner.py" \
-    "python3 -m pytest -s -v /workspace/vllm/tests/v1/tpu/worker/test_tpu_model_runner.py"
-run_and_track_test 7 "test_sampler.py" \
-    "python3 -m pytest -s -v /workspace/vllm/tests/v1/tpu/test_sampler.py"
-run_and_track_test 8 "test_topk_topp_sampler.py" \
-    "python3 -m pytest -s -v /workspace/vllm/tests/v1/tpu/test_topk_topp_sampler.py"
-run_and_track_test 9 "test_multimodal.py" \
-    "python3 -m pytest -s -v /workspace/vllm/tests/v1/tpu/test_multimodal.py"
-run_and_track_test 10 "test_pallas.py" \
-    "python3 -m pytest -s -v /workspace/vllm/tests/v1/tpu/test_pallas.py"
-
-# After all tests have been attempted, exit with the overall status.
-if [ "$overall_script_exit_code" -ne 0 ]; then
-    echo "--- One or more tests FAILED. Overall script exiting with failure code 1. ---"
-else
-    echo "--- All tests have completed and PASSED. Overall script exiting with success code 0. ---"
-fi
-exit "$overall_script_exit_code"
-' # IMPORTANT: This is the closing single quote for the bash -c '...' command. Ensure it is present and correct.
-
-# Capture the exit code of the docker run command
-DOCKER_RUN_EXIT_CODE=$?
-
-# The trap will run for cleanup.
-# Exit the main script with the Docker run command's exit code.
-if [ "$DOCKER_RUN_EXIT_CODE" -ne 0 ]; then
-    echo "Docker run command failed with exit code $DOCKER_RUN_EXIT_CODE."
-    exit "$DOCKER_RUN_EXIT_CODE"
-else
-    echo "Docker run command completed successfully."
-    exit 0
-fi
+    vllm-tpu /bin/bash -c "python3 -m pip install git+https://github.com/thuml/depyf.git \
+    && python3 -m pip install pytest pytest-asyncio tpu-info \
+    && python3 -m pip install lm_eval[api]==0.4.4 \
+    && export VLLM_XLA_CACHE_PATH= \
+    && export VLLM_USE_V1=1 \
+    && export VLLM_XLA_CHECK_RECOMPILATION=1 \
+    && echo HARDWARE \
+    && tpu-info \
+    && echo TEST_0 \
+    && pytest -v -s /workspace/vllm/tests/v1/tpu/test_perf.py \
+    && echo TEST_1 \
+    && pytest -v -s /workspace/vllm/tests/tpu/test_compilation.py \
+    && echo TEST_2 \
+    && pytest -v -s /workspace/vllm/tests/v1/tpu/test_basic.py \
+    && echo TEST_3 \
+    && pytest -v -s /workspace/vllm/tests/entrypoints/llm/test_accuracy.py::test_lm_eval_accuracy_v1_engine \
+    && echo TEST_4 \
+    && pytest -s -v /workspace/vllm/tests/tpu/test_quantization_accuracy.py \
+    && echo TEST_5 \
+    && python3 /workspace/vllm/examples/offline_inference/tpu.py \
+    && echo TEST_6 \
+    && pytest -s -v /workspace/vllm/tests/v1/tpu/worker/test_tpu_model_runner.py \
+    && echo TEST_7 \
+    && pytest -s -v /workspace/vllm/tests/v1/tpu/test_sampler.py \
+    && echo TEST_8 \
+    && pytest -s -v /workspace/vllm/tests/v1/tpu/test_topk_topp_sampler.py \
+    && echo TEST_9 \
+    && pytest -s -v /workspace/vllm/tests/v1/tpu/test_multimodal.py \
+    && echo TEST_10 \
+    && pytest -s -v /workspace/vllm/tests/v1/tpu/test_pallas.py \
+    && echo TEST_11 \
+    && pytest -s -v /workspace/vllm/tests/v1/entrypoints/llm/test_struct_output_generate.py" \

# TODO: This test fails because it uses RANDOM_SEED sampling
-# pytest -v -s /workspace/vllm/tests/tpu/test_custom_dispatcher.py \
+# && VLLM_USE_V1=1 pytest -v -s /workspace/vllm/tests/tpu/test_custom_dispatcher.py \
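Note: run_test on the removed side tees stdout and stderr to the per-test log through process substitution rather than a pipe, so $? still reports the exit status of the eval'd test command and not of tee. A minimal sketch of the same pattern in isolation:

    #!/bin/bash
    log=$(mktemp)
    false > >(tee -a "$log") 2> >(tee -a "$log" >&2)
    echo "exit code: $?"   # prints 1, the status of 'false', not of tee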
@@ -8,11 +8,11 @@ image_name="xpu/vllm-ci:${BUILDKITE_COMMIT}"
container_name="xpu_${BUILDKITE_COMMIT}_$(tr -dc A-Za-z0-9 < /dev/urandom | head -c 10; echo)"

# Try building the docker image
-docker build -t "${image_name}" -f docker/Dockerfile.xpu .
+docker build -t ${image_name} -f docker/Dockerfile.xpu .

# Setup cleanup
remove_docker_container() {
    docker rm -f "${container_name}" || true;
    docker image rm -f "${image_name}" || true;
    docker system prune -f || true;
}
@@ -20,36 +20,12 @@ trap remove_docker_container EXIT

# Run the image and test offline inference/tensor parallel
docker run \
-    --device /dev/dri:/dev/dri \
-    --net=host \
-    --ipc=host \
-    --privileged \
+    --device /dev/dri \
    -v /dev/dri/by-path:/dev/dri/by-path \
    --entrypoint="" \
-    -e "HF_TOKEN=${HF_TOKEN}" \
-    -e "ZE_AFFINITY_MASK=${ZE_AFFINITY_MASK}" \
    --name "${container_name}" \
    "${image_name}" \
-    bash -c '
-    set -e
-    echo $ZE_AFFINITY_MASK
-    pip install tblib==3.1.0
-    python3 examples/offline_inference/basic/generate.py --model facebook/opt-125m --block-size 64 --enforce-eager
-    python3 examples/offline_inference/basic/generate.py --model facebook/opt-125m --block-size 64 -O3 -cc.cudagraph_mode=NONE
-    python3 examples/offline_inference/basic/generate.py --model facebook/opt-125m --block-size 64 --enforce-eager -tp 2 --distributed-executor-backend ray
-    python3 examples/offline_inference/basic/generate.py --model facebook/opt-125m --block-size 64 --enforce-eager -tp 2 --distributed-executor-backend mp
-    python3 examples/offline_inference/basic/generate.py --model facebook/opt-125m --block-size 64 --enforce-eager --attention-backend=TRITON_ATTN
-    python3 examples/offline_inference/basic/generate.py --model facebook/opt-125m --block-size 64 --enforce-eager --quantization fp8
-    python3 examples/offline_inference/basic/generate.py --model superjob/Qwen3-4B-Instruct-2507-GPTQ-Int4 --block-size 64 --enforce-eager
-    python3 examples/offline_inference/basic/generate.py --model ibm-research/PowerMoE-3b --block-size 64 --enforce-eager -tp 2
-    python3 examples/offline_inference/basic/generate.py --model ibm-research/PowerMoE-3b --block-size 64 --enforce-eager -tp 2 --enable-expert-parallel
-    cd tests
-    pytest -v -s v1/core --ignore=v1/core/test_reset_prefix_cache_e2e.py --ignore=v1/core/test_scheduler_e2e.py
-    pytest -v -s v1/engine
-    pytest -v -s v1/sample --ignore=v1/sample/test_logprobs.py --ignore=v1/sample/test_logprobs_e2e.py
-    pytest -v -s v1/worker --ignore=v1/worker/test_gpu_model_runner.py
-    pytest -v -s v1/structured_output
-    pytest -v -s v1/spec_decode --ignore=v1/spec_decode/test_max_len.py --ignore=v1/spec_decode/test_tree_attention.py --ignore=v1/spec_decode/test_speculators_eagle3.py --ignore=v1/spec_decode/test_acceptance_length.py
-    pytest -v -s v1/kv_connector/unit --ignore=v1/kv_connector/unit/test_multi_connector.py --ignore=v1/kv_connector/unit/test_nixl_connector.py --ignore=v1/kv_connector/unit/test_example_connector.py --ignore=v1/kv_connector/unit/test_lmcache_integration.py
-    pytest -v -s v1/test_serial_utils.py
+    sh -c '
+    VLLM_USE_V1=0 python3 examples/offline_inference/basic/generate.py --model facebook/opt-125m
+    VLLM_USE_V1=0 python3 examples/offline_inference/basic/generate.py --model facebook/opt-125m -tp 2
    '
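Note on the build line above: the quoted "${image_name}" on the removed side keeps the tag as a single argument even if the variable ever contained whitespace; the unquoted form on the added side works only because the value is whitespace-free. A quick illustration with a hypothetical value:

    name='tag with spaces'          # hypothetical value, for illustration
    printf '[%s]\n' ${name}         # word-splits into three arguments
    printf '[%s]\n' "${name}"       # passes one argument, as intended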
@@ -1,36 +0,0 @@
-#!/bin/bash
-
-set -ex
-
-# Get tag variant from argument, default to empty if not provided, should be something like "cu130".
-# Due to limits in the cleanup script, we must move variants to use separate tags like "cu130-nightly",
-# otherwise they will be cleaned up together with the main "nightly" tags.
-
-TAG_VARIANT="$1"
-if [ -n "$TAG_VARIANT" ]; then
-    ORIG_TAG_SUFFIX="-$TAG_VARIANT"
-    TAG_NAME="$TAG_VARIANT-nightly"
-else
-    ORIG_TAG_SUFFIX=""
-    TAG_NAME="nightly"
-fi
-
-ORIG_TAG_NAME="$BUILDKITE_COMMIT"
-
-echo "Pushing original tag $ORIG_TAG_NAME$ORIG_TAG_SUFFIX to new nightly tag name: $TAG_NAME"
-
-# pull original arch-dependent images from AWS ECR Public
-aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin public.ecr.aws/q9t5s3a7
-docker pull public.ecr.aws/q9t5s3a7/vllm-release-repo:"$ORIG_TAG_NAME"-x86_64"$ORIG_TAG_SUFFIX"
-docker pull public.ecr.aws/q9t5s3a7/vllm-release-repo:"$ORIG_TAG_NAME"-aarch64"$ORIG_TAG_SUFFIX"
-# tag arch-dependent images
-docker tag public.ecr.aws/q9t5s3a7/vllm-release-repo:"$ORIG_TAG_NAME"-x86_64"$ORIG_TAG_SUFFIX" vllm/vllm-openai:"$TAG_NAME"-x86_64
-docker tag public.ecr.aws/q9t5s3a7/vllm-release-repo:"$ORIG_TAG_NAME"-aarch64"$ORIG_TAG_SUFFIX" vllm/vllm-openai:"$TAG_NAME"-aarch64
-# push arch-dependent images to DockerHub
-docker push vllm/vllm-openai:"$TAG_NAME"-x86_64
-docker push vllm/vllm-openai:"$TAG_NAME"-aarch64
-# push arch-independent manifest to DockerHub
-docker manifest create vllm/vllm-openai:"$TAG_NAME" vllm/vllm-openai:"$TAG_NAME"-x86_64 vllm/vllm-openai:"$TAG_NAME"-aarch64 --amend
-docker manifest create vllm/vllm-openai:"$TAG_NAME"-"$BUILDKITE_COMMIT" vllm/vllm-openai:"$TAG_NAME"-x86_64 vllm/vllm-openai:"$TAG_NAME"-aarch64 --amend
-docker manifest push vllm/vllm-openai:"$TAG_NAME"
-docker manifest push vllm/vllm-openai:"$TAG_NAME"-"$BUILDKITE_COMMIT"
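Note: the deleted script above promotes per-arch images and then publishes multi-arch manifest lists, so a single nightly tag resolves correctly for both x86_64 and aarch64 pulls. One way to spot-check the pushed manifest afterwards (tag name illustrative):

    docker manifest inspect vllm/vllm-openai:nightly | grep '"architecture"'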
@@ -1,18 +0,0 @@
-#!/bin/bash
-
-# Usage: ./rerun_test.sh path/to/test.py::test_name
-
-# Check if an argument is given
-if [ $# -lt 1 ]; then
-    echo "Usage: $0 path/to/test.py::test_name"
-    echo "Example: $0 tests/v1/engine/test_engine_core_client.py::test_kv_cache_events[True-tcp]"
-    exit 1
-fi
-
-TEST=$1
-COUNT=1
-
-while pytest -sv "$TEST"; do
-    COUNT=$((COUNT + 1))
-    echo "RUN NUMBER ${COUNT}"
-done
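Note: the deleted helper above reruns one test until its first failure, which is handy for reproducing flaky tests; since COUNT is incremented before the echo, RUN NUMBER announces the run that is about to start. Typical invocation, taken from its own usage text:

    ./rerun_test.sh tests/v1/engine/test_engine_core_client.py::test_kv_cache_events[True-tcp]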
@@ -11,20 +11,20 @@ cd "$(dirname "${BASH_SOURCE[0]}")/../.."
(which wget && which curl) || (apt-get update && apt-get install -y wget curl)

# run python-based benchmarks and upload the result to buildkite
-vllm bench latency --output-json latency_results.json 2>&1 | tee benchmark_latency.txt
+python3 benchmarks/benchmark_latency.py --output-json latency_results.json 2>&1 | tee benchmark_latency.txt
bench_latency_exit_code=$?

-vllm bench throughput --input-len 256 --output-len 256 --output-json throughput_results.json 2>&1 | tee benchmark_throughput.txt
+python3 benchmarks/benchmark_throughput.py --input-len 256 --output-len 256 --output-json throughput_results.json 2>&1 | tee benchmark_throughput.txt
bench_throughput_exit_code=$?

# run server-based benchmarks and upload the result to buildkite
-vllm serve meta-llama/Llama-2-7b-chat-hf &
+python3 -m vllm.entrypoints.openai.api_server --model meta-llama/Llama-2-7b-chat-hf &
server_pid=$!
wget https://huggingface.co/datasets/anon8231489123/ShareGPT_Vicuna_unfiltered/resolve/main/ShareGPT_V3_unfiltered_cleaned_split.json

# wait for server to start, timeout after 600 seconds
timeout 600 bash -c 'until curl localhost:8000/v1/models; do sleep 1; done' || exit 1
-vllm bench serve \
+python3 benchmarks/benchmark_serving.py \
    --backend vllm \
    --dataset-name sharegpt \
    --dataset-path ./ShareGPT_V3_unfiltered_cleaned_split.json \
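Note: the readiness gate before the serving benchmark simply polls the OpenAI-compatible /v1/models endpoint until the server answers. The same wait loop in isolation, with an illustrative shorter timeout:

    timeout 60 bash -c 'until curl -sf localhost:8000/v1/models > /dev/null; do sleep 1; done' \
        && echo "server is up" || echo "server did not come up in time"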
@@ -2,17 +2,6 @@
set -euox pipefail

-# To detect ROCm
-# Check multiple indicators:
-if [ -e /dev/kfd ] || \
-   [ -d /opt/rocm ] || \
-   command -v rocm-smi &> /dev/null || \
-   [ -n "${ROCM_HOME:-}" ]; then
-    IS_ROCM=1
-else
-    IS_ROCM=0
-fi
-
if [[ $# -lt 4 ]]; then
    echo "Usage: .buildkite/scripts/run-multi-node-test.sh WORKING_DIR NUM_NODES NUM_GPUS DOCKER_IMAGE COMMAND1 COMMAND2 ... COMMANDN"
    exit 1
@@ -37,18 +26,13 @@ for command in "${COMMANDS[@]}"; do
    echo "$command"
done

start_network() {
    docker network create --subnet=192.168.10.0/24 docker-net
}

start_nodes() {
    for node in $(seq 0 $(($NUM_NODES-1))); do
-        if [ "$IS_ROCM" -eq 1 ]; then
-            GPU_DEVICES='--device /dev/kfd --device /dev/dri -e HIP_VISIBLE_DEVICES='
-        else
-            GPU_DEVICES='--gpus "device='
-        fi
+        GPU_DEVICES='"device='
        for node_gpu in $(seq 0 $(($NUM_GPUS - 1))); do
            DEVICE_NUM=$(($node * $NUM_GPUS + $node_gpu))
            GPU_DEVICES+=$(($DEVICE_NUM))
@@ -56,9 +40,7 @@ start_nodes() {
                GPU_DEVICES+=','
            fi
        done
-        if [ "$IS_ROCM" -eq 0 ]; then
-            GPU_DEVICES+='"'
-        fi
+        GPU_DEVICES+='"'

        # start the container in detached mode
        # things to note:
@@ -67,7 +49,7 @@ start_nodes() {
        # 3. map the huggingface cache directory to the container
        # 3. assign ip addresses to the containers (head node: 192.168.10.10, worker nodes:
        #    starting from 192.168.10.11)
-        docker run -d $GPU_DEVICES --shm-size=10.24gb -e HF_TOKEN \
+        docker run -d --gpus "$GPU_DEVICES" --shm-size=10.24gb -e HF_TOKEN \
            -v ~/.cache/huggingface:/root/.cache/huggingface --name "node$node" \
            --network docker-net --ip 192.168.10.$((10 + $node)) --rm "$DOCKER_IMAGE" \
            /bin/bash -c "tail -f /dev/null"
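Note: in the non-ROCm branch on the removed side, GPU_DEVICES is assembled into a single --gpus argument per node. A worked sketch of the string assembly for node 1 with NUM_GPUS=4 (values hypothetical):

    NUM_GPUS=4; node=1
    GPU_DEVICES='--gpus "device='
    for node_gpu in $(seq 0 $((NUM_GPUS - 1))); do
        DEVICE_NUM=$((node * NUM_GPUS + node_gpu))
        GPU_DEVICES+=$DEVICE_NUM
        if [ "$node_gpu" -lt $((NUM_GPUS - 1)) ]; then GPU_DEVICES+=','; fi
    done
    GPU_DEVICES+='"'
    echo "$GPU_DEVICES"   # --gpus "device=4,5,6,7"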
@@ -1,72 +0,0 @@
-#!/usr/bin/env bash
-set -euxo pipefail
-
-# args: [THRESHOLD] [NUM_QUESTIONS] [START_PORT]
-THRESHOLD=${1:-0.25}
-NUM_Q=${2:-1319}
-PORT=${3:-8010}
-OUT_DIR=${OUT_DIR:-/tmp/vllm-scheduled}
-mkdir -p "${OUT_DIR}"
-
-wait_for_server() {
-    local port=$1
-    timeout 600 bash -c '
-        until curl -sf "http://127.0.0.1:'"$port"'/health" > /dev/null; do
-            sleep 1
-        done'
-}
-
-MODEL="deepseek-ai/DeepSeek-V2-lite"
-
-# Set BACKENDS based on platform
-if command -v rocm-smi &> /dev/null || [[ -d /opt/rocm ]] || [[ -n "${ROCM_PATH:-}" ]]; then
-    # ROCm platform
-    BACKENDS=("allgather_reducescatter")
-    # Disable MoE padding for ROCm since it is causing EPLB to fail
-    export VLLM_ROCM_MOE_PADDING=0
-else
-    # Non-ROCm platform (CUDA/other)
-    BACKENDS=("deepep_high_throughput" "deepep_low_latency")
-fi
-
-cleanup() {
-    if [[ -n "${SERVER_PID:-}" ]] && kill -0 "${SERVER_PID}" 2>/dev/null; then
-        kill "${SERVER_PID}" 2>/dev/null || true
-        for _ in {1..20}; do
-            kill -0 "${SERVER_PID}" 2>/dev/null || break
-            sleep 0.5
-        done
-        kill -9 "${SERVER_PID}" 2>/dev/null || true
-    fi
-}
-trap cleanup EXIT
-
-for BACK in "${BACKENDS[@]}"; do
-    VLLM_DEEP_GEMM_WARMUP=skip \
-    vllm serve "$MODEL" \
-        --enforce-eager \
-        --tensor-parallel-size 2 \
-        --data-parallel-size 2 \
-        --enable-expert-parallel \
-        --enable-eplb \
-        --trust-remote-code \
-        --max-model-len 2048 \
-        --all2all-backend "$BACK" \
-        --port "$PORT" &
-    SERVER_PID=$!
-    wait_for_server "$PORT"
-
-    TAG=$(echo "$MODEL" | tr '/: \\n' '_____')
-    OUT="${OUT_DIR}/${TAG}_${BACK}.json"
-    python3 tests/evals/gsm8k/gsm8k_eval.py --host http://127.0.0.1 --port "$PORT" --num-questions "${NUM_Q}" --save-results "${OUT}"
-    python3 - <<PY
-import json; acc=json.load(open('${OUT}'))['accuracy']
-print(f"${MODEL} ${BACK}: accuracy {acc:.3f}")
-assert acc >= ${THRESHOLD}, f"${MODEL} ${BACK} accuracy {acc}"
-PY
-
-    cleanup
-    SERVER_PID=
-    sleep 1
-    PORT=$((PORT+1))
-done
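Note: each loop iteration above gates on the accuracy field that gsm8k_eval.py saves into the results JSON. An equivalent standalone check, assuming a results file shaped like {"accuracy": 0.31} (path and threshold illustrative):

    OUT=/tmp/vllm-scheduled/results.json   # hypothetical path, for illustration
    THRESHOLD=0.25
    python3 -c "import json,sys; acc=json.load(open('${OUT}'))['accuracy']; sys.exit(0 if acc >= ${THRESHOLD} else 1)"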
Some files were not shown because too many files have changed in this diff.