# syntax=docker/dockerfile:1
# This vLLM Dockerfile is used to build images that can run vLLM on both x86_64 and arm64 CPU platforms.
#
# Supported platforms:
#   - linux/amd64 (x86_64)
#   - linux/arm64 (aarch64)
#
# Use the `--platform` option with `docker buildx build` to specify the target architecture, e.g.:
#   docker buildx build --platform=linux/arm64 -f docker/Dockerfile.cpu .
#
# Build targets:
#   vllm-openai (default): used for serving deployment
#   vllm-openai-zen: vLLM from source + zentorch from PyPI via vllm[zen]
#   vllm-test: used for CI tests
#   vllm-dev: used for development
#
# Build arguments:
#   PYTHON_VERSION=3.13|3.12 (default)|3.11|3.10
#   VLLM_CPU_X86=false (default)|true (for cross-compilation)
#   VLLM_CPU_ARM_BF16=false (default)|true (for cross-compilation)
#
|
|
######################### COMMON BASE IMAGE #########################
# Shared foundation for every target: Ubuntu 22.04 + gcc-12 toolchain,
# ccache, a uv-managed Python virtualenv, and the common CPU requirements.
FROM ubuntu:22.04 AS base-common

WORKDIR /workspace

ARG PYTHON_VERSION=3.12
ARG PIP_EXTRA_INDEX_URL="https://download.pytorch.org/whl/cpu"

# Install minimal OS dependencies and the uv installer.
# apt package/list caches live on build-host cache mounts, so repeated builds
# reuse downloads and no `rm -rf /var/lib/apt/lists` is needed here.
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
    --mount=type=cache,target=/var/lib/apt,sharing=locked \
    apt-get update -y \
    && apt-get install -y --no-install-recommends sudo ccache git curl wget ca-certificates \
    gcc-12 g++-12 libtcmalloc-minimal4 libnuma-dev ffmpeg libsm6 libxext6 libgl1 jq lsof make xz-utils \
    && update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-12 10 --slave /usr/bin/g++ g++ /usr/bin/g++-12 \
    # Fetch the installer to a file instead of `curl | sh`: /bin/sh has no
    # pipefail, so a failed download piped into sh would succeed silently.
    && curl -LsSf -o /tmp/uv-install.sh https://astral.sh/uv/install.sh \
    && sh /tmp/uv-install.sh \
    && rm /tmp/uv-install.sh

# All C/C++ compilation goes through gcc-12 wrapped by ccache.
ENV CC=/usr/bin/gcc-12 CXX=/usr/bin/g++-12
ENV CCACHE_DIR=/root/.cache/ccache
ENV CMAKE_CXX_COMPILER_LAUNCHER=ccache

# uv installs into /root/.local/bin; all Python work happens in /opt/venv.
ENV PATH="/root/.local/bin:$PATH"
ENV VIRTUAL_ENV="/opt/venv"
ENV UV_PYTHON_INSTALL_DIR=/opt/uv/python
RUN uv venv --python ${PYTHON_VERSION} --seed ${VIRTUAL_ENV}
ENV PATH="$VIRTUAL_ENV/bin:$PATH"

# Generous timeout for large wheel downloads (e.g. torch) on slow links.
ENV UV_HTTP_TIMEOUT=500

# Resolver configuration: pull CPU wheels from the PyTorch index in addition
# to PyPI, allowing the "best" match across both indexes.
ENV PIP_EXTRA_INDEX_URL=${PIP_EXTRA_INDEX_URL}
ENV UV_EXTRA_INDEX_URL=${PIP_EXTRA_INDEX_URL}
ENV UV_INDEX_STRATEGY="unsafe-best-match"
ENV UV_LINK_MODE="copy"

# Copy only the requirement manifests first so the dependency layer stays
# cached until the requirements themselves change.
COPY requirements/common.txt requirements/common.txt
COPY requirements/cpu.txt requirements/cpu.txt

RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip install --upgrade pip && \
    uv pip install -r requirements/cpu.txt

# TARGETARCH (amd64/arm64) is set automatically by buildx; exported to the
# environment so stage RUN scripts can branch on it.
ARG TARGETARCH
ENV TARGETARCH=${TARGETARCH}
|
######################### x86_64 BASE IMAGE #########################
FROM base-common AS base-amd64

# Preload tcmalloc plus Intel OpenMP. NOTE(review): libiomp5.so is expected
# under /opt/venv/lib, presumably installed by one of the CPU requirements —
# confirm which package provides it.
ENV LD_PRELOAD="/usr/lib/x86_64-linux-gnu/libtcmalloc_minimal.so.4:/opt/venv/lib/libiomp5.so"
|
|
|
|
######################### arm64 BASE IMAGE #########################
FROM base-common AS base-arm64

# Preload tcmalloc only; unlike the amd64 base, no libiomp5 is preloaded here.
ENV LD_PRELOAD="/usr/lib/aarch64-linux-gnu/libtcmalloc_minimal.so.4"
|
|
|
|
######################### BASE IMAGE #########################
# Select the per-architecture base via buildx's automatic TARGETARCH arg.
FROM base-${TARGETARCH} AS base

# Disable core dumps in interactive shells so crashes do not leave core files.
RUN echo 'ulimit -c 0' >> ~/.bashrc
|
|
|
|
######################### BUILD IMAGE #########################
# Builds the vLLM CPU wheel from source into /vllm-workspace/dist.
FROM base AS vllm-build

# Parallelism for the C++/CMake build.
ARG max_jobs=32
ENV MAX_JOBS=${max_jobs}

# Set to a non-zero value to run tools/check_repo.sh before building.
ARG GIT_REPO_CHECK=0
# Support for cross-compilation with x86 ISA including AVX2 and AVX512: docker build --build-arg VLLM_CPU_X86="true" ...
ARG VLLM_CPU_X86=0
ENV VLLM_CPU_X86=${VLLM_CPU_X86}
# Support for cross-compilation with ARM BF16 ISA: docker build --build-arg VLLM_CPU_ARM_BF16="true" ...
ARG VLLM_CPU_ARM_BF16=0
ENV VLLM_CPU_ARM_BF16=${VLLM_CPU_ARM_BF16}

WORKDIR /vllm-workspace

# Validate build arguments - prevent mixing incompatible ISA flags.
# The documented interface is true/false while the ARG defaults are 0, so
# treat "", "0" and "false" all as "disabled" rather than comparing against
# "0" only (previously an explicit `--build-arg VLLM_CPU_X86=false` on arm64
# would wrongly abort the build).
RUN is_disabled() { case "$1" in ""|0|[Ff]alse) return 0 ;; *) return 1 ;; esac; } && \
    if [ "$TARGETARCH" = "arm64" ] && ! is_disabled "$VLLM_CPU_X86"; then \
        echo "ERROR: Cannot use x86-specific ISA flags (AVX2, AVX512, etc.) when building for ARM64 (--platform=linux/arm64)"; \
        exit 1; \
    fi && \
    if [ "$TARGETARCH" = "amd64" ] && ! is_disabled "$VLLM_CPU_ARM_BF16"; then \
        echo "ERROR: Cannot use ARM-specific ISA flags (ARM_BF16) when building for x86_64 (--platform=linux/amd64)"; \
        exit 1; \
    fi

# Copy build requirements
COPY requirements/cpu-build.txt requirements/build.txt

RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip install -r requirements/build.txt

COPY . .

RUN if [ "$GIT_REPO_CHECK" != 0 ]; then bash tools/check_repo.sh ; fi

# Build the wheel; uv, ccache, and the CMake dependency directory all live on
# build-host cache mounts so incremental rebuilds are fast.
RUN --mount=type=cache,target=/root/.cache/uv \
    --mount=type=cache,target=/root/.cache/ccache \
    --mount=type=cache,target=/vllm-workspace/.deps,sharing=locked \
    VLLM_TARGET_DEVICE=cpu python3 setup.py bdist_wheel --dist-dir=dist --py-limited-api=cp38
|
|
|
|
######################### TEST DEPS #########################
# Resolves and installs the CPU test requirement set. The compiled
# cpu-test.txt lockfile is also copied out of this stage by vllm-dev.
FROM base AS vllm-test-deps

WORKDIR /vllm-workspace

# Copy test requirements
COPY requirements/test.in requirements/cpu-test.in

# Rewrite the generic test requirements for CPU before resolving:
#  - drop mamba_ssm (not installable here — presumably CUDA-only; confirm)
#  - on aarch64, drop decord and terratorch (assumed to lack aarch64
#    wheels — TODO confirm)
#  - pin torch to 2.11.0 and unpin torchaudio/torchvision so the resolver
#    selects matching CPU builds
RUN \
    sed -i '/mamba_ssm/d' requirements/cpu-test.in && \
    remove_packages_not_supported_on_aarch64() { \
        case "$(uname -m)" in \
            aarch64|arm64) \
                sed -i '/decord/d' requirements/cpu-test.in; \
                sed -i '/terratorch/d' requirements/cpu-test.in; \
                ;; \
        esac; \
    }; \
    remove_packages_not_supported_on_aarch64 && \
    sed -i 's/^torch==.*/torch==2.11.0/g' requirements/cpu-test.in && \
    sed -i 's/torchaudio.*/torchaudio/g' requirements/cpu-test.in && \
    sed -i 's/torchvision.*/torchvision/g' requirements/cpu-test.in && \
    uv pip compile requirements/cpu-test.in -o requirements/cpu-test.txt --index-strategy unsafe-best-match --torch-backend cpu

RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip install -r requirements/cpu-test.txt
|
|
|
|
######################### DEV IMAGE #########################
# Development image: editable vLLM install plus dev/test tooling.
FROM vllm-build AS vllm-dev

WORKDIR /vllm-workspace

# Run `apt-get update` in the same layer as `install`: the apt list cache
# mount can be empty on a fresh builder (or pruned), in which case a bare
# `apt-get install` would fail or resolve against stale lists.
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
    --mount=type=cache,target=/var/lib/apt,sharing=locked \
    apt-get update -y \
    && apt-get install -y --no-install-recommends vim numactl clangd-14

# Expose clangd under its unversioned name for editor integrations.
RUN ln -s /usr/bin/clangd-14 /usr/bin/clangd

# install development dependencies (for testing)
RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip install --no-build-isolation -e tests/vllm_test_utils

# Editable (develop-mode) build of vLLM itself. .git is bind-mounted rather
# than copied — presumably so version detection works without baking the
# repository history into the image (TODO confirm).
RUN --mount=type=cache,target=/root/.cache/uv \
    --mount=type=cache,target=/root/.cache/ccache \
    --mount=type=bind,source=.git,target=.git \
    VLLM_TARGET_DEVICE=cpu python3 setup.py develop

# Reuse the CPU test lockfile resolved in the vllm-test-deps stage.
COPY --from=vllm-test-deps /vllm-workspace/requirements/cpu-test.txt requirements/test.txt

RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip install -r requirements/dev.txt && \
    pre-commit install --hook-type pre-commit --hook-type commit-msg

ENTRYPOINT ["bash"]
|
|
|
|
######################### TEST IMAGE #########################
# CI test image: installs the built wheel and carries the test suites.
FROM vllm-test-deps AS vllm-test

WORKDIR /vllm-workspace

# Install the wheel produced by the vllm-build stage. The dist directory is
# bind-mounted, so it never becomes part of this image.
RUN --mount=type=cache,target=/root/.cache/uv \
    --mount=type=bind,from=vllm-build,src=/vllm-workspace/dist,target=dist \
    uv pip install dist/*.whl

# COPY instead of ADD for plain local files/directories (hadolint DL3020):
# ADD's extra tar-extraction/URL semantics are not wanted here.
COPY ./tests/ ./tests/
COPY ./examples/ ./examples/
COPY ./benchmarks/ ./benchmarks/
COPY ./vllm/collect_env.py .
COPY ./.buildkite/ ./.buildkite/

# install development dependencies (for testing)
RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip install -e tests/vllm_test_utils
|
|
|
|
######################### RELEASE IMAGE #########################
# Default serving image: locally built wheel + audio extras, launched as an
# OpenAI-compatible server.
FROM base AS vllm-openai

WORKDIR /vllm-workspace

# Install the locally built wheel, then layer the [audio] extra on top.
# NOTE(review): `uv pip install "vllm[audio]"` resolves vllm against the
# index — confirm it does not replace the local wheel with a PyPI build.
RUN --mount=type=cache,target=/root/.cache/uv \
    --mount=type=cache,target=/root/.cache/ccache \
    --mount=type=bind,from=vllm-build,src=/vllm-workspace/dist,target=dist \
    uv pip install dist/*.whl && \
    uv pip install "vllm[audio]"

# Add labels to document build configuration
LABEL org.opencontainers.image.title="vLLM CPU"
LABEL org.opencontainers.image.description="vLLM inference engine for CPU platforms"
LABEL org.opencontainers.image.vendor="vLLM Project"
LABEL org.opencontainers.image.source="https://github.com/vllm-project/vllm"

# Build configuration labels.
# ARGs must be redeclared inside this stage to be usable in LABEL values.
ARG TARGETARCH
ARG VLLM_CPU_X86
ARG VLLM_CPU_ARM_BF16
ARG PYTHON_VERSION

LABEL ai.vllm.build.target-arch="${TARGETARCH}"
LABEL ai.vllm.build.cpu-x86="${VLLM_CPU_X86:-false}"
LABEL ai.vllm.build.cpu-arm-bf16="${VLLM_CPU_ARM_BF16:-false}"
LABEL ai.vllm.build.python-version="${PYTHON_VERSION:-3.12}"

# NOTE(review): the container runs as root; consider adding a non-root USER.
ENTRYPOINT ["vllm", "serve"]
|
|
|
|
|
|
######################### ZEN CPU PYPI IMAGE #########################
# vllm-openai plus zentorch support via the vllm[zen] extra (x86_64 only).
FROM vllm-openai AS vllm-openai-zen

ARG TARGETARCH

# This target is x86_64-only (enforced below); fail fast on other platforms.
# Fixed: the error message previously referenced a nonexistent
# "vllm-openai-amd" target instead of vllm-openai-zen.
RUN if [ "$TARGETARCH" != "amd64" ]; then \
        echo "ERROR: vllm-openai-zen only supports --platform=linux/amd64"; \
        exit 1; \
    fi

RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip install "vllm[zen]"

# Same entrypoint as the parent stage; restated for clarity.
ENTRYPOINT ["vllm", "serve"]
|