# Signed-off-by: sihao.li <sihao.li@intel.com>
# Co-authored-by: Kunshang Ji <kunshang.ji@intel.com>
# Base stage: Intel oneAPI deep-learning devel image, pinned to an exact tag
# for reproducible builds.
FROM intel/deep-learning-essentials:2025.3.2-0-devel-ubuntu24.04 AS vllm-base

WORKDIR /workspace/

# Python interpreter version for the uv-managed virtualenv created below.
ARG PYTHON_VERSION=3.12

# Extra package index hosting the XPU builds of torch and related wheels.
ARG PIP_EXTRA_INDEX_URL="https://download.pytorch.org/whl/xpu"
# Register the Intel oneAPI apt repository; the signing key is dearmored into
# a dedicated keyring referenced via signed-by (modern apt-key-less setup).
RUN wget -O- https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB | gpg --dearmor | tee /usr/share/keyrings/oneapi-archive-keyring.gpg > /dev/null && \
    echo "deb [signed-by=/usr/share/keyrings/oneapi-archive-keyring.gpg] https://apt.repos.intel.com/oneapi all main" | tee /etc/apt/sources.list.d/oneAPI.list
# OS-level build/runtime dependencies.
# Fixes: use apt-get consistently instead of `apt` (hadolint DL3027), sort the
# package list for diffability, and drop the apt lists in the same layer so the
# cache does not bloat the image (DL3009).
RUN apt-get clean && apt-get update -y && \
    apt-get install -y --no-install-recommends --fix-missing \
        curl \
        ffmpeg \
        git \
        libaio-dev \
        libgl1 \
        libsm6 \
        libsndfile1 \
        libxext6 \
        lsb-release \
        numactl \
        python3-pip \
        python3.12 \
        python3.12-dev \
        vim \
        wget && \
    rm -rf /var/lib/apt/lists/*
# Install the pinned oneAPI DPC++ compiler from the repository registered above.
# Fixes: use apt-get instead of `apt` (DL3027), drop the blanket `apt upgrade`
# (DL3005 — bump the base image tag instead when newer OS packages are needed),
# and remove the apt lists in the same layer (DL3009).
RUN apt-get update && \
    apt-get install -y --no-install-recommends intel-oneapi-compiler-dpcpp-cpp-2025.3 && \
    rm -rf /var/lib/apt/lists/*
# Install the Intel GPU user-mode drivers: graphics compiler (IGC),
# compute-runtime (OpenCL/ocloc) and Level Zero loader, all from pinned
# release .debs. The scratch directory is removed within the same layer.
RUN mkdir neo && \
    cd neo && \
    wget https://github.com/intel/intel-graphics-compiler/releases/download/v2.24.8/intel-igc-core-2_2.24.8+20344_amd64.deb && \
    wget https://github.com/intel/intel-graphics-compiler/releases/download/v2.24.8/intel-igc-opencl-2_2.24.8+20344_amd64.deb && \
    wget https://github.com/intel/compute-runtime/releases/download/25.48.36300.8/intel-ocloc_25.48.36300.8-0_amd64.deb && \
    wget https://github.com/intel/compute-runtime/releases/download/25.48.36300.8/intel-opencl-icd_25.48.36300.8-0_amd64.deb && \
    wget https://github.com/intel/compute-runtime/releases/download/25.48.36300.8/libigdgmm12_22.8.2_amd64.deb && \
    wget https://github.com/intel/compute-runtime/releases/download/25.48.36300.8/libze-intel-gpu1_25.48.36300.8-0_amd64.deb && \
    wget https://github.com/oneapi-src/level-zero/releases/download/v1.26.0/level-zero_1.26.0+u24.04_amd64.deb && \
    dpkg -i *.deb && \
    cd .. && \
    rm -rf neo
# uv (Python package/venv manager) plus a seeded virtualenv at /opt/venv.
ENV PATH="/root/.local/bin:$PATH"
ENV VIRTUAL_ENV="/opt/venv"
ENV UV_PYTHON_INSTALL_DIR=/opt/uv/python

# NOTE(review): the installer is fetched unpinned over HTTPS — consider
# pinning a uv version / verifying a checksum for fully reproducible builds.
RUN curl -LsSf https://astral.sh/uv/install.sh | sh
RUN uv venv --python ${PYTHON_VERSION} --seed ${VIRTUAL_ENV}

# Put the venv first on PATH so `python`/`pip` resolve into it.
ENV PATH="$VIRTUAL_ENV/bin:$PATH"
# This oneCCL build contains BMG support, which is not the case for the
# default version shipped with oneAPI 2025.2. Installer is removed in the
# same layer; both oneAPI env scripts are sourced from .bashrc for
# interactive shells.
ARG ONECCL_INSTALLER="intel-oneccl-2021.15.7.8_offline.sh"
RUN wget "https://github.com/uxlfoundation/oneCCL/releases/download/2021.15.7/${ONECCL_INSTALLER}" && \
    bash "${ONECCL_INSTALLER}" -a --silent --eula accept && \
    rm "${ONECCL_INSTALLER}" && \
    echo "source /opt/intel/oneapi/setvars.sh --force" >> /root/.bashrc && \
    echo "source /opt/intel/oneapi/ccl/2021.15/env/vars.sh --force" >> /root/.bashrc

# Repoint the `latest` symlink at the freshly installed oneCCL 2021.15.
RUN rm -f /opt/intel/oneapi/ccl/latest && \
    ln -s /opt/intel/oneapi/ccl/2021.15 /opt/intel/oneapi/ccl/latest
# Use bash with pipefail so a failure anywhere in a piped RUN command fails
# the build instead of being masked by the last command's status (DL4006).
SHELL ["bash", "-o", "pipefail", "-c"]
# NOTE(review): this CMD is dead — it is superseded by the later
# `CMD ["/bin/bash"]` in this same stage (only the last CMD takes effect).
CMD ["bash", "-c", "source /root/.bashrc && exec bash"]

WORKDIR /workspace/vllm

# Generous HTTP timeout for uv: XPU wheels (torch etc.) are large.
ENV UV_HTTP_TIMEOUT=500

# Configure package index for XPU wheels, for both pip and uv.
ENV PIP_EXTRA_INDEX_URL=${PIP_EXTRA_INDEX_URL}
ENV UV_EXTRA_INDEX_URL=${PIP_EXTRA_INDEX_URL}
ENV UV_INDEX_STRATEGY="unsafe-best-match"
ENV UV_LINK_MODE="copy"
# Resolve and install the XPU Python requirements inside the venv.
# The requirements files are bind-mounted (not copied) so this layer stays
# cached until they change; the uv cache mount speeds up rebuilds without
# bloating the image. oneAPI env scripts are sourced in-layer because the
# test requirements include packages built from source against oneAPI.
RUN --mount=type=cache,target=/root/.cache/uv \
    --mount=type=bind,src=requirements/common.txt,target=/workspace/vllm/requirements/common.txt \
    --mount=type=bind,src=requirements/xpu.txt,target=/workspace/vllm/requirements/xpu.txt \
    --mount=type=bind,src=requirements/xpu-test.in,target=/workspace/vllm/requirements/xpu-test.in \
    uv pip install --upgrade pip && \
    uv pip install -r requirements/xpu.txt && \
    uv pip compile /workspace/vllm/requirements/xpu-test.in \
        -o /workspace/vllm/requirements/xpu-test.txt \
        -c /workspace/vllm/requirements/xpu.txt \
        --index-strategy unsafe-best-match \
        --extra-index-url ${PIP_EXTRA_INDEX_URL} \
        --python-version ${PYTHON_VERSION} && \
    uv pip install grpcio-tools protobuf nanobind && \
    source /opt/intel/oneapi/setvars.sh --force && \
    source /opt/intel/oneapi/ccl/2021.15/env/vars.sh --force && \
    export CMAKE_PREFIX_PATH="$(python3 -c 'import site; print(site.getsitepackages()[0])'):${CMAKE_PREFIX_PATH}" && \
    uv pip install --no-build-isolation -r /workspace/vllm/requirements/xpu-test.txt

ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/local/lib/"
# Bring the vLLM source tree into the build stage.
COPY . .

# Optional sanity check of the git checkout before building (enabled with
# --build-arg GIT_REPO_CHECK=1); .git is bind-mounted, never baked in.
ARG GIT_REPO_CHECK=0
RUN --mount=type=bind,source=.git,target=.git \
    if [ "$GIT_REPO_CHECK" != 0 ]; then bash tools/check_repo.sh; fi

# Build vLLM for Intel XPU; workers use spawn rather than fork.
ENV VLLM_TARGET_DEVICE=xpu
ENV VLLM_WORKER_MULTIPROC_METHOD=spawn
# Build and install vLLM itself (.git is bind-mounted so version metadata
# is available to the build backend without persisting it into the image).
RUN --mount=type=cache,target=/root/.cache/uv \
    --mount=type=bind,source=.git,target=.git \
    uv pip install --no-build-isolation .

CMD ["/bin/bash"]
FROM vllm-base AS vllm-openai

# Install development dependencies (for testing).
RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip install -e tests/vllm_test_utils

# NIXL and UCX are built from source below: UCX pinned to a commit,
# NIXL pinned to a release tag.
ARG UCX_VERSION=e5d98879705239d254ede40b4a52891850cb5349
ARG NIXL_VERSION=0.7.0
# Networking / RDMA userspace stack and build tools needed by UCX and NIXL.
# Fixes: add --no-install-recommends, sort the package list for diffability,
# and drop the apt lists in the same layer (DL3009/DL3015).
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
        autoconf \
        automake \
        build-essential \
        curl \
        git \
        hwloc \
        ibutils \
        ibverbs-providers \
        ibverbs-utils \
        infiniband-diags \
        iproute2 \
        libibmad-dev \
        libibmad5 \
        libibumad-dev \
        libibumad3 \
        libibverbs-dev \
        libibverbs1 \
        libmlx4-1 \
        libmlx5-1 \
        librdmacm-dev \
        librdmacm1 \
        librdmacm1t64 \
        libtool \
        net-tools \
        numactl \
        pciutils \
        perftest \
        pkg-config \
        rdma-core \
        wget && \
    rm -rf /var/lib/apt/lists/*
# Make the UCX install discoverable by NIXL's build and at runtime.
# NOTE(review): /tmp/ucx_install persists in image layers, but /tmp is an
# unusual prefix for an installed library — confirm downstream expectations.
ENV PKG_CONFIG_PATH=/tmp/ucx_install/lib/pkgconfig:${PKG_CONFIG_PATH}
ENV LD_LIBRARY_PATH=/tmp/ucx_install/lib:${LD_LIBRARY_PATH}

# Build UCX (with Level Zero support) then NIXL from source; the source trees
# are removed in the same layer once installed.
# Fix: removed the duplicated `cd /tmp/nixl_source` present in the original.
RUN --mount=type=cache,target=/root/.cache/uv \
    git clone https://github.com/openucx/ucx /tmp/ucx_source && \
    cd /tmp/ucx_source && git checkout "${UCX_VERSION}" && \
    bash autogen.sh && \
    ./configure --prefix=/tmp/ucx_install --with-ze=yes --enable-examples --enable-mt && \
    make CFLAGS="-Wno-error=incompatible-pointer-types" -j8 && make install && \
    git clone https://github.com/ai-dynamo/nixl /tmp/nixl_source && \
    cd /tmp/nixl_source && git checkout "${NIXL_VERSION}" && \
    uv pip install --upgrade meson pybind11 patchelf && \
    uv pip install -r requirements.txt && \
    uv pip install . && \
    rm -rf /tmp/ucx_source /tmp/nixl_source
# FIX triton: replace whatever triton torch pulled in with the pinned XPU build.
RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip uninstall triton triton-xpu && \
    uv pip install triton-xpu==3.6.0

# Remove the torch-bundled oneCCL so the system-wide 2021.15 install (with
# BMG support) is the one picked up, avoiding library conflicts.
RUN --mount=type=cache,target=/root/.cache/uv \
    uv pip uninstall oneccl oneccl-devel

ENTRYPOINT ["vllm", "serve"]