# syntax=docker/dockerfile:1

# Base: vLLM OpenAI-compatible server, pinned to v0.19.0 built for CUDA 13.0.
# NOTE(review): for fully reproducible builds, consider pinning by digest
# (vllm/vllm-openai:v0.19.0-cu130@sha256:...) — confirm the digest in the registry.
FROM vllm/vllm-openai:v0.19.0-cu130
|
# Overwrite the packaged DeepSeek V3.2 tool-call parser with a patched copy
# (regex fix). The target path is inside the image's vllm installation, so
# this must track the base image's Python version (3.12) and vllm layout.
# NOTE(review): the original comment said "GLM regex fix", but the file copied
# is the DeepSeek V3.2 parser — confirm which model family this patch targets.
COPY deepseekv32_tool_parser.py /usr/local/lib/python3.12/dist-packages/vllm/tool_parsers/deepseekv32_tool_parser.py
|
# Install LMCache for KV cache offloading / sharing across nodes.
# Built from source against the system CUDA 13.0 dev libraries so the
# extension targets Blackwell (B200, TORCH_CUDA_ARCH_LIST="10.0").
#
# Single layer: apt update+install, source build, and cleanup happen together
# so neither the apt lists nor the clone persist in the image.
# NOTE(review): the clone is unpinned (--depth 1 off the default branch);
# consider pinning a release tag/commit for reproducible builds.
RUN apt-get update && apt-get install -y --no-install-recommends \
        git \
        libcublas-dev-13-0 \
        libcufft-dev-13-0 \
        libcurand-dev-13-0 \
        libcusolver-dev-13-0 \
        libcusparse-dev-13-0 \
        libnvjitlink-dev-13-0 \
    && git clone --depth 1 https://github.com/LMCache/LMCache.git /tmp/lmcache \
    && CUDA_HOME=/usr/local/cuda \
       TORCH_CUDA_ARCH_LIST="10.0" \
       pip install --no-cache-dir --no-build-isolation /tmp/lmcache \
    && rm -rf /tmp/lmcache /var/lib/apt/lists/*
|