# Files
# vllm-with-lmcache/Dockerfile
# 2026-04-17 02:33:04 +00:00
#
# 29 lines
# 1.0 KiB
# Docker
#
# Base image: vLLM OpenAI-compatible server, CUDA 13.0 nightly (x86_64).
# Pinned-release alternative kept for reference; switch back once a fixed
# release covers what the nightly is needed for:
#FROM vllm/vllm-openai:v0.19.0-cu130
FROM vllm/vllm-openai:cu130-nightly-x86_64
# Workaround: the nightly image ships without pandas, which vLLM needs at
# runtime. Left unpinned on purpose so it tracks whatever the nightly
# expects — pin a version once a tagged release is used again.
RUN pip install --no-cache-dir pandas
# Install LMCache for KV cache offloading / sharing across nodes.
# Build against the system CUDA 13.0 toolkit for Blackwell (B200); the
# lib*-dev-13-0 packages provide the headers and link-time libraries the
# extension build needs. Everything — apt metadata, the source tree —
# is cleaned up in the SAME layer so none of it persists in the image.
# The branch is shallow-cloned directly (replaces clone + checkout) to
# keep the fetch small. TORCH_CUDA_ARCH_LIST="10.0" targets Blackwell.
RUN apt-get update && apt-get install -y --no-install-recommends \
    git \
    libcublas-dev-13-0 \
    libcufft-dev-13-0 \
    libcurand-dev-13-0 \
    libcusolver-dev-13-0 \
    libcusparse-dev-13-0 \
    libnvjitlink-dev-13-0 && \
    rm -rf /var/lib/apt/lists/* && \
    git clone --branch feat/redis-ttl --depth 1 \
        https://github.com/biondizzle/LMCache.git /tmp/lmcache && \
    cd /tmp/lmcache && \
    CUDA_HOME=/usr/local/cuda \
    TORCH_CUDA_ARCH_LIST="10.0" \
    pip install --no-cache-dir --no-build-isolation . && \
    rm -rf /tmp/lmcache
# Copy over the custom Nemotron reasoning parser
COPY ./super_v3_reasoning_parser.py /opt/super_v3_reasoning_parser.py
# Copy over the DeepSeek v3.2 tool-call parser with MTP fixes, overwriting
# the stock module bundled inside the vllm package.
# NOTE(review): destination hardcodes Python 3.12's dist-packages path —
# re-verify if the base image tag (and its Python version) changes.
COPY deepseekv32_tool_parser.py /usr/local/lib/python3.12/dist-packages/vllm/tool_parsers/deepseekv32_tool_parser.py