Files
vllm-with-lmcache/Dockerfile

18 lines
657 B
Docker
Raw Normal View History

2026-04-15 10:15:23 +00:00
# syntax=docker/dockerfile:1
FROM vllm/vllm-openai:v0.19.0-cu130

# Install LMCache for KV cache offloading / sharing across nodes.
# Built from source against the system CUDA 13.0 toolkit so the kernels
# target Blackwell (B200, compute capability 10.0 — TORCH_CUDA_ARCH_LIST).
# The neuralwatt fork's fix/mla-multi-group-kv-cache branch carries the
# MLA multi-group KV-cache fix not yet in upstream LMCache.
#
# Everything happens in one RUN layer so the git checkout, pip build
# artifacts, and apt metadata are all removed in the same layer that
# created them and never persist in the image.
RUN apt-get update && apt-get install -y --no-install-recommends \
        git \
        libcublas-dev-13-0 \
        libcufft-dev-13-0 \
        libcurand-dev-13-0 \
        libcusolver-dev-13-0 \
        libcusparse-dev-13-0 \
        libnvjitlink-dev-13-0 \
    # Shallow single-branch clone: equivalent to clone + checkout for a
    # build, but avoids fetching the full history.
    && git clone --depth 1 --branch fix/mla-multi-group-kv-cache \
        https://github.com/neuralwatt/LMCache.git /tmp/lmcache \
    # --no-build-isolation: build against the torch already shipped in the
    # vllm base image rather than a freshly resolved one.
    && CUDA_HOME=/usr/local/cuda \
       TORCH_CUDA_ARCH_LIST="10.0" \
       pip install --no-cache-dir --no-build-isolation /tmp/lmcache \
    && rm -rf /tmp/lmcache /var/lib/apt/lists/*