FROM vllm/vllm-openai:v0.19.0-cu130

# Install LMCache for KV cache offloading / sharing across nodes
# Build with system CUDA 13.0 for Blackwell (B200)
RUN apt-get update && apt-get install -y git \
        libcusolver-dev-13-0 \
        libcusparse-dev-13-0 \
        libcublas-dev-13-0 \
        libcurand-dev-13-0 \
        libcufft-dev-13-0 \
        libnvjitlink-dev-13-0 && \
    git clone https://github.com/neuralwatt/LMCache.git /tmp/lmcache && \
    cd /tmp/lmcache && \
    git checkout fix/mla-multi-group-kv-cache && \
    CUDA_HOME=/usr/local/cuda \
    TORCH_CUDA_ARCH_LIST="10.0" \
    pip install --no-cache-dir --no-build-isolation . && \
    rm -rf /tmp/lmcache
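
# Optional sanity check (a sketch, assuming the wheel built above installs
# as the `lmcache` Python package): fail the build early if the install
# did not complete cleanly against this base image.
RUN python3 -c "import lmcache"
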
COPY ./super_v3_reasoning_parser.py /opt/super_v3_reasoning_parser.py
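
# Note: the parser is only copied into the image here; the assumption is
# that it gets registered at serve time (e.g. through vLLM's reasoning
# parser plugin mechanism together with --reasoning-parser) - the actual
# launch command lives outside this Dockerfile.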

# Monkey patch more vllm stuff: guard the kv_events_config check behind
# enable_kv_cache_events, so KV cache events are only set up when explicitly
# enabled - https://github.com/vllm-project/vllm/pull/38237/changes#diff-bee6813076031d3ca1edc903c1b02b81e4676519afc562ce3fefe37f20c7b650
RUN sed -i "s/if self\.kv_events_config is not None:/if self.kv_events_config is not None and self.kv_events_config.enable_kv_cache_events:/" /usr/local/lib/python3.12/dist-packages/vllm/config/vllm.py
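
# Verify the sed rewrite took effect (assumption: the patched condition
# string does not appear in the file until the substitution above has
# matched). grep -qF exits non-zero and fails the build if it is missing.
RUN grep -qF "self.kv_events_config.enable_kv_cache_events" /usr/local/lib/python3.12/dist-packages/vllm/config/vllm.py
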
# Patch LMCacheConnectorV1 to support HMA (Hybrid Mamba/Attention KV cache manager)
# This is required for hybrid models like Nemotron that use both Mamba and Attention layers.
# Without this patch, LMCacheConnectorV1 fails with:
#   "Connector LMCacheConnectorV1 does not support HMA but HMA is enabled"
COPY ./lmcache_connector.py /usr/local/lib/python3.12/dist-packages/vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py
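
# Example launch (a sketch, not the deployment command: <image> and <model>
# are placeholders, and it assumes the base image's default entrypoint is
# the OpenAI-compatible server). The patched connector is selected through
# vLLM's --kv-transfer-config flag:
#   docker run --gpus all <image> <model> \
#     --kv-transfer-config '{"kv_connector":"LMCacheConnectorV1","kv_role":"kv_both"}'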