# syntax=docker/dockerfile:1
# vllm-glm/Dockerfile — applies GLM tool-parser/renderer patches and builds
# LMCache (with CUDA extensions) on top of a vLLM OpenAI-server base image.
# Base image is parameterized so CI/operators can override it without editing
# this file: docker build --build-arg BASE_IMAGE=<image:tag> .
# Default is an explicitly tagged vLLM OpenAI image (GLM build, CUDA 13.0).
ARG BASE_IMAGE=vllm/vllm-openai:glm51-cu130
FROM ${BASE_IMAGE}
# Patch tool parser for GLM regex fix. Both files land in the same
# tool_parsers/ directory, so copy them in a single instruction (one layer
# instead of two; trailing slash marks the destination as a directory).
# NOTE(review): paths hardcode the base image's Python 3.12 dist-packages —
# they must be revisited if the base image bumps its Python version.
COPY glm4_moe_tool_parser.py utils.py /usr/local/lib/python3.12/dist-packages/vllm/tool_parsers/
# Patch hf renderer to force string content format for GLM models
# This fixes the issue where tool response content is dropped
COPY vllm_patches/hf.py /usr/local/lib/python3.12/dist-packages/vllm/renderers/hf.py
# Install LMCache for KV cache offloading / sharing across nodes.
# Torch is reinstalled from the cu131 wheel index so LMCache's CUDA
# extensions compile against a CUDA 13.x torch matching the base image.
# NOTE(review): base tag says cu130 but the wheel index is cu131 — confirm
# the intended CUDA minor version.
# NOTE(review): the LMCache clone is unpinned (default branch HEAD) — pin a
# tag/commit for reproducible builds.
RUN apt-get update && \
    apt-get install -y --no-install-recommends git && \
    # Reinstall torch with CUDA 13.x support to match base image CUDA version
    pip install --no-cache-dir --upgrade torch --index-url https://download.pytorch.org/whl/cu131 && \
    # Clone and build LMCache from source with CUDA extensions
    git clone --depth 1 https://github.com/LMCache/LMCache.git /tmp/lmcache && \
    pip install --no-cache-dir /tmp/lmcache && \
    # Clean up sources and apt lists in the same layer to keep the image small
    rm -rf /tmp/lmcache /var/lib/apt/lists/*