# syntax=docker/dockerfile:1
# Base image is parameterized so CI / local builds can override it:
#   docker build --build-arg BASE_IMAGE=<image:tag> .
# Default is a vLLM OpenAI-compatible server image built for GLM + CUDA 13.
ARG BASE_IMAGE=vllm/vllm-openai:glm51-cu130

FROM ${BASE_IMAGE}
# Patch tool parser for GLM regex fix.
# Overwrites the stock vLLM GLM-4 MoE tool parser (and its shared utils)
# inside the image's site-packages; the path is specific to Python 3.12.
# NOTE(review): these patches are version-coupled to the vLLM shipped in
# BASE_IMAGE — re-verify the target paths whenever the base image is bumped.
COPY glm4_moe_tool_parser.py /usr/local/lib/python3.12/dist-packages/vllm/tool_parsers/glm4_moe_tool_parser.py
COPY utils.py /usr/local/lib/python3.12/dist-packages/vllm/tool_parsers/utils.py
# Patch hf renderer to force string content format for GLM models.
# This fixes the issue where tool response content is dropped.
# NOTE(review): same version-coupling caveat as the tool-parser patch —
# the target path assumes the Python 3.12 layout of the base image.
COPY vllm_patches/hf.py /usr/local/lib/python3.12/dist-packages/vllm/renderers/hf.py
# Install LMCache for KV cache offloading / sharing across nodes.
# Build from source to match CUDA 13 (the published pip wheel is CUDA 12).
#
# FIX: the previous `pip install --no-cache-dir -e .` performed an *editable*
# install, which leaves site-packages pointing at /tmp/lmcache — the very
# directory the trailing `rm -rf` deletes, breaking the import at runtime.
# A regular install copies the package into site-packages before cleanup.
# Also: --no-install-recommends keeps the git install minimal, and the apt
# lists are removed in the same layer so they never persist in the image.
# NOTE(review): the clone tracks the default branch tip (unpinned) — consider
# pinning a tag or commit for reproducible builds.
RUN apt-get update && \
    apt-get install -y --no-install-recommends git && \
    git clone --depth 1 https://github.com/LMCache/LMCache.git /tmp/lmcache && \
    pip install --no-cache-dir /tmp/lmcache && \
    rm -rf /tmp/lmcache /var/lib/apt/lists/*