- Tool response content was being dropped because vLLM incorrectly detected the 'openai' content format for GLM chat templates
- Added _is_glm_model() detection to force the 'string' content format
- Updated the Dockerfile to include the hf.py patch
- Added debug tests for tool visibility
11 lines
508 B
Docker
# Overlay image: applies local GLM-related patches on top of a vLLM release.
# Base image is pinned to a specific tag (never :latest) for reproducible builds.
ARG BASE_IMAGE=vllm/vllm-openai:glm51-cu130

FROM ${BASE_IMAGE}

# Install location of the vLLM package inside the base image. Declared as a
# stage ARG so the three patch destinations stay in sync and a Python-version
# bump in the base image is a one-line change.
ARG VLLM_DIR=/usr/local/lib/python3.12/dist-packages/vllm

# Patch tool parser for GLM regex fix: overlay the fixed parser and its helper
# module onto the versions shipped in the base image (single COPY, one layer).
COPY glm4_moe_tool_parser.py utils.py ${VLLM_DIR}/tool_parsers/

# Patch hf renderer to force string content format for GLM models.
# This fixes the issue where tool response content is dropped because vLLM
# auto-detects the 'openai' content format for GLM chat templates.
COPY vllm_patches/hf.py ${VLLM_DIR}/renderers/