Bump mistral_common to v1.10.0 (#36971)

Signed-off-by: juliendenize <julien.denize@mistral.ai>
Signed-off-by: Julien Denize <40604584+juliendenize@users.noreply.github.com>
Co-authored-by: root <root@h200-bar-196-227.slurm-bar-compute.tenant-slurm.svc.cluster.local>
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
Co-authored-by: Cyrus Leung <tlleungac@connect.ust.hk>
This commit is contained in:
Julien Denize
2026-03-14 15:26:43 +01:00
committed by GitHub
parent 4a718e770d
commit e42b49bd69
4 changed files with 22 additions and 3 deletions

View File

@@ -31,7 +31,7 @@ partial-json-parser # used for parsing partial JSON outputs
pyzmq >= 25.0.0
msgspec
gguf >= 0.17.0
mistral_common[image] >= 1.9.1
mistral_common[image] >= 1.10.0
opencv-python-headless >= 4.13.0 # required for video IO
pyyaml
six>=1.16.0; python_version > '3.11' # transitive dependency of pandas that needs to be the latest version for python 3.12

View File

@@ -95,7 +95,7 @@ transformers==4.57.5
# Pin HF Hub version
huggingface-hub==0.36.2
# Pin Mistral Common
mistral-common[image,audio]==1.9.1
mistral-common[image,audio]==1.10.0
# Required for Prithvi tests
terratorch==1.2.2
# Required for Prithvi tests

View File

@@ -482,7 +482,7 @@ mbstrdecoder==1.1.3
# typepy
mdurl==0.1.2
# via markdown-it-py
mistral-common==1.9.1
mistral-common==1.10.0
# via -r requirements/test.in
more-itertools==10.5.0
# via lm-eval

View File

@@ -7,6 +7,9 @@ from typing import TYPE_CHECKING, Any, cast, overload
from mistral_common.protocol.instruct.request import (
ChatCompletionRequest as MistralChatCompletionRequest,
)
from mistral_common.protocol.instruct.request import (
ReasoningEffort,
)
from mistral_common.protocol.instruct.tool_calls import Function, Tool
from mistral_common.protocol.instruct.validator import ValidationMode
from mistral_common.tokens.tokenizers.base import (
@@ -192,6 +195,15 @@ def validate_request_params(request: "ChatCompletionRequest"):
if request.chat_template is not None or request.chat_template_kwargs is not None:
raise ValueError("chat_template is not supported for Mistral tokenizers.")
if request.reasoning_effort and request.reasoning_effort not in list(
ReasoningEffort
):
raise ValueError(
f"reasoning_effort={request.reasoning_effort} is not supported by "
"Mistral models. Supported values are: "
f"{[e.value for e in ReasoningEffort]}."
)
def _tekken_token_to_id(tokenizer: "Tekkenizer", t: str | bytes) -> int:
assert isinstance(tokenizer, Tekkenizer), type(tokenizer)
@@ -419,6 +431,12 @@ class MistralTokenizer(TokenizerLike):
truncation = kwargs.get("truncation", False)
max_length = kwargs.get("max_length")
version_kwargs = {}
# NOTE: This is for backward compatibility.
# Transformers should be passed arguments it knows.
if self.version >= 15:
version_kwargs["reasoning_effort"] = kwargs.get("reasoning_effort")
messages, tools = _prepare_apply_chat_template_tools_and_messages(
messages, tools, continue_final_message, add_generation_prompt
)
@@ -433,6 +451,7 @@ class MistralTokenizer(TokenizerLike):
max_length=max_length,
return_tensors=None,
return_dict=False,
**version_kwargs,
)
def decode(