[Misc] Bump opencv-python dependency version to 4.13 (#32668)

Signed-off-by: Isotr0py <mozf@mail2.sysu.edu.cn>
(cherry picked from commit 444e2e7e1f)
This commit is contained in:
Isotr0py
2026-01-22 23:51:15 +08:00
committed by khluu
parent f46d576c54
commit 2bd95d803a
5 changed files with 37 additions and 16 deletions

View File

@@ -32,7 +32,7 @@ pyzmq >= 25.0.0
msgspec msgspec
gguf >= 0.17.0 gguf >= 0.17.0
mistral_common[image] >= 1.8.8 mistral_common[image] >= 1.8.8
opencv-python-headless >= 4.11.0 # required for video IO opencv-python-headless >= 4.13.0 # required for video IO
pyyaml pyyaml
six>=1.16.0; python_version > '3.11' # transitive dependency of pandas that needs to be the latest version for python 3.12 six>=1.16.0; python_version > '3.11' # transitive dependency of pandas that needs to be the latest version for python 3.12
setuptools>=77.0.3,<81.0.0; python_version > '3.11' # Setuptools is used by triton, we need to ensure a modern version is installed for 3.12+ so that it does not try to import distutils, which was removed in 3.12 setuptools>=77.0.3,<81.0.0; python_version > '3.11' # Setuptools is used by triton, we need to ensure a modern version is installed for 3.12+ so that it does not try to import distutils, which was removed in 3.12

View File

@@ -25,7 +25,7 @@ transformers_stream_generator # required for qwen-vl test
matplotlib # required for qwen-vl test matplotlib # required for qwen-vl test
mistral_common[image,audio] >= 1.8.8 # required for voxtral test mistral_common[image,audio] >= 1.8.8 # required for voxtral test
num2words # required for smolvlm test num2words # required for smolvlm test
opencv-python-headless >= 4.11.0 # required for video test opencv-python-headless >= 4.13.0 # required for video test
datamodel_code_generator # required for minicpm3 test datamodel_code_generator # required for minicpm3 test
lm-eval[api]>=0.4.9.2 # required for model evaluation test lm-eval[api]>=0.4.9.2 # required for model evaluation test
mteb>=1.38.11, <2 # required for mteb test mteb>=1.38.11, <2 # required for mteb test
@@ -37,8 +37,8 @@ bitsandbytes>=0.46.1
buildkite-test-collector==0.1.9 buildkite-test-collector==0.1.9
genai_perf==0.0.8 genai_perf>=0.0.8
tritonclient==2.51.0 tritonclient>=2.51.0
numba == 0.61.2 # Required for N-gram speculative decoding numba == 0.61.2 # Required for N-gram speculative decoding
numpy numpy

View File

@@ -33,7 +33,7 @@ matplotlib # required for qwen-vl test
mistral_common[image,audio] >= 1.8.8 # required for voxtral test mistral_common[image,audio] >= 1.8.8 # required for voxtral test
num2words # required for smolvlm test num2words # required for smolvlm test
open_clip_torch==2.32.0 # Required for nemotron_vl test, Nemotron Parse in test_common.py open_clip_torch==2.32.0 # Required for nemotron_vl test, Nemotron Parse in test_common.py
opencv-python-headless >= 4.11.0 # required for video test opencv-python-headless >= 4.13.0 # required for video test
datamodel_code_generator # required for minicpm3 test datamodel_code_generator # required for minicpm3 test
lm-eval[api]>=0.4.9.2 # required for model evaluation test lm-eval[api]>=0.4.9.2 # required for model evaluation test
mteb[bm25s]>=2, <3 # required for mteb test mteb[bm25s]>=2, <3 # required for mteb test
@@ -45,8 +45,8 @@ bitsandbytes==0.46.1
buildkite-test-collector==0.1.9 buildkite-test-collector==0.1.9
genai_perf==0.0.8 genai_perf>=0.0.8
tritonclient==2.51.0 tritonclient>=2.51.0
arctic-inference == 0.1.1 # Required for suffix decoding test arctic-inference == 0.1.1 # Required for suffix decoding test
numba == 0.61.2 # Required for N-gram speculative decoding numba == 0.61.2 # Required for N-gram speculative decoding

View File

@@ -31,7 +31,11 @@ albumentations==1.4.6
# -r requirements/test.in # -r requirements/test.in
# terratorch # terratorch
alembic==1.16.4 alembic==1.16.4
# via mlflow # via
# mlflow
# optuna
annotated-doc==0.0.4
# via fastapi
annotated-types==0.7.0 annotated-types==0.7.0
# via pydantic # via pydantic
antlr4-python3-runtime==4.9.3 antlr4-python3-runtime==4.9.3
@@ -143,6 +147,8 @@ colorama==0.4.6
# tqdm-multiprocess # tqdm-multiprocess
colorful==0.5.6 colorful==0.5.6
# via ray # via ray
colorlog==6.10.1
# via optuna
contourpy==1.3.0 contourpy==1.3.0
# via matplotlib # via matplotlib
coverage==7.10.6 coverage==7.10.6
@@ -250,7 +256,7 @@ fsspec==2024.9.0
# torch # torch
ftfy==6.3.1 ftfy==6.3.1
# via open-clip-torch # via open-clip-torch
genai-perf==0.0.8 genai-perf==0.0.16
# via -r requirements/test.in # via -r requirements/test.in
genson==1.3.0 genson==1.3.0
# via datamodel-code-generator # via datamodel-code-generator
@@ -387,6 +393,7 @@ jinja2==3.1.6
# via # via
# datamodel-code-generator # datamodel-code-generator
# flask # flask
# genai-perf
# mlflow # mlflow
# torch # torch
jiwer==3.0.5 jiwer==3.0.5
@@ -526,7 +533,7 @@ numba==0.61.2
# librosa # librosa
numexpr==2.10.1 numexpr==2.10.1
# via lm-eval # via lm-eval
numpy==1.26.4 numpy==2.2.6
# via # via
# -r requirements/test.in # -r requirements/test.in
# accelerate # accelerate
@@ -556,6 +563,7 @@ numpy==1.26.4
# numba # numba
# numexpr # numexpr
# opencv-python-headless # opencv-python-headless
# optuna
# pandas # pandas
# patsy # patsy
# peft # peft
@@ -635,7 +643,7 @@ opencensus==0.11.4
# via ray # via ray
opencensus-context==0.1.3 opencensus-context==0.1.3
# via opencensus # via opencensus
opencv-python-headless==4.11.0.86 opencv-python-headless==4.13.0.90
# via # via
# -r requirements/test.in # -r requirements/test.in
# albucore # albucore
@@ -658,6 +666,10 @@ opentelemetry-sdk==1.35.0
# ray # ray
opentelemetry-semantic-conventions==0.56b0 opentelemetry-semantic-conventions==0.56b0
# via opentelemetry-sdk # via opentelemetry-sdk
optuna==3.6.1
# via genai-perf
orjson==3.11.5
# via genai-perf
packaging==24.2 packaging==24.2
# via # via
# accelerate # accelerate
@@ -676,6 +688,7 @@ packaging==24.2
# lightning-utilities # lightning-utilities
# matplotlib # matplotlib
# mlflow-skinny # mlflow-skinny
# optuna
# peft # peft
# plotly # plotly
# pooch # pooch
@@ -715,6 +728,8 @@ peft==0.16.0
# lm-eval # lm-eval
perceptron==0.1.4 perceptron==0.1.4
# via -r requirements/test.in # via -r requirements/test.in
perf-analyzer==0.1.0
# via genai-perf
pillow==10.4.0 pillow==10.4.0
# via # via
# genai-perf # genai-perf
@@ -901,6 +916,7 @@ pyyaml==6.0.2
# lightning # lightning
# mlflow-skinny # mlflow-skinny
# omegaconf # omegaconf
# optuna
# peft # peft
# pytorch-lightning # pytorch-lightning
# ray # ray
@@ -1063,6 +1079,7 @@ sortedcontainers==2.4.0
soundfile==0.12.1 soundfile==0.12.1
# via # via
# -r requirements/test.in # -r requirements/test.in
# genai-perf
# librosa # librosa
# mistral-common # mistral-common
soxr==0.5.0.post1 soxr==0.5.0.post1
@@ -1073,6 +1090,7 @@ sqlalchemy==2.0.41
# via # via
# alembic # alembic
# mlflow # mlflow
# optuna
sqlitedict==2.1.0 sqlitedict==2.1.0
# via lm-eval # via lm-eval
sqlparse==0.5.3 sqlparse==0.5.3
@@ -1202,6 +1220,7 @@ tqdm==4.66.6
# mteb # mteb
# nltk # nltk
# open-clip-torch # open-clip-torch
# optuna
# peft # peft
# pqdm # pqdm
# pretrainedmodels # pretrainedmodels
@@ -1224,10 +1243,8 @@ transformers-stream-generator==0.0.5
# via -r requirements/test.in # via -r requirements/test.in
triton==3.5.1 triton==3.5.1
# via torch # via torch
tritonclient==2.51.0 tritonclient==2.64.0
# via # via -r requirements/test.in
# -r requirements/test.in
# genai-perf
typepy==1.3.2 typepy==1.3.2
# via # via
# dataproperty # dataproperty

View File

@@ -267,12 +267,16 @@ async def test_audio_with_max_tokens(mary_had_lamb, client_and_model):
out_tokens = tok(out_text, add_special_tokens=False)["input_ids"] out_tokens = tok(out_text, add_special_tokens=False)["input_ids"]
assert len(out_tokens) == 1 assert len(out_tokens) == 1
# max_completion_tokens > max_model_len # max_completion_tokens > max_model_len
# max_model_len=32768 for Gemma-3n-E2B-it
transcription = await client.audio.transcriptions.create( transcription = await client.audio.transcriptions.create(
model=model_name, model=model_name,
file=mary_had_lamb, file=mary_had_lamb,
response_format="text", response_format="text",
temperature=0.0, temperature=0.0,
extra_body={"max_completion_tokens": int(1e6)}, extra_body={
"max_completion_tokens": int(1e6),
"repetition_penalty": 1.3,
},
) )
out = json.loads(transcription) out = json.loads(transcription)
out_text = out["text"] out_text = out["text"]