[Bugfix] Fix bnb quantization for models with both HF-format and Mistral-format weights (#14950)

This commit is contained in:
Tristan Leclercq
2025-03-18 00:27:26 +01:00
committed by GitHub
parent 18551e820c
commit 5eeabc2a44
2 changed files with 27 additions and 6 deletions

View File

@@ -15,6 +15,8 @@ from ..utils import compare_two_settings, create_new_process_for_each_test
# (model_id, description) pairs exercising in-flight 4-bit bitsandbytes
# quantization; the Mistral entry covers checkpoints that ship both
# HF-format and Mistral-format weights.
models_4bit_to_test = [
    ("facebook/opt-125m", "quantize opt model inflight"),
    (
        "mistralai/Mistral-7B-Instruct-v0.3",
        "quantize inflight model with both HF and Mistral format weights",
    ),
]
models_pre_qaunt_4bit_to_test = [