[Bugfix] Fix bnb quantization for models with both HF-format and Mistral-format weights (#14950)
@@ -15,6 +15,8 @@ from ..utils import compare_two_settings, create_new_process_for_each_test
 
 models_4bit_to_test = [
     ("facebook/opt-125m", "quantize opt model inflight"),
+    ("mistralai/Mistral-7B-Instruct-v0.3",
+     "quantize inflight model with both HF and Mistral format weights")
 ]
 
 models_pre_qaunt_4bit_to_test = [
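For context, here is a minimal sketch (not part of this commit) of how the (model, description) tuples above might drive an in-flight bnb quantization test, assuming vLLM's public LLM API; the repository's actual test file uses its own harness and helpers, and the test name below is hypothetical:

import pytest
from vllm import LLM, SamplingParams

@pytest.mark.parametrize("model_name, description", models_4bit_to_test)
def test_4bit_bnb_inflight(model_name: str, description: str) -> None:
    # In-flight bitsandbytes quantization: full-precision checkpoints are
    # quantized to 4-bit as they are loaded. Mistral-7B-Instruct-v0.3 ships
    # both HF-format and Mistral-format weight files, which is the case
    # this bugfix exercises.
    llm = LLM(model=model_name,
              quantization="bitsandbytes",
              load_format="bitsandbytes",
              enforce_eager=True)
    outputs = llm.generate(["The capital of France is"],
                           SamplingParams(max_tokens=8))
    # A non-empty generation is enough to show the weights loaded cleanly.
    assert outputs[0].outputs[0].text, description

enforce_eager=True is an assumption to keep the sketch lightweight, not something this commit touches.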