[Misc] Update w2 scale loading for GPTQMarlinMoE (#12757)

Author: Dipika Sikka
Date: 2025-02-06 04:02:14 -05:00
Committed by: GitHub
Parent: 0408efc6d0
Commit: 7ca9934fe7
3 changed files with 21 additions and 8 deletions


@@ -1,5 +1,7 @@
 compressed-tensors, nm-testing/Mixtral-8x7B-Instruct-v0.1-W4A16-quantized, main
 compressed-tensors, nm-testing/Mixtral-8x7B-Instruct-v0.1-W4A16-channel-quantized, main
 compressed-tensors, nm-testing/Mixtral-8x7B-Instruct-v0.1-W8A16-quantized, main
+compressed-tensors, nm-testing/test-w4a16-mixtral-actorder-group, main
 gptq_marlin, TheBloke/Mixtral-8x7B-v0.1-GPTQ, main
+gptq_marlin, TheBloke/Mixtral-8x7B-v0.1-GPTQ, gptq-8bit-128g-actorder_True
 awq_marlin, casperhansen/deepseek-coder-v2-instruct-awq, main
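
Each row in this test list is a comma-separated (quantization method, model, revision) triple; the two added actorder entries exercise the group-wise w2 scale loading path this commit touches. Below is a minimal sketch of how such a list might be parsed into test parameters; the helper name is hypothetical and not the repository's actual test harness.

import csv
from typing import List, Tuple


def load_model_configs(path: str) -> List[Tuple[str, str, str]]:
    """Parse (quantization, model, revision) triples from a weight-loading
    test list like the one in this diff. Hypothetical helper for
    illustration only."""
    configs = []
    with open(path) as f:
        for row in csv.reader(f):
            if not row:
                continue  # skip blank lines
            # Fields are comma-separated with optional surrounding whitespace.
            quantization, model, revision = (field.strip() for field in row)
            configs.append((quantization, model, revision))
    return configs


# For example, the gptq_marlin actorder entry added here would parse as:
# ("gptq_marlin", "TheBloke/Mixtral-8x7B-v0.1-GPTQ", "gptq-8bit-128g-actorder_True")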