[CI] Prune Quantization Tests and skip compilation (#27038)

Signed-off-by: mgoin <mgoin64@gmail.com>
Authored by Michael Goin on 2025-10-16 17:26:35 -04:00; committed by GitHub.
parent b3dda72c23
commit 01c977e96d
9 changed files with 62 additions and 134 deletions

View File

@@ -16,13 +16,6 @@ from ..utils import compare_two_settings
reason="fp8 is not supported on this GPU type.",
)
def test_cpu_offload_fp8():
# Test quantization of an unquantized checkpoint
compare_two_settings(
"meta-llama/Llama-3.2-1B-Instruct",
["--quantization", "fp8"],
["--quantization", "fp8", "--cpu-offload-gb", "1"],
max_wait_seconds=480,
)
# Test loading a quantized checkpoint
compare_two_settings(
"neuralmagic/Qwen2-1.5B-Instruct-FP8",
@@ -46,13 +39,6 @@ def test_cpu_offload_gptq(monkeypatch):
["--cpu-offload-gb", "1"],
max_wait_seconds=480,
)
# Test GPTQ
compare_two_settings(
"Qwen/Qwen2-1.5B-Instruct-GPTQ-Int4",
["--quantization", "gptq"],
["--quantization", "gptq", "--cpu-offload-gb", "1"],
max_wait_seconds=480,
)
@pytest.mark.skipif(
@@ -69,13 +55,6 @@ def test_cpu_offload_awq(monkeypatch):
["--cpu-offload-gb", "1"],
max_wait_seconds=480,
)
# Test AWQ
compare_two_settings(
"Qwen/Qwen2-1.5B-Instruct-AWQ",
["--quantization", "awq"],
["--quantization", "awq", "--cpu-offload-gb", "1"],
max_wait_seconds=480,
)
@pytest.mark.skipif(
@@ -92,17 +71,3 @@ def test_cpu_offload_compressed_tensors(monkeypatch):
["--cpu-offload-gb", "1"],
max_wait_seconds=480,
)
# Test w4a16_marlin24
compare_two_settings(
"nm-testing/llama7b-one-shot-2_4-w4a16-marlin24-t",
[],
["--cpu-offload-gb", "1"],
max_wait_seconds=480,
)
# Test w8a8
compare_two_settings(
"nm-testing/tinyllama-oneshot-w8w8-test-static-shape-change",
[],
["--cpu-offload-gb", "1"],
max_wait_seconds=480,
)