[CI Sprint] Quantization CI Cleanup (#24130)

Signed-off-by: Alex Yun <alexyun04@gmail.com>
This commit is contained in:
Alex
2025-11-18 08:21:48 -06:00
committed by GitHub
parent 184b12fdc6
commit f6aa122698
10 changed files with 32 additions and 26 deletions

View File

@@ -21,7 +21,7 @@ MODELS = ["ai21labs/Jamba-tiny-random", "pfnet/plamo-2-1b"]
)
@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("dtype", ["bfloat16"])
-@pytest.mark.parametrize("max_tokens", [10])
+@pytest.mark.parametrize("max_tokens", [4])
def test_model_experts_int8_startup(
hf_runner,
vllm_runner,
@@ -33,5 +33,7 @@ def test_model_experts_int8_startup(
model_info = HF_EXAMPLE_MODELS.find_hf_info(model)
model_info.check_transformers_version(on_fail="skip")
-    with vllm_runner(model, dtype=dtype, quantization="experts_int8") as vllm_model:
+    with vllm_runner(
+        model, dtype=dtype, enforce_eager=True, quantization="experts_int8"
+    ) as vllm_model:
vllm_model.generate_greedy(example_prompts, max_tokens)