[Misc] Update gptq_marlin to use new vLLMParameters (#7281)
tests/weight_loading/models.txt (new file, 15 lines)
@@ -0,0 +1,15 @@
gptq_marlin, robertgshaw2/zephyr-7b-beta-channelwise-gptq, main
gptq_marlin, TheBloke/Llama-2-7B-GPTQ, main
gptq_marlin, TheBloke/TinyLlama-1.1B-Chat-v1.0-GPTQ, main
gptq_marlin, TheBloke/TinyLlama-1.1B-Chat-v1.0-GPTQ, gptq-8bit--1g-actorder_True
gptq_marlin, TheBloke/TinyLlama-1.1B-Chat-v1.0-GPTQ, gptq-8bit-32g-actorder_True
gptq_marlin, TechxGenus/gemma-1.1-2b-it-GPTQ, main
compressed-tensors, nm-testing/tinyllama-oneshot-w8w8-test-static-shape-change, main
compressed-tensors, nm-testing/tinyllama-oneshot-w8-channel-a8-tensor, main
compressed-tensors, nm-testing/tinyllama-oneshot-w8a8-dynamic-token-v2, main
compressed-tensors, nm-testing/tinyllama-oneshot-w8a8-channel-dynamic-token-v2, main
compressed-tensors, nm-testing/tinyllama-oneshot-w4a16-group128-v2, main
compressed-tensors, nm-testing/tinyllama-oneshot-w8a16-per-channel, main
compressed-tensors, nm-testing/Meta-Llama-3-8B-FP8-compressed-tensors-test, main
compressed-tensors, nm-testing/Phi-3-mini-128k-instruct-FP8, main
compressed-tensors, neuralmagic/Phi-3-medium-128k-instruct-quantized.w4a16, main
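Each row of models.txt is a comma-separated triple: quantization method, Hugging Face model stub, and revision (a branch name or a quantization-variant branch). As a rough illustration only (this helper is not part of the commit), the rows could be parsed in Python like so, assuming the working directory is tests/:

    # Hypothetical helper, not part of this commit: parse models.txt rows.
    from typing import List, Tuple

    def load_model_configs(
            path: str = "weight_loading/models.txt") -> List[Tuple[str, str, str]]:
        configs = []
        with open(path) as f:
            for line in f:
                line = line.strip()
                if not line:
                    continue  # skip blank lines
                # Each row is "quantization, model_name, revision".
                quantization, model_name, revision = (
                    part.strip() for part in line.split(","))
                configs.append((quantization, model_name, revision))
        return configs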
tests/weight_loading/run_model_weight_loading_test.sh (new file, 32 lines)
@@ -0,0 +1,32 @@
#!/bin/bash
SUCCESS=0

IFS=$'\n' read -d '' -r -a MODEL_CONFIGS < "weight_loading/models.txt"

for MODEL_CONFIG in "${MODEL_CONFIGS[@]}"
do
    LOCAL_SUCCESS=0
    IFS=', ' read -r -a array <<< "$MODEL_CONFIG"

    echo "=== RUNNING MODEL: $MODEL_CONFIG ==="

    export QUANTIZATION=${array[0]}
    export MODEL_NAME=${array[1]}
    export REVISION=${array[2]}
    pytest -s weight_loading/test_weight_loading.py || LOCAL_SUCCESS=$?

    if [[ $LOCAL_SUCCESS == 0 ]]; then
        echo "=== PASSED MODEL: ${MODEL_CONFIG} ==="
    else
        echo "=== FAILED MODEL: ${MODEL_CONFIG} ==="
    fi

    SUCCESS=$((SUCCESS + LOCAL_SUCCESS))

done

if [ "${SUCCESS}" -eq "0" ]; then
    exit 0
else
    exit 1
fi
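The script runs one pytest invocation per row, passing the row's fields to the test through the QUANTIZATION, MODEL_NAME, and REVISION environment variables, and accumulates nonzero exit codes so the run fails if any model fails. A minimal sketch of what a single iteration amounts to, expressed in Python (assuming pytest is installed and the working directory is tests/):

    # Sketch, not part of the commit: run the weight-loading test for one row.
    import os
    import subprocess

    env = dict(os.environ,
               QUANTIZATION="gptq_marlin",
               MODEL_NAME="TheBloke/TinyLlama-1.1B-Chat-v1.0-GPTQ",
               REVISION="main")
    result = subprocess.run(
        ["pytest", "-s", "weight_loading/test_weight_loading.py"], env=env)
    print("PASSED" if result.returncode == 0 else "FAILED")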
tests/weight_loading/test_weight_loading.py (new file, 20 lines)
@@ -0,0 +1,20 @@
import os

MAX_MODEL_LEN = 1024
MODEL_NAME = os.environ.get("MODEL_NAME",
                            "robertgshaw2/zephyr-7b-beta-channelwise-gptq")
REVISION = os.environ.get("REVISION", "main")
QUANTIZATION = os.environ.get("QUANTIZATION", "gptq_marlin")


def test_weight_loading(vllm_runner):
    with vllm_runner(model_name=MODEL_NAME,
                     revision=REVISION,
                     dtype="auto",
                     quantization=QUANTIZATION,
                     max_model_len=MAX_MODEL_LEN,
                     tensor_parallel_size=2) as model:

        output = model.generate_greedy("Hello world!", max_tokens=20)
        print(output)
        assert output
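Here vllm_runner is a pytest fixture from vLLM's tests/conftest.py that wraps vllm.LLM in a context manager, and generate_greedy performs greedy (temperature 0) decoding. A rough standalone equivalent, assuming vLLM is installed and two GPUs are available for tensor_parallel_size=2:

    # Sketch, not part of the commit: approximate standalone equivalent.
    import os
    from vllm import LLM, SamplingParams

    llm = LLM(model=os.environ.get("MODEL_NAME",
                                   "robertgshaw2/zephyr-7b-beta-channelwise-gptq"),
              revision=os.environ.get("REVISION", "main"),
              dtype="auto",
              quantization=os.environ.get("QUANTIZATION", "gptq_marlin"),
              max_model_len=1024,
              tensor_parallel_size=2)
    # temperature=0.0 makes sampling greedy, matching generate_greedy.
    outputs = llm.generate(["Hello world!"],
                           SamplingParams(temperature=0.0, max_tokens=20))
    print(outputs[0].outputs[0].text)
    assert outputs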