Convert formatting to use ruff instead of yapf + isort (#26247)

Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com>
Harry Mellor, 2025-10-05 15:06:22 +01:00, committed by GitHub
parent 17edd8a807
commit d6953beb91
1508 changed files with 115244 additions and 94146 deletions
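Every hunk below is the same mechanical change: yapf's hanging-indent layout is replaced by ruff's black-compatible formatting (double quotes, arguments either collapsed onto one line or split one per line with a trailing comma). A minimal before/after sketch of the pattern, using a hypothetical load_model helper purely for illustration:

# Hypothetical helper, defined only so the formatting example is self-contained.
def load_model(path, kv_cache_dtype="auto", tensor_parallel_size=1):
    return (path, kv_cache_dtype, tensor_parallel_size)


# Before (yapf): continuation arguments aligned under the opening parenthesis,
# single quotes left alone.
model = load_model('amd/some-model',
                   kv_cache_dtype='fp8',
                   tensor_parallel_size=1)

# After (ruff format): double quotes everywhere; a call that keeps its trailing
# comma is split to one argument per line, otherwise it is collapsed onto one line.
model = load_model(
    "amd/some-model",
    kv_cache_dtype="fp8",
    tensor_parallel_size=1,
)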


@@ -19,23 +19,27 @@ import torch
 from packaging import version
 
 from vllm.model_executor.layers.quantization.quark.quark import (  # noqa: E501
-    QuarkLinearMethod, QuarkW8A8Fp8, QuarkW8A8Int8)
+    QuarkLinearMethod,
+    QuarkW8A8Fp8,
+    QuarkW8A8Int8,
+)
 from vllm.platforms import current_platform
 
 from .reference_mxfp4 import dq_mxfp4_torch, qdq_mxfp4_torch
 
 QUARK_MXFP4_AVAILABLE = find_spec("quark") is not None and version.parse(
-    importlib.metadata.version("amd-quark")) >= version.parse('0.8.99')
+    importlib.metadata.version("amd-quark")
+) >= version.parse("0.8.99")
 
 if QUARK_MXFP4_AVAILABLE:
-    from quark.torch.export.nn.modules.realquantizer import (
-        StaticScaledRealQuantizer)
+    from quark.torch.export.nn.modules.realquantizer import StaticScaledRealQuantizer
     from quark.torch.kernel import mx as mx_kernel
     from quark.torch.quantization.config.config import FP4PerGroupSpec
 
 try:
     huggingface_hub.list_repo_refs(
-        "amd/Llama-3.3-70B-Instruct-WMXFP4-AMXFP4-KVFP8-Scale-UINT8-SQ")
+        "amd/Llama-3.3-70B-Instruct-WMXFP4-AMXFP4-KVFP8-Scale-UINT8-SQ"
+    )
     HF_HUB_AMD_ORG_ACCESS = True
 except huggingface_hub.errors.RepositoryNotFoundError:
     HF_HUB_AMD_ORG_ACCESS = False
@@ -47,13 +51,13 @@ def enable_pickle(monkeypatch):
     monkeypatch.setenv("VLLM_ALLOW_INSECURE_SERIALIZATION", "1")
 
 
-@pytest.mark.parametrize('kv_cache_dtype', ['auto', 'fp8'])
-@pytest.mark.parametrize('tp', [1])
+@pytest.mark.parametrize("kv_cache_dtype", ["auto", "fp8"])
+@pytest.mark.parametrize("tp", [1])
 def test_quark_fp8_w_per_tensor_a_per_tensor(vllm_runner, kv_cache_dtype, tp):
     model_path = "amd/Llama-3.1-8B-Instruct-FP8-KV-Quark-test"
-    with vllm_runner(model_path,
-                     kv_cache_dtype=kv_cache_dtype,
-                     tensor_parallel_size=tp) as llm:
+    with vllm_runner(
+        model_path, kv_cache_dtype=kv_cache_dtype, tensor_parallel_size=tp
+    ) as llm:
 
         def check_model(model):
             layer = model.model.layers[0]
@@ -74,7 +78,7 @@ def test_quark_fp8_w_per_tensor_a_per_tensor(vllm_runner, kv_cache_dtype, tp):
     assert output
 
 
-@pytest.mark.parametrize('tp', [1])
+@pytest.mark.parametrize("tp", [1])
 def test_quark_fp8_w_per_channel_a_per_token(vllm_runner, tp):
     model_path = "amd/Qwen2.5-1.5B-Instruct-ptpc-Quark-ts"
     with vllm_runner(model_path, tensor_parallel_size=tp) as llm:
@@ -89,8 +93,7 @@ def test_quark_fp8_w_per_channel_a_per_token(vllm_runner, tp):
 
             if isinstance(qkv_proj.scheme, QuarkW8A8Fp8):
                 assert qkv_proj.weight.dtype is current_platform.fp8_dtype()
-                assert qkv_proj.weight_scale.shape[0] == qkv_proj.weight.shape[
-                    1]
+                assert qkv_proj.weight_scale.shape[0] == qkv_proj.weight.shape[1]
                 assert qkv_proj.weight_scale.shape[1] == 1
 
         llm.apply_model(check_model)
@@ -99,7 +102,7 @@ def test_quark_fp8_w_per_channel_a_per_token(vllm_runner, tp):
     assert output
 
 
-@pytest.mark.parametrize('tp', [1])
+@pytest.mark.parametrize("tp", [1])
 def test_quark_int8_w_per_tensor_a_per_tensor(vllm_runner, tp):
     model_path = "amd/Llama-3.1-8B-Instruct-w-int8-a-int8-sym-test"
     with vllm_runner(model_path, tensor_parallel_size=tp) as llm:
@@ -125,16 +128,18 @@ def test_quark_fp8_parity(vllm_runner):
     llm_kwargs = {
         "tensor_parallel_size": 1,
         "enforce_eager": True,
-        "gpu_memory_utilization": 0.1
+        "gpu_memory_utilization": 0.1,
     }
-    with (vllm_runner(quark_model_id, **llm_kwargs) as
-          quark_handle, vllm_runner(fp8_model_id, **llm_kwargs) as fp8_handle):
+    with (
+        vllm_runner(quark_model_id, **llm_kwargs) as quark_handle,
+        vllm_runner(fp8_model_id, **llm_kwargs) as fp8_handle,
+    ):
 
         def get_state_dict(model):
             return {k: v.cpu() for k, v in model.state_dict().items()}
 
-        quark_state_dict, = quark_handle.apply_model(get_state_dict)
-        fp8_state_dict, = fp8_handle.apply_model(get_state_dict)
+        (quark_state_dict,) = quark_handle.apply_model(get_state_dict)
+        (fp8_state_dict,) = fp8_handle.apply_model(get_state_dict)
 
     assert fp8_state_dict.keys() == quark_state_dict.keys()
@@ -164,16 +169,17 @@ ACCURACY_CONFIGS = [
     # Private model.
     GSM8KAccuracyTestConfig(
         model_name="amd/DeepSeek-R1-WMXFP4-AMXFP4-Scale-UINT8-MoE-Quant",
-        excepted_value=0.96),
+        excepted_value=0.96,
+    ),
 ]
 
 
 @pytest.mark.parametrize("config", ACCURACY_CONFIGS)
-@pytest.mark.skipif(not QUARK_MXFP4_AVAILABLE,
-                    reason="amd-quark>=0.9 is not available")
+@pytest.mark.skipif(not QUARK_MXFP4_AVAILABLE, reason="amd-quark>=0.9 is not available")
 @pytest.mark.skipif(
     not HF_HUB_AMD_ORG_ACCESS,
-    reason="Read access to huggingface.co/amd is required for this test.")
+    reason="Read access to huggingface.co/amd is required for this test.",
+)
 def test_mxfp4_gsm8k_correctness(config: GSM8KAccuracyTestConfig):
     if torch.cuda.device_count() < 8:
         pytest.skip(
@@ -195,28 +201,26 @@ def test_mxfp4_gsm8k_correctness(config: GSM8KAccuracyTestConfig):
     EXPECTED_VALUE = config.excepted_value
     measured_value = results["results"][task]["exact_match,strict-match"]
-    assert (measured_value - rtol < EXPECTED_VALUE
-            and measured_value + rtol > EXPECTED_VALUE
-            ), f"Expected: {EXPECTED_VALUE} | Measured: {measured_value}"
+    assert (
+        measured_value - rtol < EXPECTED_VALUE
+        and measured_value + rtol > EXPECTED_VALUE
+    ), f"Expected: {EXPECTED_VALUE} | Measured: {measured_value}"
 
     del os.environ["VLLM_USE_TRITON_FLASH_ATTN"]
 
 
-@pytest.mark.skipif(not QUARK_MXFP4_AVAILABLE,
-                    reason="amd-quark>=0.9 is not available")
+@pytest.mark.skipif(not QUARK_MXFP4_AVAILABLE, reason="amd-quark>=0.9 is not available")
 @pytest.mark.parametrize("float_dtype", [torch.bfloat16, torch.float16])
-@pytest.mark.parametrize("scalings",
-                         [[2.3, 0.03, 7.3, 0.1, 0.004, 17.3, 1e4, 1e-4]])
-def test_mxfp4_fused_qdq_match_quark(float_dtype: torch.dtype,
-                                     scalings: list[int]):
+@pytest.mark.parametrize("scalings", [[2.3, 0.03, 7.3, 0.1, 0.004, 17.3, 1e4, 1e-4]])
+def test_mxfp4_fused_qdq_match_quark(float_dtype: torch.dtype, scalings: list[int]):
     torch.manual_seed(0)
 
     hidden_size = 64 * 32
 
-    inp = (torch.rand(1, hidden_size, dtype=float_dtype, device="cuda") -
-           0.5) * 2
+    inp = (torch.rand(1, hidden_size, dtype=float_dtype, device="cuda") - 0.5) * 2
     for i in range(hidden_size // 32):
-        inp[:, i * 32:(i + 1) *
-            32] = inp[:, i * 32:(i + 1) * 32] * scalings[i % len(scalings)]
+        inp[:, i * 32 : (i + 1) * 32] = (
+            inp[:, i * 32 : (i + 1) * 32] * scalings[i % len(scalings)]
+        )
 
     inp_kernel = inp.clone()
     inp_kernel_clone = inp_kernel.clone()
@@ -225,20 +229,20 @@ def test_mxfp4_fused_qdq_match_quark(float_dtype: torch.dtype,
     res_torch = qdq_mxfp4_torch(inp_kernel, "even")
 
     for i in range(hidden_size // 32):
-        assert torch.all(torch.isfinite(res_hip[:, i * 32:(i + 1) * 32]))
-        assert torch.all(torch.isfinite(res_torch[:, i * 32:(i + 1) * 32]))
+        assert torch.all(torch.isfinite(res_hip[:, i * 32 : (i + 1) * 32]))
+        assert torch.all(torch.isfinite(res_torch[:, i * 32 : (i + 1) * 32]))
 
-        torch.testing.assert_close(res_hip[:, i * 32:(i + 1) * 32],
-                                   res_torch[:, i * 32:(i + 1) * 32])
+        torch.testing.assert_close(
+            res_hip[:, i * 32 : (i + 1) * 32], res_torch[:, i * 32 : (i + 1) * 32]
+        )
 
 
-@pytest.mark.skipif(not QUARK_MXFP4_AVAILABLE,
-                    reason="amd-quark>=0.9 is not available")
+@pytest.mark.skipif(not QUARK_MXFP4_AVAILABLE, reason="amd-quark>=0.9 is not available")
 @pytest.mark.parametrize("float_dtype", [torch.bfloat16, torch.float16])
-@pytest.mark.parametrize("scalings",
-                         [[2.3, 0.03, 7.3, 0.1, 0.004, 17.3, 1e4, 1e-4]])
-def test_mxfp4_dequant_kernel_match_quark(float_dtype: torch.dtype,
-                                          scalings: list[int]):
+@pytest.mark.parametrize("scalings", [[2.3, 0.03, 7.3, 0.1, 0.004, 17.3, 1e4, 1e-4]])
+def test_mxfp4_dequant_kernel_match_quark(
+    float_dtype: torch.dtype, scalings: list[int]
+):
     qspec = FP4PerGroupSpec(
         ch_axis=-1,
         group_size=32,
@@ -265,8 +269,9 @@ def test_mxfp4_dequant_kernel_match_quark(float_dtype: torch.dtype,
 
     # Make it so that different groups have different scales.
     for i in range(hidden_size // 32):
-        w[:, i * 32:(i + 1) *
-          32] = w[:, i * 32:(i + 1) * 32] * scalings[i % len(scalings)]
+        w[:, i * 32 : (i + 1) * 32] = (
+            w[:, i * 32 : (i + 1) * 32] * scalings[i % len(scalings)]
+        )
 
     observer(w)
     scale, _ = observer._calculate_qparams()
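
A note on the slice changes that recur in the hunks above: ruff's formatter, like black, puts spaces around the slice colon when either bound is a compound expression, so w[:, i * 32:(i + 1) * 32] and w[:, i * 32 : (i + 1) * 32] select exactly the same elements. A small self-contained check (the tensor and index are illustrative only):

import torch

w = torch.arange(128).reshape(1, 128)
i = 2
# Both spellings denote the same slice; only the whitespace differs.
assert torch.equal(w[:, i * 32:(i + 1) * 32], w[:, i * 32 : (i + 1) * 32])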