[ROCm] [CI] Add new fusion test cases that are relevant to vLLM IR Ops (#34307)

Signed-off-by: tjtanaa <tunjian.tan@embeddedllm.com>
Signed-off-by: vllmellm <vllm.ellm@embeddedllm.com>
Co-authored-by: vllmellm <vllm.ellm@embeddedllm.com>
This commit is contained in:
TJian
2026-03-03 22:24:21 +08:00
committed by GitHub
parent ea463978bb
commit fb7fdc49c4
10 changed files with 217 additions and 61 deletions

View File

@@ -610,6 +610,8 @@ steps:
--ignore=lora/test_qwen3moe_tp.py
parallelism: 4
##### .buildkite/test_areas/pytorch.yaml #####
# corresponds to .buildkite/test_areas/pytorch.yaml
- label: PyTorch Compilation Unit Tests # 15min
timeout_in_minutes: 30
mirror_hardwares: [amdexperimental, amdproduction]
@@ -627,6 +629,20 @@ steps:
# they do not suffer from https://github.com/vllm-project/vllm/issues/28965
- "find compile/ -maxdepth 1 -name 'test_*.py' -exec pytest -s -v {} \\\\;"
# corresponds to .buildkite/test_areas/pytorch.yaml
- label: PyTorch Compilation Passes Unit Tests
timeout_in_minutes: 20
mirror_hardwares: [amdexperimental, amdproduction]
agent_pool: mi325_1
source_file_dependencies:
- vllm/
- tests/compile/passes
commands:
      # TODO: clean up this comment if not needed. It is used to
      # keep track of the test changes during the vLLM IR Ops refactoring.
# Use `find` to launch multiple instances of pytest.
- "find compile/passes -maxdepth 1 -name 'test_*.py' -exec pytest -s -v {} \\\\;"
- label: PyTorch Fullgraph Smoke Test # 15min
timeout_in_minutes: 30
mirror_hardwares: [amdexperimental, amdproduction]
@@ -1211,41 +1227,6 @@ steps:
- pytest -v -s tests/kernels/moe/test_flashinfer.py
- pytest -v -s tests/kernels/moe/test_cutedsl_moe.py
- label: Blackwell Fusion and Compile Tests # 30 min
timeout_in_minutes: 40
working_dir: "/vllm-workspace/"
gpu: b200
source_file_dependencies:
- csrc/quantization/fp4/
- vllm/model_executor/layers/quantization/utils/flashinfer_utils.py
- vllm/v1/attention/backends/flashinfer.py
- vllm/v1/worker/
- vllm/v1/cudagraph_dispatcher.py
- vllm/compilation/
# can affect pattern matching
- vllm/model_executor/layers/layernorm.py
- vllm/model_executor/layers/activation.py
- vllm/model_executor/layers/quantization/input_quant_fp8.py
- tests/compile/passes/test_fusion_attn.py
- tests/compile/passes/test_silu_mul_quant_fusion.py
- tests/compile/passes/distributed/test_fusion_all_reduce.py
- tests/compile/fullgraph/test_full_graph.py
commands:
- nvidia-smi
- pytest -v -s tests/compile/passes/test_fusion_attn.py
- pytest -v -s tests/compile/passes/test_silu_mul_quant_fusion.py
# this runner has 2 GPUs available even though num_gpus=2 is not set
- pytest -v -s tests/compile/passes/distributed/test_fusion_all_reduce.py
# # Limit to Inductor partition, no custom ops, and allreduce & attn fusion to reduce running time
# # Wrap with quotes to escape yaml
# - "pytest -v -s tests/compile/distributed/test_fusions_e2e.py::test_tp2_attn_quant_allreduce_rmsnorm -k 'True and not +quant_fp8 and not +rms_norm'"
# Old E2E tests were removed in https://github.com/vllm-project/vllm/pull/33293
# in favor of new tests in fusions_e2e. We avoid replicating the new jobs in this file as it's deprecated.
# test_fp8_kv_scale_compile requires FlashAttention (not supported on default L4/L40)
- pytest -v -s tests/compile/fullgraph/test_full_graph.py::test_fp8_kv_scale_compile
- label: Blackwell GPT-OSS Eval
timeout_in_minutes: 60
working_dir: "/vllm-workspace/"
@@ -1371,7 +1352,6 @@ steps:
- pytest -v -s ./compile/test_wrapper.py
- VLLM_TEST_SAME_HOST=1 torchrun --nproc-per-node=4 distributed/test_same_node.py | grep 'Same node test passed'
- VLLM_TEST_SAME_HOST=1 VLLM_TEST_WITH_DEFAULT_DEVICE_SET=1 torchrun --nproc-per-node=4 distributed/test_same_node.py | grep 'Same node test passed'
- pytest -v -s compile/correctness_e2e/test_sequence_parallel.py
- CUDA_VISIBLE_DEVICES=0,1 pytest -v -s v1/shutdown
- pytest -v -s v1/worker/test_worker_memory_snapshot.py
@@ -1601,16 +1581,16 @@ steps:
commands:
- VLLM_TEST_CLEAN_GPU_MEMORY=1 pytest -v -s tests/compile/passes/distributed/test_async_tp.py
- pytest -v -s tests/compile/passes/distributed/test_sequence_parallelism.py
- pytest -v -s tests/compile/passes/distributed/test_fusion_all_reduce.py
      # TODO: this test is not supported on ROCm; there are AITER kernels for this.
# - pytest -v -s tests/compile/passes/distributed/test_fusion_all_reduce.py
#- pytest -v -s tests/compile/distributed/test_fusions_e2e.py::test_tp2_attn_quant_allreduce_rmsnorm
# - "VLLM_TEST_CLEAN_GPU_MEMORY=1 pytest -v -s tests/compile/distributed/test_fusions_e2e.py -k 'not Llama-4'"
# Old E2E tests were removed in https://github.com/vllm-project/vllm/pull/33293
# in favor of new tests in fusions_e2e. We avoid replicating the new jobs in this file as it's deprecated.
- VLLM_TEST_CLEAN_GPU_MEMORY=1 pytest -v -s tests/compile/correctness_e2e/test_sequence_parallel.py
- pytest -v -s tests/distributed/test_context_parallel.py
- HIP_VISIBLE_DEVICES=0,1 VLLM_LOGGING_LEVEL=DEBUG python3 examples/offline_inference/data_parallel.py --model=Qwen/Qwen1.5-MoE-A2.7B -tp=1 -dp=2 --max-model-len=2048 --all2all-backend=allgather_reducescatter --disable-nccl-for-dp-synchronization
- pytest -v -s tests/v1/distributed/test_dbo.py
# this test is not supported on ROCm
# - pytest -v -s tests/v1/distributed/test_dbo.py
##### B200 test #####
- label: Distributed Tests (B200) # optional
@@ -1721,6 +1701,93 @@ steps:
commands:
- bash .buildkite/scripts/scheduled_integration_test/qwen3_next_mtp_async_eplb.sh 0.8 1319 8040
##### .buildkite/test_areas/compile.yaml #####
# We are setting up these tests incrementally so that it is easier for the
# CI team to review them and upstream them to pipeline v2.
# The following tests are important for vLLM IR Ops refactoring,
# which affects fusion passes on ROCm. So we have to
# enable them as soon as possible.
## TODO: Enable the test in this group
# # corresponds to .buildkite/test_areas/compile.yaml
# - label: Fusion and Compile Unit Tests (2xMI325 GPUs)
# timeout_in_minutes: 20
# working_dir: "/vllm-workspace/"
# mirror_hardwares: [amdexperimental, amdproduction, tj]
#   agent_pool: mi325_1 # reduced to 1 GPU until the fusion all-reduce test is enabled; revert to 2 GPUs afterwards
# source_file_dependencies:
# - csrc/quantization/fp4/
# - vllm/model_executor/layers/quantization/
# - vllm/model_executor/layers/layernorm.py
# - vllm/model_executor/layers/activation.py
# - vllm/model_executor/layers/attention/attention.py
# - vllm/v1/attention/backends/flashinfer.py
# - vllm/compilation/ # TODO(luka) limit to vllm/compilation/passes
# - tests/compile/test_fusion_attn.py
# - tests/compile/test_silu_mul_quant_fusion.py
# - tests/compile/distributed/test_fusion_all_reduce.py
# - tests/compile/fullgraph/test_full_graph.py
# commands:
# - rocm-smi
# # we run all backend tests on ROCm
# # These two tests are covered in "PyTorch Compilation Passes Unit Tests"
# # - "pytest -v -s tests/compile/passes/test_fusion_attn.py"
# # - "pytest -v -s tests/compile/passes/test_silu_mul_quant_fusion.py"
# # TODO: this test is not supported on ROCm, there are aiter kernels for this.
# # - pytest -v -s tests/compile/passes/distributed/test_fusion_all_reduce.py
# # TODO: find out more details
# # - pytest -v -s tests/compile/fullgraph/test_full_graph.py::test_fp8_kv_scale_compile
# corresponds to .buildkite/test_areas/compile.yaml
- label: Fusion E2E Quick (MI325)
timeout_in_minutes: 15
working_dir: "/vllm-workspace/"
mirror_hardwares: [amdexperimental, amdproduction]
agent_pool: mi325_1
num_devices: 1
source_file_dependencies:
- csrc/quantization/
- vllm/model_executor/
- vllm/v1/attention/
- vllm/compilation/
- tests/compile/fusions_e2e/
commands:
- rocm-smi
# Run all models and attn backends but only Inductor partition and native custom ops
- "pytest -v -s tests/compile/fusions_e2e/test_tp1_quant.py -k 'inductor_partition and not +rms_norm and not +quant_fp8'"
# Different from CUDA, Qwen requires +rms_norm and +quant_fp8 as rms+quant fusion is only supported on AITER
- "pytest -v -s tests/compile/fusions_e2e/test_tp1_quant.py -k 'inductor_partition and +rms_norm and +quant_fp8 and qwen3'"
# corresponds to .buildkite/test_areas/compile.yaml
- label: Fusion E2E Config Sweep (MI325)
timeout_in_minutes: 30
working_dir: "/vllm-workspace/"
mirror_hardwares: [amdexperimental, amdproduction]
agent_pool: mi325_1
num_devices: 1
source_file_dependencies:
- csrc/quantization/
- vllm/compilation/
# can affect pattern matching
- vllm/model_executor/layers/layernorm.py
- vllm/model_executor/layers/activation.py
- vllm/model_executor/layers/attention/attention.py
- vllm/model_executor/layers/quantization/input_quant_fp8.py
- tests/compile/fusions_e2e/
commands:
- rocm-smi
# Run just llama3 (fp8) for all config combinations
- pytest -v -s tests/compile/fusions_e2e/test_tp1_quant.py -k "llama-3"
## There are no ops on ROCm for these tests.
## The test still passes but the logs are not useful.
## fused ops just call torch.ops.symm_mem which
## exists in ROCm even though they don't work
# - label: AsyncTP Correctness Tests (2xMI325 GPUs)
# - label: Fusion E2E TP2 Quick (MI325)
# - label: Fusion E2E TP2 AsyncTP Config Sweep (MI325)
# - label: Fusion E2E TP2 (MI325)
# - label: Sequence Parallel Correctness Tests (2xMI325 GPUs)
#####################################################################################################################################