From 86b7e3c95a91f4a97431f8fe4381a6ab524a0cc7 Mon Sep 17 00:00:00 2001
From: liuzhenwei
Date: Wed, 18 Mar 2026 13:32:59 +0800
Subject: [PATCH] [XPU] skip unsupported ut and update test_nixl_connector (#37179)

Signed-off-by: zhenwei-intel
Co-authored-by: Kunshang Ji
---
 .buildkite/scripts/hardware_ci/run-xpu-test.sh    | 6 +++---
 tests/v1/kv_connector/unit/test_nixl_connector.py | 8 +++++++-
 2 files changed, 10 insertions(+), 4 deletions(-)

diff --git a/.buildkite/scripts/hardware_ci/run-xpu-test.sh b/.buildkite/scripts/hardware_ci/run-xpu-test.sh
index be7886354..1e72c2931 100644
--- a/.buildkite/scripts/hardware_ci/run-xpu-test.sh
+++ b/.buildkite/scripts/hardware_ci/run-xpu-test.sh
@@ -40,16 +40,16 @@ docker run \
   python3 examples/basic/offline_inference/generate.py --model facebook/opt-125m --block-size 64 --enforce-eager -tp 2 --distributed-executor-backend mp
   python3 examples/basic/offline_inference/generate.py --model facebook/opt-125m --block-size 64 --enforce-eager --attention-backend=TRITON_ATTN
   python3 examples/basic/offline_inference/generate.py --model facebook/opt-125m --block-size 64 --enforce-eager --quantization fp8
-  python3 examples/basic/offline_inference/generate.py --model superjob/Qwen3-4B-Instruct-2507-GPTQ-Int4 --block-size 64 --enforce-eager
+  python3 examples/basic/offline_inference/generate.py --model superjob/Qwen3-4B-Instruct-2507-GPTQ-Int4 --block-size 64 --enforce-eager --max-model-len 8192
   python3 examples/basic/offline_inference/generate.py --model ibm-research/PowerMoE-3b --block-size 64 --enforce-eager -tp 2
   python3 examples/basic/offline_inference/generate.py --model ibm-research/PowerMoE-3b --block-size 64 --enforce-eager -tp 2 --enable-expert-parallel
   cd tests
   pytest -v -s v1/core --ignore=v1/core/test_reset_prefix_cache_e2e.py --ignore=v1/core/test_scheduler_e2e.py
   pytest -v -s v1/engine
   pytest -v -s v1/sample --ignore=v1/sample/test_logprobs.py --ignore=v1/sample/test_logprobs_e2e.py
-  pytest -v -s v1/worker --ignore=v1/worker/test_gpu_model_runner.py
+  pytest -v -s v1/worker --ignore=v1/worker/test_gpu_model_runner.py --ignore=v1/worker/test_worker_memory_snapshot.py
   pytest -v -s v1/structured_output
   pytest -v -s v1/spec_decode --ignore=v1/spec_decode/test_max_len.py --ignore=v1/spec_decode/test_tree_attention.py --ignore=v1/spec_decode/test_speculators_eagle3.py --ignore=v1/spec_decode/test_acceptance_length.py
-  pytest -v -s v1/kv_connector/unit --ignore=v1/kv_connector/unit/test_multi_connector.py --ignore=v1/kv_connector/unit/test_nixl_connector.py --ignore=v1/kv_connector/unit/test_example_connector.py --ignore=v1/kv_connector/unit/test_lmcache_integration.py
+  pytest -v -s v1/kv_connector/unit --ignore=v1/kv_connector/unit/test_multi_connector.py --ignore=v1/kv_connector/unit/test_nixl_connector.py --ignore=v1/kv_connector/unit/test_example_connector.py --ignore=v1/kv_connector/unit/test_lmcache_integration.py -k "not (test_register_kv_caches and FLASH_ATTN and True)"
   pytest -v -s v1/test_serial_utils.py
 '

diff --git a/tests/v1/kv_connector/unit/test_nixl_connector.py b/tests/v1/kv_connector/unit/test_nixl_connector.py
index 095bd4c3d..53c4a751f 100644
--- a/tests/v1/kv_connector/unit/test_nixl_connector.py
+++ b/tests/v1/kv_connector/unit/test_nixl_connector.py
@@ -1369,7 +1369,13 @@ def test_abort_timeout_on_prefiller(monkeypatch, distributed_executor_backend):
             "NIXL_TELEMETRY_ENABLE": "1",
         },
     }
-    ray.init(runtime_env=runtime_env)
+    # On XPU/ROCm, vLLM expects Ray's device key to be "GPU".
+    # Explicitly reserving GPU resources here prevents false negatives
+    # when Ray cannot auto-detect accelerator resources in test envs.
+    ray_init_kwargs: dict[str, Any] = {"runtime_env": runtime_env}
+    if not current_platform.is_cuda():
+        ray_init_kwargs["num_gpus"] = 1
+    ray.init(**ray_init_kwargs)
     try:
         run_test_and_cleanup()
     finally: