diff --git a/vllm/v1/worker/xpu_worker.py b/vllm/v1/worker/xpu_worker.py
index 421105923..102a04f5d 100644
--- a/vllm/v1/worker/xpu_worker.py
+++ b/vllm/v1/worker/xpu_worker.py
@@ -86,7 +86,8 @@ class XPUWorker(Worker):
         )

         # global all_reduce needed for overall oneccl warm up
-        torch.distributed.all_reduce(torch.zeros(1).xpu())
+        if torch.distributed.is_xccl_available():
+            torch.distributed.all_reduce(torch.zeros(1).xpu())

         # Set random seed.
         set_random_seed(self.model_config.seed)