[Hardware] Replace torch.cuda.synchronize() api with torch.accelerator.synchronize (#36085)

Signed-off-by: Kunshang Ji <kunshang.ji@intel.com>
This commit is contained in:
Kunshang Ji
2026-03-05 18:36:39 +08:00
committed by GitHub
parent 0bfa229bf1
commit 66a2209645
59 changed files with 158 additions and 161 deletions

View File

@@ -272,9 +272,9 @@ def run_moe_test(
 global_num_experts=global_num_experts,
 expert_map=expert_map,
 )
-torch.cuda.synchronize()
+torch.accelerator.synchronize()
 graph.replay()
-torch.cuda.synchronize()
+torch.accelerator.synchronize()
 torch.testing.assert_close(test_output, baseline_output, atol=atol, rtol=rtol)
@@ -768,7 +768,7 @@ def test_mixtral_moe(
 F.pad(vllm_moe.experts.w2_weight, (0, 128), "constant", 0)[..., 0:-128],
 requires_grad=False,
 )
-torch.cuda.synchronize()
+torch.accelerator.synchronize()
 torch.accelerator.empty_cache()
 # FIXME (zyongye) fix this after we move self.kernel