[Hardware] Replace torch.cuda.empty_cache with torch.accelerator.empty_cache (#30681)
Signed-off-by: Kunshang Ji <kunshang.ji@intel.com>
Signed-off-by: Kunshang Ji <jikunshang95@gmail.com>
Co-authored-by: Harry Mellor <19981378+hmellor@users.noreply.github.com>
This commit is contained in:
@@ -530,7 +530,7 @@ def test_logprobs_mode(logprobs_mode: LogprobsMode):
         assert positive_values > 0
     finally:
         del llm
-        torch.cuda.empty_cache()
+        torch.accelerator.empty_cache()
         cleanup_dist_env_and_memory()

@@ -1065,7 +1065,7 @@ def test_spec_decode_logprobs(
         for logprobs in output.logprobs:
             ref_logprobs.extend(logprobs.values())
     del ref_llm
-    torch.cuda.empty_cache()
+    torch.accelerator.empty_cache()
     cleanup_dist_env_and_memory()

     # Run spec decode LLM.
@@ -1095,7 +1095,7 @@ def test_spec_decode_logprobs(
         for logprobs in output.logprobs:
             spec_logprobs.extend(logprobs.values())
     del spec_llm
-    torch.cuda.empty_cache()
+    torch.accelerator.empty_cache()
     cleanup_dist_env_and_memory()

     # Per-token logprobs are expected to be the same.
Reference in New Issue
Block a user