[cold start] replace VLLM_COMPILE_DEPYF with debug_dump_dir (#20940)

Signed-off-by: Boyuan Feng <boyuan@meta.com>
This commit is contained in:
Boyuan Feng
2025-07-14 22:02:17 -07:00
committed by GitHub
parent fc017915f5
commit 91b3d190ae
2 changed files with 7 additions and 21 deletions

View File

@@ -93,27 +93,19 @@ class TorchCompileWrapperWithCustomDispatcher:
return
self.compiled_codes.append(new_code)
local_cache_dir = self.vllm_config.compilation_config.local_cache_dir
if isinstance(local_cache_dir, str):
decompiled_file_name = ("transformed_code.py"
if envs.VLLM_COMPILE_DEPYF else
"transformed_code_README.txt")
decompiled_file = os.path.join(local_cache_dir,
decompiled_file_name)
debug_dump_dir = self.vllm_config.compilation_config.debug_dump_path
if isinstance(debug_dump_dir, str) and debug_dump_dir != "":
rank = self.vllm_config.parallel_config.rank
decompiled_file = os.path.join(debug_dump_dir, f"rank_{rank}",
"transformed_code.py")
if not os.path.exists(decompiled_file):
try:
# usually the decompilation will succeed for most models,
# as we guarantee a full-graph compilation in Dynamo.
                # but there's no 100% guarantee, since decompilation is
                # not a reversible process.
if envs.VLLM_COMPILE_DEPYF:
import depyf
src = depyf.decompile(new_code)
else:
src = (
"To get a transformed_code.py file, re-run with "
"VLLM_COMPILE_DEPYF=1")
import depyf
src = depyf.decompile(new_code)
with open(decompiled_file, "w") as f:
f.write(src)