[platform] support custom torch.compile backend key (#11318)

Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com>
Signed-off-by: youkaichao <youkaichao@gmail.com>
Co-authored-by: youkaichao <youkaichao@gmail.com>
This commit was authored by wangxiyuan on 2025-01-10 23:46:51 +08:00 and committed via GitHub.
parent 12664ddda5
commit 20410b2fda
5 changed files with 14 additions and 5 deletions

View File

@@ -45,6 +45,7 @@ from vllm.model_executor.model_loader.weight_utils import (
row_parallel_weight_loader)
from vllm.model_executor.sampling_metadata import SamplingMetadata
from vllm.model_executor.utils import set_weight_attrs
from vllm.platforms import current_platform
from vllm.sequence import IntermediateTensors
from .interfaces import SupportsLoRA, SupportsPP
@@ -53,7 +54,7 @@ from .utils import (extract_layer_index, is_pp_missing_parameter,
maybe_prefix)
@torch.compile
@torch.compile(backend=current_platform.simple_compile_backend)
def layer_norm_func(hidden_states, weight, variance_epsilon):
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)