[XPU] Support Triton path for LoRA operations on XPU (#28511)

Signed-off-by: Fanli Lin <fanli.lin@intel.com>
This commit is contained in:
Fanli Lin
2025-11-13 13:31:42 +08:00
committed by GitHub
parent 7dca0c90cb
commit dbbe0c756a
3 changed files with 7 additions and 1 deletion

View File

@@ -101,7 +101,11 @@ class XPUPlatform(Platform):
@classmethod
def get_punica_wrapper(cls) -> str:
    """Return the dotted import path of the PunicaWrapper class to use for LoRA.

    Selection is controlled by the ``XPU_USE_TRITON_KERNEL`` environment
    variable: when it is set to ``"1"``, the Triton-kernel-based GPU wrapper
    is used; otherwise the XPU-specific wrapper is returned.

    Returns:
        Fully-qualified class path of the PunicaWrapper implementation.
    """
    # Opt-in to the Triton path; default ("0" / unset) keeps XPU behavior.
    if os.getenv("XPU_USE_TRITON_KERNEL", "0") == "1":
        return "vllm.lora.punica_wrapper.punica_gpu.PunicaWrapperGPU"
    return "vllm.lora.punica_wrapper.punica_xpu.PunicaWrapperXPU"
@classmethod
def get_device_total_memory(cls, device_id: int = 0) -> int: