[CI] Add Helion as an optional dependency (#32482)

Signed-off-by: Yanan Cao <gmagogsfm@gmail.com>
This commit is contained in:
Yanan Cao
2026-01-19 11:09:56 -08:00
committed by GitHub
parent 0727cc9ecf
commit 9d1e611f0e
5 changed files with 83 additions and 0 deletions

View File

@@ -703,6 +703,17 @@ steps:
- pytest -v -s kernels/moe/test_batched_deepgemm.py
- pytest -v -s kernels/attention/test_deepgemm_attention.py
- label: Kernels Helion Test
timeout_in_minutes: 30
mirror_hardwares: [amdexperimental, amdproduction]
agent_pool: mi325_1
source_file_dependencies:
- vllm/utils/import_utils.py
- tests/kernels/helion/
commands:
- pip install helion
- pytest -v -s kernels/helion/
- label: Model Executor Test # 23min
timeout_in_minutes: 35
torch_nightly: true

View File

@@ -624,6 +624,16 @@ steps:
- pytest -v -s kernels/moe/test_batched_deepgemm.py
- pytest -v -s kernels/attention/test_deepgemm_attention.py
- label: Kernels Helion Test
timeout_in_minutes: 30
gpu: h100
source_file_dependencies:
- vllm/utils/import_utils.py
- tests/kernels/helion/
commands:
- pip install helion
- pytest -v -s kernels/helion/
- label: Model Executor Test # 23min
timeout_in_minutes: 35
torch_nightly: true

View File

@@ -992,6 +992,8 @@ setup(
"flashinfer": [], # Kept for backwards compatibility
# Optional deps for AMD FP4 quantization support
"petit-kernel": ["petit-kernel"],
# Optional deps for Helion kernel development
"helion": ["helion"],
},
cmdclass=cmdclass,
package_data=package_data,

View File

@@ -0,0 +1,45 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Tests for Helion kernel availability and basic functionality.
This module demonstrates the pattern for testing optional Helion kernels.
Tests in this directory will be skipped if Helion is not installed.
"""
import pytest

from vllm.utils.import_utils import has_helion

# Gate the whole module on the optional Helion dependency: every test below
# needs `helion` importable, so skip at collection time rather than per-test.
if not has_helion():
    pytest.skip(
        "Helion is not installed. Install with: pip install vllm[helion]",
        allow_module_level=True,
    )

# Safe to import only after the guard above has confirmed availability.
import helion
import helion.language as hl
import torch
def test_helion_kernel_compilation_smoke():
    """Smoke test: compile and run a simple Helion kernel."""

    # Minimal elementwise-add kernel; autotuning is disabled so the test
    # only exercises compilation and a single launch.
    @helion.kernel(autotune_effort="none")
    def add_kernel(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
        result = torch.empty_like(a)
        for tile in hl.tile(a.size()):
            result[tile] = a[tile] + b[tile]
        return result

    # Inputs on the GPU, since Helion kernels execute on CUDA devices.
    lhs = torch.randn(1024, device="cuda", dtype=torch.float32)
    rhs = torch.randn(1024, device="cuda", dtype=torch.float32)

    # Launch the kernel and compare against the eager PyTorch reference.
    actual = add_kernel(lhs, rhs)
    reference = lhs + rhs
    assert torch.allclose(actual, reference), "Helion kernel output mismatch"

View File

@@ -436,3 +436,18 @@ def has_arctic_inference() -> bool:
"""Whether the optional `arctic_inference` package is available."""
return _has_module("arctic_inference")
def has_helion() -> bool:
    """Return True when the optional `helion` package can be imported.

    Helion is a Python-embedded DSL for writing ML kernels
    (https://github.com/pytorch/helion).

    Usage:
        if has_helion():
            import helion
            import helion.language as hl
            # use helion...
    """
    return _has_module("helion")