[torch.compile] Enable attention and allreduce fusion without custom ops enabled (#24604)

Signed-off-by: Luka Govedič <lgovedic@redhat.com>
Signed-off-by: Luka Govedič <ProExpertProg@users.noreply.github.com>
Author: Luka Govedič
Date: 2025-10-17 10:10:23 -04:00
Committed by: GitHub
Parent: be429d0cfd
Commit: bd7157a071

28 changed files with 1519 additions and 721 deletions


@@ -40,7 +40,7 @@ from vllm.utils import (
     unique_filepath,
 )
-from ..utils import create_new_process_for_each_test
+from ..utils import create_new_process_for_each_test, flat_product


 def test_get_open_port(monkeypatch: pytest.MonkeyPatch):
@@ -771,3 +771,25 @@ def test_unique_filepath():
             paths.add(path)
         assert len(paths) == 10
         assert len(list(Path(temp_dir).glob("*.txt"))) == 10
+
+
+def test_flat_product():
+    # Check regular itertools.product behavior
+    result1 = list(flat_product([1, 2, 3], ["a", "b"]))
+    assert result1 == [
+        (1, "a"),
+        (1, "b"),
+        (2, "a"),
+        (2, "b"),
+        (3, "a"),
+        (3, "b"),
+    ]
+
+    # Check that the tuples get flattened
+    result2 = list(flat_product([(1, 2), (3, 4)], ["a", "b"], [(5, 6)]))
+    assert result2 == [
+        (1, 2, "a", 5, 6),
+        (1, 2, "b", 5, 6),
+        (3, 4, "a", 5, 6),
+        (3, 4, "b", 5, 6),
+    ]
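
For readers who don't want to dig through the rest of the diff: the assertions above fully pin down the helper's behavior, and a minimal sketch consistent with them follows. Only the name `flat_product` and its import from `..utils` come from the diff; the body below is an assumption, and the actual implementation in the commit may differ.

```python
# A minimal sketch consistent with test_flat_product above (an assumption,
# not the commit's actual code): behaves like itertools.product, but any
# tuple element is spliced into the output tuple instead of nested.
from itertools import chain, product
from typing import Any, Iterable, Iterator


def flat_product(*iterables: Iterable[Any]) -> Iterator[tuple[Any, ...]]:
    """Cartesian product whose tuple elements are flattened into the result.

    flat_product([(1, 2)], ["a"]) yields (1, 2, "a") rather than ((1, 2), "a").
    """
    for combo in product(*iterables):
        # Wrap non-tuple items in a 1-tuple so chain.from_iterable can
        # splice everything into a single flat tuple.
        yield tuple(
            chain.from_iterable(
                item if isinstance(item, tuple) else (item,) for item in combo
            )
        )
```

A helper like this is convenient for `pytest.mark.parametrize` matrices where some axes are pre-bundled tuples of related parameters: the product of axes still arrives at the test as one flat argument list.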