Compare commits: v0.15.2rc0...v0.17.0rc0 (829 commits)
[Commit table omitted: only bare SHAs (097eb544e9 through a7be77beef) survived extraction; the Author, Date, and commit-message columns of the compare view were empty.]
```diff
@@ -1,6 +1,7 @@
-group: Hardware
+group: Hardware - AMD Build
 steps:
 - label: "AMD: :docker: build image"
+  key: image-build-amd
   depends_on: []
   device: amd_cpu
   no_plugin: true
@@ -9,7 +10,7 @@ steps:
     docker build
     --build-arg max_jobs=16
     --build-arg REMOTE_VLLM=1
-    --build-arg ARG_PYTORCH_ROCM_ARCH='gfx90a;gfx942'
+    --build-arg ARG_PYTORCH_ROCM_ARCH='gfx942;gfx950'
     --build-arg VLLM_BRANCH=$BUILDKITE_COMMIT
     --tag "rocm/vllm-ci:${BUILDKITE_COMMIT}"
     -f docker/Dockerfile.rocm
```
```diff
@@ -1,8 +0,0 @@
-group: Hardware
-steps:
-- label: "Arm CPU Test"
-  soft_fail: true
-  device: arm_cpu
-  no_plugin: true
-  commands:
-  - bash .buildkite/scripts/hardware_ci/run-cpu-test-arm.sh
```
.buildkite/hardware_tests/cpu.yaml (new file, 100 lines)

```diff
@@ -0,0 +1,100 @@
+group: CPU
+depends_on: []
+steps:
+- label: CPU-Kernel Tests
+  depends_on: []
+  soft_fail: true
+  device: intel_cpu
+  no_plugin: true
+  source_file_dependencies:
+  - csrc/cpu/
+  - cmake/cpu_extension.cmake
+  - CMakeLists.txt
+  - vllm/_custom_ops.py
+  - tests/kernels/attention/test_cpu_attn.py
+  - tests/kernels/moe/test_cpu_fused_moe.py
+  - tests/kernels/test_onednn.py
+  commands:
+  - |
+    bash .buildkite/scripts/hardware_ci/run-cpu-test.sh 20m "
+    pytest -x -v -s tests/kernels/attention/test_cpu_attn.py
+    pytest -x -v -s tests/kernels/moe/test_cpu_fused_moe.py
+    pytest -x -v -s tests/kernels/test_onednn.py"
+
+- label: CPU-Language Generation and Pooling Model Tests
+  depends_on: []
+  soft_fail: true
+  device: intel_cpu
+  no_plugin: true
+  source_file_dependencies:
+  - csrc/cpu/
+  - vllm/
+  - tests/models/language/generation/
+  - tests/models/language/pooling/
+  commands:
+  - |
+    bash .buildkite/scripts/hardware_ci/run-cpu-test.sh 30m "
+    pytest -x -v -s tests/models/language/generation -m cpu_model
+    pytest -x -v -s tests/models/language/pooling -m cpu_model"
+
+- label: CPU-Quantization Model Tests
+  depends_on: []
+  soft_fail: true
+  device: intel_cpu
+  no_plugin: true
+  source_file_dependencies:
+  - csrc/cpu/
+  - vllm/model_executor/layers/quantization/cpu_wna16.py
+  - vllm/model_executor/layers/quantization/gptq_marlin.py
+  - vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_int8.py
+  - vllm/model_executor/layers/quantization/kernels/scaled_mm/cpu.py
+  - vllm/model_executor/layers/quantization/kernels/mixed_precision/cpu.py
+  - tests/quantization/test_compressed_tensors.py
+  - tests/quantization/test_cpu_wna16.py
+  commands:
+  - |
+    bash .buildkite/scripts/hardware_ci/run-cpu-test.sh 20m "
+    pytest -x -v -s tests/quantization/test_compressed_tensors.py::test_compressed_tensors_w8a8_logprobs
+    pytest -x -v -s tests/quantization/test_cpu_wna16.py"
+
+- label: CPU-Distributed Tests
+  depends_on: []
+  soft_fail: true
+  device: intel_cpu
+  no_plugin: true
+  source_file_dependencies:
+  - csrc/cpu/shm.cpp
+  - vllm/v1/worker/cpu_worker.py
+  - vllm/v1/worker/gpu_worker.py
+  - vllm/v1/worker/cpu_model_runner.py
+  - vllm/v1/worker/gpu_model_runner.py
+  - vllm/platforms/cpu.py
+  - vllm/distributed/parallel_state.py
+  - vllm/distributed/device_communicators/cpu_communicator.py
+  commands:
+  - |
+    bash .buildkite/scripts/hardware_ci/run-cpu-test.sh 10m "
+    bash .buildkite/scripts/hardware_ci/run-cpu-distributed-smoke-test.sh"
+
+- label: CPU-Multi-Modal Model Tests %N
+  depends_on: []
+  soft_fail: true
+  device: intel_cpu
+  no_plugin: true
+  source_file_dependencies:
+  # - vllm/
+  - vllm/model_executor/layers/rotary_embedding
+  - tests/models/multimodal/generation/
+  commands:
+  - |
+    bash .buildkite/scripts/hardware_ci/run-cpu-test.sh 45m "
+    pytest -x -v -s tests/models/multimodal/generation --ignore=tests/models/multimodal/generation/test_pixtral.py -m cpu_model --num-shards=$$BUILDKITE_PARALLEL_JOB_COUNT --shard-id=$$BUILDKITE_PARALLEL_JOB"
+  parallelism: 2
+
+- label: "Arm CPU Test"
+  depends_on: []
+  soft_fail: true
+  device: arm_cpu
+  no_plugin: true
+  commands:
+  - bash .buildkite/scripts/hardware_ci/run-cpu-test-arm.sh
```
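The multi-modal step above fans out with `parallelism: 2`; the `$$BUILDKITE_PARALLEL_JOB*` references are escaped once so they resolve on the agent rather than at pipeline-upload time. A minimal sketch of what each parallel job ends up running, assuming Buildkite's standard parallelism variables (the defaults here are illustrative):

```bash
#!/usr/bin/env bash
# With parallelism: 2, Buildkite launches this step twice and exports
# BUILDKITE_PARALLEL_JOB=0|1 and BUILDKITE_PARALLEL_JOB_COUNT=2 to each job.
NUM_SHARDS="${BUILDKITE_PARALLEL_JOB_COUNT:-1}"
SHARD_ID="${BUILDKITE_PARALLEL_JOB:-0}"

# Each job then runs a disjoint pytest shard of the multi-modal generation tests.
pytest -x -v -s tests/models/multimodal/generation \
  --ignore=tests/models/multimodal/generation/test_pixtral.py \
  -m cpu_model \
  --num-shards="$NUM_SHARDS" --shard-id="$SHARD_ID"
```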
```diff
@@ -1,13 +1,6 @@
 group: Hardware
 depends_on: ~
 steps:
-- label: "Intel CPU Test"
-  soft_fail: true
-  device: intel_cpu
-  no_plugin: true
-  commands:
-  - bash .buildkite/scripts/hardware_ci/run-cpu-test.sh
-
 - label: "Intel HPU Test"
   soft_fail: true
   device: intel_hpu
```
```diff
@@ -8,7 +8,7 @@ clean_docker_tag() {
 }
 
 print_usage_and_exit() {
-  echo "Usage: $0 <registry> <repo> <commit> <branch> <vllm_use_precompiled> <vllm_merge_base_commit> <cache_from> <cache_to>"
+  echo "Usage: $0 <registry> <repo> <commit> <branch> <image_tag> [<image_tag_latest>]"
   exit 1
 }
 
@@ -142,11 +142,16 @@ resolve_parent_commit() {
 
 print_bake_config() {
   echo "--- :page_facing_up: Resolved bake configuration"
-  BAKE_CONFIG_FILE="bake-config-build-${BUILDKITE_BUILD_NUMBER:-local}.json"
+  # Write to a temp directory to avoid polluting the repo root (which is the
+  # Docker build context). Files left in the repo root get COPY'd into the
+  # image and can cause duplicate artifact uploads from downstream steps.
+  local bake_tmp
+  bake_tmp="$(mktemp -d)"
+  BAKE_CONFIG_FILE="${bake_tmp}/bake-config-build-${BUILDKITE_BUILD_NUMBER:-local}.json"
   docker buildx bake -f "${VLLM_BAKE_FILE_PATH}" -f "${CI_HCL_PATH}" --print "${TARGET}" | tee "${BAKE_CONFIG_FILE}" || true
   echo "Saved bake config to ${BAKE_CONFIG_FILE}"
   echo "--- :arrow_down: Uploading bake config to Buildkite"
-  buildkite-agent artifact upload "${BAKE_CONFIG_FILE}"
+  (cd "$(dirname "${BAKE_CONFIG_FILE}")" && buildkite-agent artifact upload "$(basename "${BAKE_CONFIG_FILE}")")
 }
 
 #################################
@@ -154,7 +159,7 @@ print_bake_config() {
 #################################
 print_instance_info
 
-if [[ $# -lt 7 ]]; then
+if [[ $# -lt 5 ]]; then
   print_usage_and_exit
 fi
 
@@ -163,10 +168,8 @@ REGISTRY=$1
 REPO=$2
 BUILDKITE_COMMIT=$3
 BRANCH=$4
-VLLM_USE_PRECOMPILED=$5
-VLLM_MERGE_BASE_COMMIT=$6
-IMAGE_TAG=$7
-IMAGE_TAG_LATEST=${8:-} # only used for main branch, optional
+IMAGE_TAG=$5
+IMAGE_TAG_LATEST=${6:-} # only used for main branch, optional
 
 # build config
 TARGET="test-ci"
@@ -193,8 +196,6 @@ export CACHE_FROM
 export CACHE_FROM_BASE_BRANCH
 export CACHE_FROM_MAIN
 export CACHE_TO
-export VLLM_USE_PRECOMPILED
-export VLLM_MERGE_BASE_COMMIT
 
 # print args
 echo "--- :mag: Arguments"
@@ -202,8 +203,6 @@ echo "REGISTRY: ${REGISTRY}"
 echo "REPO: ${REPO}"
 echo "BUILDKITE_COMMIT: ${BUILDKITE_COMMIT}"
 echo "BRANCH: ${BRANCH}"
-echo "VLLM_USE_PRECOMPILED: ${VLLM_USE_PRECOMPILED}"
-echo "VLLM_MERGE_BASE_COMMIT: ${VLLM_MERGE_BASE_COMMIT}"
 echo "IMAGE_TAG: ${IMAGE_TAG}"
 echo "IMAGE_TAG_LATEST: ${IMAGE_TAG_LATEST}"
 
```
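Net effect of the `image_build.sh` changes: the precompiled/merge-base plumbing is gone, the bake config is written to a temp directory instead of the build context, and the script now takes five required positionals plus an optional latest tag. A hedged before/after invocation sketch (variable values are placeholders):

```bash
# Before: eight positionals, including the now-removed precompiled/merge-base args.
#   image_build.sh "$REGISTRY" "$REPO" "$BUILDKITE_COMMIT" "$BRANCH" \
#     "$VLLM_USE_PRECOMPILED" "$VLLM_MERGE_BASE_COMMIT" "$IMAGE_TAG" "$IMAGE_TAG_LATEST"

# After: five required positionals; IMAGE_TAG_LATEST ($6) only for main-branch builds.
.buildkite/image_build/image_build.sh "$REGISTRY" "$REPO" "$BUILDKITE_COMMIT" "$BRANCH" \
  "$IMAGE_TAG" "$IMAGE_TAG_LATEST"
```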
```diff
@@ -3,9 +3,9 @@ steps:
 - label: ":docker: Build image"
   key: image-build
   depends_on: []
+  timeout_in_minutes: 600
   commands:
-  - if [[ "$BUILDKITE_BRANCH" != "main" ]]; then .buildkite/image_build/image_build.sh $REGISTRY $REPO $BUILDKITE_COMMIT $BRANCH $VLLM_USE_PRECOMPILED $VLLM_MERGE_BASE_COMMIT $IMAGE_TAG; fi
-  - if [[ "$BUILDKITE_BRANCH" == "main" ]]; then .buildkite/image_build/image_build.sh $REGISTRY $REPO $BUILDKITE_COMMIT $BRANCH $VLLM_USE_PRECOMPILED $VLLM_MERGE_BASE_COMMIT $IMAGE_TAG $IMAGE_TAG_LATEST; fi
+  - if [[ "$BUILDKITE_BRANCH" == "main" ]]; then .buildkite/image_build/image_build.sh $REGISTRY $REPO $BUILDKITE_COMMIT $BRANCH $IMAGE_TAG $IMAGE_TAG_LATEST; else .buildkite/image_build/image_build.sh $REGISTRY $REPO $BUILDKITE_COMMIT $BRANCH $IMAGE_TAG; fi
   retry:
     automatic:
     - exit_status: -1 # Agent was lost
@@ -41,7 +41,7 @@ steps:
       limit: 2
     - exit_status: -10 # Agent was lost
       limit: 2
 
 - label: ":docker: Build CPU arm64 image"
   key: cpu-arm64-image-build
   depends_on: []
```
```diff
@@ -11,10 +11,10 @@ REPO=$2
 BUILDKITE_COMMIT=$3
 
 # authenticate with AWS ECR
-aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin $REGISTRY
+aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin "$REGISTRY"
 
 # skip build if image already exists
-if [[ -z $(docker manifest inspect $REGISTRY/$REPO:$BUILDKITE_COMMIT-cpu) ]]; then
+if [[ -z $(docker manifest inspect "$REGISTRY"/"$REPO":"$BUILDKITE_COMMIT"-cpu) ]]; then
   echo "Image not found, proceeding with build..."
 else
   echo "Image found"
@@ -24,13 +24,13 @@ fi
 # build
 docker build --file docker/Dockerfile.cpu \
     --build-arg max_jobs=16 \
-    --build-arg buildkite_commit=$BUILDKITE_COMMIT \
+    --build-arg buildkite_commit="$BUILDKITE_COMMIT" \
     --build-arg VLLM_CPU_AVX512BF16=true \
    --build-arg VLLM_CPU_AVX512VNNI=true \
    --build-arg VLLM_CPU_AMXBF16=true \
-    --tag $REGISTRY/$REPO:$BUILDKITE_COMMIT-cpu \
+    --tag "$REGISTRY"/"$REPO":"$BUILDKITE_COMMIT"-cpu \
     --target vllm-test \
     --progress plain .
 
 # push
-docker push $REGISTRY/$REPO:$BUILDKITE_COMMIT-cpu
+docker push "$REGISTRY"/"$REPO":"$BUILDKITE_COMMIT"-cpu
```
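The recurring change in these build scripts is quoting every expansion that reaches `docker` or `aws`. A small demonstration of the failure mode the quotes prevent, using a made-up value:

```bash
# Hypothetical value containing a space; real commit SHAs will not, but quoting
# makes the scripts robust against any future registry or tag input.
tag='my repo/image:abc123-cpu'

docker manifest inspect $tag    # word-splits into two arguments: 'my' and 'repo/image:abc123-cpu'
docker manifest inspect "$tag"  # passed through as a single argument, as intended
```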
```diff
@@ -11,10 +11,10 @@ REPO=$2
 BUILDKITE_COMMIT=$3
 
 # authenticate with AWS ECR
-aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin $REGISTRY
+aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin "$REGISTRY"
 
 # skip build if image already exists
-if [[ -z $(docker manifest inspect $REGISTRY/$REPO:$BUILDKITE_COMMIT-cpu) ]]; then
+if [[ -z $(docker manifest inspect "$REGISTRY"/"$REPO":"$BUILDKITE_COMMIT"-arm64-cpu) ]]; then
   echo "Image not found, proceeding with build..."
 else
   echo "Image found"
@@ -24,10 +24,10 @@ fi
 # build
 docker build --file docker/Dockerfile.cpu \
     --build-arg max_jobs=16 \
-    --build-arg buildkite_commit=$BUILDKITE_COMMIT \
+    --build-arg buildkite_commit="$BUILDKITE_COMMIT" \
-    --tag $REGISTRY/$REPO:$BUILDKITE_COMMIT-cpu \
+    --tag "$REGISTRY"/"$REPO":"$BUILDKITE_COMMIT"-arm64-cpu \
     --target vllm-test \
     --progress plain .
 
 # push
-docker push $REGISTRY/$REPO:$BUILDKITE_COMMIT-cpu
+docker push "$REGISTRY"/"$REPO":"$BUILDKITE_COMMIT"-arm64-cpu
```
```diff
@@ -11,10 +11,10 @@ REPO=$2
 BUILDKITE_COMMIT=$3
 
 # authenticate with AWS ECR
-aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin $REGISTRY
+aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin "$REGISTRY"
 
 # skip build if image already exists
-if [[ -z $(docker manifest inspect $REGISTRY/$REPO:$BUILDKITE_COMMIT-hpu) ]]; then
+if [[ -z $(docker manifest inspect "$REGISTRY"/"$REPO":"$BUILDKITE_COMMIT"-hpu) ]]; then
   echo "Image not found, proceeding with build..."
 else
   echo "Image found"
@@ -25,10 +25,10 @@ fi
 docker build \
     --file tests/pytorch_ci_hud_benchmark/Dockerfile.hpu \
     --build-arg max_jobs=16 \
-    --build-arg buildkite_commit=$BUILDKITE_COMMIT \
+    --build-arg buildkite_commit="$BUILDKITE_COMMIT" \
-    --tag $REGISTRY/$REPO:$BUILDKITE_COMMIT-hpu \
+    --tag "$REGISTRY"/"$REPO":"$BUILDKITE_COMMIT"-hpu \
     --progress plain \
     https://github.com/vllm-project/vllm-gaudi.git
 
 # push
-docker push $REGISTRY/$REPO:$BUILDKITE_COMMIT-hpu
+docker push "$REGISTRY"/"$REPO":"$BUILDKITE_COMMIT"-hpu
```
```diff
@@ -2,7 +2,7 @@
 # We can use this script to compute baseline accuracy on chartqa for vllm.
 #
 # Make sure you have lm-eval-harness installed:
-# pip install "lm-eval[api]>=0.4.9.2"
+# pip install "lm-eval[api]>=0.4.11"
 
 usage() {
   echo``
@@ -41,4 +41,4 @@ lm_eval --model vllm-vlm \
   --tasks chartqa \
   --batch_size auto \
   --apply_chat_template \
-  --limit $LIMIT
+  --limit "$LIMIT"
```
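The same version-floor bump ("lm-eval[api]>=0.4.9.2" to ">=0.4.11") is applied to the three lm-eval accuracy scripts below as well. To reproduce locally, the pin looks like:

```bash
# Install/upgrade the harness to the new minimum before running these scripts.
pip install "lm-eval[api]>=0.4.11"

# The scripts then drive the harness themselves, e.g. (flags from the chartqa script above):
#   lm_eval --model vllm-vlm ... --tasks chartqa --batch_size auto \
#     --apply_chat_template --limit "$LIMIT"
```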
```diff
@@ -2,7 +2,7 @@
 # We can use this script to compute baseline accuracy on GSM for transformers.
 #
 # Make sure you have lm-eval-harness installed:
-# pip install "lm-eval[api]>=0.4.9.2"
+# pip install "lm-eval[api]>=0.4.11"
 
 usage() {
   echo``
```
```diff
@@ -3,7 +3,7 @@
 # We use this for fp8, which HF does not support.
 #
 # Make sure you have lm-eval-harness installed:
-# pip install "lm-eval[api]>=0.4.9.2"
+# pip install "lm-eval[api]>=0.4.11"
 
 usage() {
   echo``
```
```diff
@@ -3,7 +3,7 @@
 # We use this for fp8, which HF does not support.
 #
 # Make sure you have lm-eval-harness installed:
-# pip install "lm-eval[api]>=0.4.9.2"
+# pip install "lm-eval[api]>=0.4.11"
 
 usage() {
   echo``
@@ -20,14 +20,11 @@ usage() {
   echo
 }
 
-while getopts "m:b:l:f:t:" OPT; do
+while getopts "m:l:f:t:" OPT; do
   case ${OPT} in
     m )
       MODEL="$OPTARG"
       ;;
-    b )
-      BATCH_SIZE="$OPTARG"
-      ;;
     l )
       LIMIT="$OPTARG"
       ;;
```
```diff
@@ -9,8 +9,10 @@ import json
 import os
 from dataclasses import dataclass
 from importlib import util
+from pathlib import Path
 
 import pandas as pd
+import regex as re
 
 pd.options.display.float_format = "{:.2f}".format
 plotly_found = util.find_spec("plotly.express") is not None
```
```diff
@@ -275,6 +277,131 @@ def _apply_two_decimals(
     return styler.format({c: "{:.2f}" for c in num_cols}, na_rep="")
 
 
+# -----------------------------
+# Export helpers (Excel + CSV)
+# -----------------------------
+def _sanitize_sheet_name(name: str) -> str:
+    """
+    Excel sheet constraints:
+    - max 31 chars
+    - cannot contain: : \ / ? * [ ]
+    - cannot be empty
+    """
+    name = "sheet" if name is None else str(name)
+    name = re.sub(r"[:\\/?*\[\]]", "_", name)
+    name = name.strip().strip("'")
+    name = re.sub(r"\s+", " ", name)
+    if not name:
+        name = "sheet"
+    return name[:31]
+
+
+def _group_to_sheet_base(group_cols: list[str], gkey_tuple) -> str:
+    d = dict(zip(group_cols, gkey_tuple))
+    model = d.get("Model", "model")
+    model_short = str(model).split("/")[-1]
+    ilen = d.get("Input Len", "")
+    olen = d.get("Output Len", "")
+    lens = f"_{ilen}x{olen}" if ilen != "" and olen != "" else ""
+    return _sanitize_sheet_name(f"{model_short}{lens}")
+
+
+def _write_tables_to_excel_sheet(
+    writer: pd.ExcelWriter, sheet: str, blocks: list[tuple[str, pd.DataFrame]]
+):
+    startrow = 0
+    for title, df in blocks:
+        pd.DataFrame([[title]]).to_excel(
+            writer, sheet_name=sheet, index=False, header=False, startrow=startrow
+        )
+        startrow += 1
+        df.to_excel(writer, sheet_name=sheet, index=False, startrow=startrow)
+        startrow += len(df) + 3
+
+
+def _safe_filename(s: str) -> str:
+    s = re.sub(r"[^\w\-.]+", "_", str(s).strip())
+    return s[:180] if len(s) > 180 else s
+
+
+# -----------------------------
+# vLLM environment export helper
+# -----------------------------
+def _parse_vllm_env_txt(env_path: Path) -> pd.DataFrame:
+    """Parse vllm_env.txt into a flat table (Section, Key, Value).
+
+    Supports:
+    - section headers as standalone lines (no ':' or '=')
+    - key-value lines like 'OS: Ubuntu ...'
+    - env var lines like 'HF_HOME=/data/hf'
+    """
+    lines = env_path.read_text(encoding="utf-8", errors="replace").splitlines()
+    section = "General"
+    rows: list[dict] = []
+
+    def set_section(s: str):
+        nonlocal section
+        s = (s or "").strip()
+        if s:
+            section = s
+
+    for raw in lines:
+        stripped = raw.strip()
+        if not stripped:
+            continue
+        # divider lines like =====
+        if set(stripped) <= {"="}:
+            continue
+
+        # section header heuristic: short standalone line
+        if ":" not in stripped and "=" not in stripped and len(stripped) <= 64:
+            if stripped.lower().startswith("collecting environment information"):
+                continue
+            set_section(stripped)
+            continue
+
+        # env var style: KEY=VALUE (and not a URL with :)
+        if "=" in stripped and ":" not in stripped:
+            k, v = stripped.split("=", 1)
+            k = k.strip()
+            v = v.strip()
+            if k:
+                rows.append({"Section": section, "Key": k, "Value": v})
+            continue
+
+        # key: value
+        if ":" in stripped:
+            k, v = stripped.split(":", 1)
+            k = k.strip()
+            v = v.strip()
+            if k:
+                rows.append({"Section": section, "Key": k, "Value": v})
+            continue
+
+    return pd.DataFrame(rows, columns=["Section", "Key", "Value"])
+
+
+def _load_env_df_for_inputs(args, files: list[str]) -> pd.DataFrame | None:
+    """Load vllm_env.txt next to the *original* input JSON file.
+
+    Note: when only one -f is provided, the script may split JSON into ./splits/...,
+    but vllm_env.txt typically lives next to the original benchmark_results.json.
+    """
+    base_dir: Path | None = None
+    if getattr(args, "file", None):
+        base_dir = Path(args.file[0]).resolve().parent
+    elif files:
+        base_dir = Path(files[0]).resolve().parent
+    if base_dir is None:
+        return None
+
+    env_path = base_dir / "vllm_env.txt"
+    if not env_path.exists():
+        return None
+    df = _parse_vllm_env_txt(env_path)
+    return df
+
+
 # -----------------------------
 # Valid max concurrency summary helpers
 # -----------------------------
```
```diff
@@ -428,7 +555,6 @@ def build_valid_max_concurrency_summary_html(
 
     summary_df = pd.DataFrame(rows)
 
-    # --- Coerce numeric columns so Styler doesn't miss them due to object dtype ---
     for c in summary_df.columns:
         if c == "Configuration":
             continue
```
```diff
@@ -436,12 +562,10 @@ def build_valid_max_concurrency_summary_html(
 
     both_col = f"Max {conc_col} (Both)"
 
-    # --- Strict 2-decimal formatting for ALL non-Configuration columns ---
     formatters = {}
     for c in summary_df.columns:
         if c == "Configuration":
             continue
-        # default argument binds per-column formatter correctly
         formatters[c] = lambda v: "" if pd.isna(v) else f"{float(v):.2f}"
 
     styler = summary_df.style.format(formatters)
```
```diff
@@ -460,6 +584,95 @@ def build_valid_max_concurrency_summary_html(
     return title + styler.to_html(table_attributes='border="1" class="dataframe"')
 
 
+def build_valid_max_concurrency_summary_df(
+    tput_group_df: pd.DataFrame | None,
+    ttft_group_df: pd.DataFrame | None,
+    tpot_group_df: pd.DataFrame | None,
+    conc_col: str,
+    args,
+) -> pd.DataFrame | None:
+    if ttft_group_df is None and tpot_group_df is None:
+        return None
+
+    ttft_cols = (
+        _config_value_columns(ttft_group_df, conc_col)
+        if ttft_group_df is not None
+        else []
+    )
+    tpot_cols = (
+        _config_value_columns(tpot_group_df, conc_col)
+        if tpot_group_df is not None
+        else []
+    )
+    tput_cols = (
+        _config_value_columns(tput_group_df, conc_col)
+        if tput_group_df is not None
+        else []
+    )
+
+    if ttft_group_df is not None and tpot_group_df is not None:
+        cfg_cols = [c for c in ttft_cols if c in tpot_cols]
+        if tput_group_df is not None:
+            cfg_cols = [c for c in cfg_cols if c in tput_cols] or cfg_cols
+    else:
+        cfg_cols = ttft_cols or tpot_cols
+
+    if not cfg_cols:
+        cfg_cols = sorted(set(ttft_cols) | set(tpot_cols) | set(tput_cols), key=str)
+
+    rows = []
+    for cfg in cfg_cols:
+        ttft_max = (
+            _max_concurrency_ok(ttft_group_df, conc_col, cfg, args.ttft_max_ms)
+            if ttft_group_df is not None
+            else pd.NA
+        )
+        tpot_max = (
+            _max_concurrency_ok(tpot_group_df, conc_col, cfg, args.tpot_max_ms)
+            if tpot_group_df is not None
+            else pd.NA
+        )
+        both = (
+            pd.NA
+            if (pd.isna(ttft_max) or pd.isna(tpot_max))
+            else min(ttft_max, tpot_max)
+        )
+
+        tput_at_both = (
+            _value_at_concurrency(tput_group_df, conc_col, cfg, both)
+            if tput_group_df is not None
+            else pd.NA
+        )
+        ttft_at_both = (
+            _value_at_concurrency(ttft_group_df, conc_col, cfg, both)
+            if ttft_group_df is not None
+            else pd.NA
+        )
+        tpot_at_both = (
+            _value_at_concurrency(tpot_group_df, conc_col, cfg, both)
+            if tpot_group_df is not None
+            else pd.NA
+        )
+
+        rows.append(
+            {
+                "Configuration": cfg,
+                f"Max {conc_col} (TTFT ≤ {args.ttft_max_ms:g} ms)": ttft_max,
+                f"Max {conc_col} (TPOT ≤ {args.tpot_max_ms:g} ms)": tpot_max,
+                f"Max {conc_col} (Both)": both,
+                "Output Tput @ Both (tok/s)": tput_at_both,
+                "TTFT @ Both (ms)": ttft_at_both,
+                "TPOT @ Both (ms)": tpot_at_both,
+            }
+        )
+
+    df = pd.DataFrame(rows)
+    for c in df.columns:
+        if c != "Configuration":
+            df[c] = pd.to_numeric(df[c], errors="coerce")
+    return df
+
+
 # -----------------------------
 # Plot helper
 # -----------------------------
```
```diff
@@ -537,6 +750,21 @@ def build_parser() -> argparse.ArgumentParser:
         default=100.0,
         help="Reference limit for TPOT plots (ms)",
     )
+
+    # ---- NEW: export options ----
+    parser.add_argument(
+        "--excel-out",
+        type=str,
+        default="perf_comparison.xlsx",
+        help="Write one sheet per (Model, Dataset, Input Len, Output Len).",
+    )
+    parser.add_argument(
+        "--csv-out-dir",
+        type=str,
+        default="",
+        help="If set, write per-group per-metric CSVs into this directory.",
+    )
+
     return parser
 
 
```
```diff
@@ -657,7 +885,6 @@ def maybe_write_plot(
         markers=True,
     )
 
-    # Ensure plot hover + y tick labels are also 2 decimals.
     fig.update_traces(hovertemplate="%{y:.2f}<extra></extra>")
     fig.update_yaxes(tickformat=".2f")
 
```
```diff
@@ -730,87 +957,151 @@ def write_report_group_first(
         for metric_label, (df, _) in metric_cache.items()
     }
 
-    with open("perf_comparison.html", "w", encoding="utf-8") as main_fh:
-        main_fh.write('<meta charset="utf-8">\n')
-        for gkey in group_keys:
-            gkey_tuple = normalize_group_key(gkey)
-            suffix = build_group_suffix(group_cols_canonical, gkey_tuple)
-            sub_path = group_filename(gkey_tuple)
-            group_header = (
-                '<div style="font-size: 1.4em; font-weight: 700; '
-                'margin: 18px 0 10px 0;">'
-                f"{_html.escape(suffix)}"
-                "</div>\n"
-            )
-
-            main_fh.write(group_header)
-            with open(sub_path, "w", encoding="utf-8") as sub_fh:
-                sub_fh.write('<meta charset="utf-8">\n')
-                sub_fh.write(group_header)
-                tput_group_df = None
-                ttft_group_df = None
-                tpot_group_df = None
-                conc_col = args.xaxis
-
-                for metric_label in plan.data_cols:
-                    gb = metric_groupbys[metric_label]
-                    df_sorted, raw_data_cols = metric_cache[metric_label]
-
-                    try:
-                        group_df = gb.get_group(gkey)
-                    except KeyError:
-                        missing = (
-                            '<div style="font-size: 1.1em; font-weight: 600; '
-                            'margin: 10px 0;">'
-                            f"{_html.escape(metric_label)} — missing for this group"
-                            "</div>\n"
-                        )
-                        main_fh.write(missing)
-                        sub_fh.write(missing)
-                        continue
-
-                    if conc_col not in group_df.columns:
-                        conc_col = _find_concurrency_col(group_df)
-
-                    mn = metric_label.lower().strip()
-                    if "tok/s" in mn:
-                        tput_group_df = group_df
-                    elif "ttft" in mn:
-                        ttft_group_df = group_df
-                    elif mn in ("p99", "median") or "tpot" in mn:
-                        tpot_group_df = group_df
-
-                    display_group = group_df.drop(
-                        columns=group_cols_canonical, errors="ignore"
-                    )
-
-                    html = render_metric_table_html(
-                        display_group, metric_label, suffix, args
-                    )
-                    main_fh.write(html)
-                    sub_fh.write(html)
-
-                    maybe_write_plot(
-                        main_fh,
-                        sub_fh,
-                        group_df=group_df,
-                        raw_data_cols=raw_data_cols,
-                        metric_label=metric_label,
-                        y_axis_col=y_axis_col,
-                        args=args,
-                    )
-
-                summary_html = build_valid_max_concurrency_summary_html(
-                    tput_group_df=tput_group_df,
-                    ttft_group_df=ttft_group_df,
-                    tpot_group_df=tpot_group_df,
-                    conc_col=conc_col,
-                    args=args,
-                )
-                if summary_html:
-                    main_fh.write(summary_html)
-                    sub_fh.write(summary_html)
+    csv_dir = Path(args.csv_out_dir) if args.csv_out_dir else None
+    if csv_dir:
+        csv_dir.mkdir(parents=True, exist_ok=True)
+
+    excel_path = args.excel_out or "perf_comparison.xlsx"
+    with pd.ExcelWriter(excel_path, engine="openpyxl") as xw:
+        # ---- Environment sheet (first) ----
+        env_sheet = _sanitize_sheet_name("Environment")
+        env_df = _load_env_df_for_inputs(args, files)
+        if env_df is None or env_df.empty:
+            pd.DataFrame(
+                [
+                    {
+                        "Section": "Environment",
+                        "Key": "vllm_env.txt",
+                        "Value": "NOT FOUND (or empty)",
+                    }
+                ]
+            ).to_excel(xw, sheet_name=env_sheet, index=False)
+        else:
+            env_df.to_excel(xw, sheet_name=env_sheet, index=False)
+
+        with open("perf_comparison.html", "w", encoding="utf-8") as main_fh:
+            main_fh.write('<meta charset="utf-8">\n')
+            for gkey in group_keys:
+                gkey_tuple = normalize_group_key(gkey)
+                suffix = build_group_suffix(group_cols_canonical, gkey_tuple)
+                sub_path = group_filename(gkey_tuple)
+                group_header = (
+                    '<div style="font-size: 1.4em; font-weight: 700; '
+                    'margin: 18px 0 10px 0;">'
+                    f"{_html.escape(suffix)}"
+                    "</div>\n"
+                )
+
+                main_fh.write(group_header)
+
+                sheet = _group_to_sheet_base(group_cols_canonical, gkey_tuple)
+                sheet_base = sheet
+                dedup_i = 1
+                while sheet in xw.sheets:
+                    dedup_i += 1
+                    sheet = _sanitize_sheet_name(f"{sheet_base}_{dedup_i}")
+
+                excel_blocks: list[tuple[str, pd.DataFrame]] = []
+
+                with open(sub_path, "w", encoding="utf-8") as sub_fh:
+                    sub_fh.write('<meta charset="utf-8">\n')
+                    sub_fh.write(group_header)
+                    tput_group_df = None
+                    ttft_group_df = None
+                    tpot_group_df = None
+                    conc_col = args.xaxis
+
+                    for metric_label in plan.data_cols:
+                        gb = metric_groupbys[metric_label]
+                        df_sorted, raw_data_cols = metric_cache[metric_label]
+
+                        try:
+                            group_df = gb.get_group(gkey)
+                        except KeyError:
+                            missing = (
+                                '<div style="font-size: 1.1em; font-weight: 600; '
+                                'margin: 10px 0;">'
+                                f"{_html.escape(metric_label)} — missing for this group"
+                                "</div>\n"
+                            )
+                            main_fh.write(missing)
+                            sub_fh.write(missing)
+                            continue
+
+                        if conc_col not in group_df.columns:
+                            conc_col = _find_concurrency_col(group_df)
+
+                        mn = metric_label.lower().strip()
+                        if "tok/s" in mn:
+                            tput_group_df = group_df
+                        elif "ttft" in mn:
+                            ttft_group_df = group_df
+                        elif mn in ("p99", "median") or "tpot" in mn:
+                            tpot_group_df = group_df
+
+                        display_group = group_df.drop(
+                            columns=group_cols_canonical, errors="ignore"
+                        )
+
+                        html = render_metric_table_html(
+                            display_group, metric_label, suffix, args
+                        )
+                        main_fh.write(html)
+                        sub_fh.write(html)
+
+                        maybe_write_plot(
+                            main_fh,
+                            sub_fh,
+                            group_df=group_df,
+                            raw_data_cols=raw_data_cols,
+                            metric_label=metric_label,
+                            y_axis_col=y_axis_col,
+                            args=args,
+                        )
+
+                        excel_blocks.append(
+                            (metric_label, display_group.reset_index(drop=True))
+                        )
+                        if csv_dir:
+                            fn = _safe_filename(
+                                f"{sheet}__{metric_label}".replace(" ", "_").replace(
+                                    "/", "_"
+                                )
+                            )
+                            display_group.to_csv(csv_dir / f"{fn}.csv", index=False)
+
+                    summary_html = build_valid_max_concurrency_summary_html(
+                        tput_group_df=tput_group_df,
+                        ttft_group_df=ttft_group_df,
+                        tpot_group_df=tpot_group_df,
+                        conc_col=conc_col,
+                        args=args,
+                    )
+                    if summary_html:
+                        main_fh.write(summary_html)
+                        sub_fh.write(summary_html)
+
+                    summary_df = build_valid_max_concurrency_summary_df(
+                        tput_group_df=tput_group_df,
+                        ttft_group_df=ttft_group_df,
+                        tpot_group_df=tpot_group_df,
+                        conc_col=conc_col,
+                        args=args,
+                    )
+                    if summary_df is not None:
+                        excel_blocks.append(
+                            ("Valid Max Concurrency Summary", summary_df)
+                        )
+                        if csv_dir:
+                            fn = _safe_filename(
+                                f"{sheet}__Valid_Max_Concurrency_Summary"
+                            )
+                            summary_df.to_csv(csv_dir / f"{fn}.csv", index=False)
+
+                    _write_tables_to_excel_sheet(xw, sheet, excel_blocks)
+
+    print(f"Wrote Excel: {excel_path}")
+    if csv_dir:
+        print(f"Wrote CSVs under: {csv_dir}")
 
 
 def main():
```
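Taken together, the report-script changes add an Environment sheet, one Excel sheet per group, and optional per-metric CSVs alongside the existing HTML output. A hedged usage sketch; the script's path is not shown in this compare view, so the filename here is a placeholder:

```bash
# Placeholder filename; only the new flags are taken from the diff above.
python compare_perf_results.py \
  -f run_a/benchmark_results.json \
  -f run_b/benchmark_results.json \
  --excel-out perf_comparison.xlsx \
  --csv-out-dir ./csv_out
# Expected outputs: perf_comparison.html, perf_comparison.xlsx (Environment sheet
# first, then one sheet per group), and per-group CSVs under ./csv_out.
```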
```diff
@@ -1,6 +1,4 @@
 #!/bin/bash
-
-# This script should be run inside the CI process
 # This script assumes that we are already inside the vllm/ directory
 # Benchmarking results will be available inside vllm/benchmarks/results/
 
```
```diff
@@ -9,14 +7,19 @@
 set -x
 set -o pipefail
 
+# Environment-driven debug controls (like ON_CPU=1)
+DRY_RUN="${DRY_RUN:-0}"
+MODEL_FILTER="${MODEL_FILTER:-}"
+DTYPE_FILTER="${DTYPE_FILTER:-}"
+
 check_gpus() {
   if command -v nvidia-smi; then
     # check the number of GPUs and GPU type.
-    declare -g gpu_count=$(nvidia-smi --list-gpus | wc -l)
+    declare -g gpu_count=$(nvidia-smi --list-gpus | grep -c . || true)
   elif command -v amd-smi; then
-    declare -g gpu_count=$(amd-smi list | grep 'GPU' | wc -l)
+    declare -g gpu_count=$(amd-smi list | grep -c 'GPU' || true)
   elif command -v hl-smi; then
-    declare -g gpu_count=$(hl-smi --list | grep -i "Module ID" | wc -l)
+    declare -g gpu_count=$(hl-smi --list | grep -ci "Module ID" || true)
   fi
 
   if [[ $gpu_count -gt 0 ]]; then
```
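The new `DRY_RUN`/`MODEL_FILTER`/`DTYPE_FILTER` variables are read straight from the environment; per the helper added further down, the filters only take effect when `DRY_RUN=1`. A sketch of the intended local-debug invocation (the script path and model name are placeholders):

```bash
# List/filter the serving test cases for a single model and dtype without
# modifying any JSON on disk; the filters apply only because DRY_RUN=1.
DRY_RUN=1 \
MODEL_FILTER="org/model-name" \
DTYPE_FILTER="bfloat16" \
bash run-performance-benchmarks.sh
```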
```diff
@@ -44,7 +47,7 @@ check_cpus() {
   declare -g numa_count=$(lscpu | grep "NUMA node(s):" | awk '{print $3}')
   if [[ $numa_count -gt 0 ]]; then
     echo "NUMA found."
-    echo $numa_count
+    echo "$numa_count"
   else
     echo "Need at least 1 NUMA to run benchmarking."
     exit 1
```
@@ -112,13 +115,12 @@ json2envs() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
wait_for_server() {
|
wait_for_server() {
|
||||||
# wait for vllm server to start
|
|
||||||
# return 1 if vllm server crashes
|
|
||||||
local timeout_val="1200"
|
local timeout_val="1200"
|
||||||
timeout "$timeout_val" bash -c '
|
timeout "$timeout_val" bash -c '
|
||||||
until curl -X POST localhost:8000/v1/completions; do
|
until curl -sf http://localhost:8000/v1/models >/dev/null; do
|
||||||
sleep 1
|
sleep 1
|
||||||
done' && return 0 || return 1
|
done
|
||||||
|
'
|
||||||
}
|
}
|
||||||
|
|
||||||
kill_processes_launched_by_current_bash() {
|
kill_processes_launched_by_current_bash() {
|
||||||
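The reworked `wait_for_server` polls the lightweight `GET /v1/models` route with `curl -sf` (quiet, fail on HTTP errors) instead of firing malformed POSTs at `/v1/completions`, and lets `timeout` supply the function's exit status directly. A minimal standalone sketch of the same readiness loop, assuming a server coming up on the default `localhost:8000` (the 120-second budget here is illustrative):

```bash
# Poll until the OpenAI-compatible route answers 2xx, or give up after 120s.
timeout 120 bash -c '
    until curl -sf http://localhost:8000/v1/models >/dev/null; do
        sleep 1   # server not accepting requests yet; retry
    done
' && echo "server is up" || echo "server did not come up in time"
```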
@@ -252,37 +254,16 @@ run_benchmark_tests() {
   done
 }

-run_latency_tests() {
-  run_benchmark_tests "latency" "$1"
-}
-
-run_startup_tests() {
-  run_benchmark_tests "startup" "$1"
-}
-
-run_throughput_tests() {
-  run_benchmark_tests "throughput" "$1"
-}
-
-run_serving_tests() {
-  # run serving tests using `vllm bench serve` command
-  # $1: a json file specifying serving test cases
-  #
-  # Supported JSON formats:
-  # 1) Plain format: top-level array
-  #    [ { "test_name": "...", "server_parameters": {...}, ... }, ... ]
-  #
-  # 2) Default parameters field + plain format tests
-  #    {
-  #      "defaults": { ... },
-  #      "tests": [ { "test_name": "...", "server_parameters": {...}, ... }, ... ]
-  #    }
-
-  local serving_test_file
-  serving_test_file=$1
-
-  # Iterate over serving tests
-  jq -c '
+run_latency_tests() { run_benchmark_tests "latency" "$1"; }
+run_startup_tests() { run_benchmark_tests "startup" "$1"; }
+run_throughput_tests() { run_benchmark_tests "throughput" "$1"; }
+
+merge_serving_tests_stream() {
+  # Emit merged serving test objects, optionally filtered by
+  # MODEL_FILTER/DTYPE_FILTER in DRY_RUN mode.
+  # This helper does NOT modify JSON; it only filters the stream in dry-run mode.
+  local serving_test_file="$1"
+  # shellcheck disable=SC2016
+  local merged='
     if type == "array" then
       # Plain format: test cases array
       .[]

@@ -304,7 +285,50 @@ run_serving_tests() {
     else
       error("Unsupported serving test file format: must be array or object with .tests")
     end
-  ' "$serving_test_file" | while read -r params; do
+  '
+
+  jq -c "$merged" "$serving_test_file" | \
+    if [[ "${DRY_RUN:-0}" == "1" && ( "${MODEL_FILTER}${DTYPE_FILTER}" != "" ) ]]; then
+      jq -c --arg model "$MODEL_FILTER" --arg dtype "$DTYPE_FILTER" '
+        select((($model|length)==0)
+               or ((.server_parameters.model // "") == $model)
+               or ((.client_parameters.model // "") == $model))
+        | select((($dtype|length)==0) or ((.server_parameters.dtype // "") == $dtype))
+      '
+    else
+      cat
+    fi
+}
+
+run_serving_tests() {
+  # run serving tests using `vllm bench serve` command
+  # $1: a json file specifying serving test cases
+  #
+  # Supported JSON formats:
+  # 1) Plain format: top-level array
+  #    [ { "test_name": "...", "server_parameters": {...}, ... }, ... ]
+  #
+  # 2) Default parameters field + plain format tests
+  #    {
+  #      "defaults": { ... },
+  #      "tests": [ { "test_name": "...", "server_parameters": {...}, ... }, ... ]
+  #    }
+
+  local serving_test_file
+  serving_test_file=$1
+
+  # In dry-run mode, if filters are provided but no tests match, fail fast.
+  if [[ "${DRY_RUN:-0}" == "1" && ( "${MODEL_FILTER}${DTYPE_FILTER}" != "" ) ]]; then
+    local count
+    count=$(merge_serving_tests_stream "$serving_test_file" | wc -l | tr -d ' ')
+    if [[ "$count" -eq 0 ]]; then
+      echo "No matching serving tests found in $serving_test_file for model='$MODEL_FILTER' dtype='$DTYPE_FILTER'." >&2
+      return 0
+    fi
+  fi
+
+  # Iterate over serving tests (merged + optional filtered stream)
+  merge_serving_tests_stream "$serving_test_file" | while read -r params; do
     # get the test name, and append the GPU type back to it.
     test_name=$(echo "$params" | jq -r '.test_name')
     if [[ ! "$test_name" =~ ^serving_ ]]; then
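Because `merge_serving_tests_stream` emits one compact JSON object per line, it composes with any downstream `jq` consumer. A quick inspection sketch, assuming the function has been sourced into the current shell (the test-file path is illustrative):

```bash
# List the serving tests that survive the dry-run filters, one name per line.
DRY_RUN=1 MODEL_FILTER="meta-llama/Llama-3.1-8B-Instruct" \
    merge_serving_tests_stream tests/serving-tests.json | jq -r '.test_name'
```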
@@ -373,7 +397,7 @@ run_serving_tests() {
     echo "Server command: $server_command"
     # support remote vllm server
     client_remote_args=""
-    if [[ -z "${REMOTE_HOST}" ]]; then
+    if [[ -z "${REMOTE_HOST}" && "${DRY_RUN:-0}" != "1" ]]; then
       bash -c "$server_command" &
       server_pid=$!
       # wait until the server is alive

@@ -384,6 +408,9 @@ run_serving_tests() {
         echo ""
         echo "vLLM failed to start within the timeout period."
       fi
+    elif [[ "${DRY_RUN:-0}" == "1" ]]; then
+      # dry-run: don't start server
+      echo "Dry Run."
     else
       server_command="Using Remote Server $REMOTE_HOST $REMOTE_PORT"
       if [[ ${REMOTE_PORT} ]]; then

@@ -402,14 +429,12 @@ run_serving_tests() {
     for qps in $qps_list; do
       # remove the surrounding single quote from qps
       if [[ "$qps" == *"inf"* ]]; then
-        echo "qps was $qps"
         qps="inf"
-        echo "now qps is $qps"
       fi

       # iterate over different max_concurrency
       for max_concurrency in $max_concurrency_list; do
-        new_test_name=$test_name"_qps_"$qps"_concurrency_"$max_concurrency
+        new_test_name="${test_name}_qps_${qps}_concurrency_${max_concurrency}"
         echo "  new test name $new_test_name"
         # pass the tensor parallel size, the compilation mode, and the optimization
         # level to the client so that they can be used on the benchmark dashboard

@@ -425,7 +450,9 @@ run_serving_tests() {
         echo "Running test case $test_name with qps $qps"
         echo "Client command: $client_command"

-        bash -c "$client_command"
+        if [[ "${DRY_RUN:-0}" != "1" ]]; then
+          bash -c "$client_command"
+        fi

         # record the benchmarking commands
         jq_output=$(jq -n \

@@ -443,12 +470,15 @@ run_serving_tests() {
     done

     # clean up
-    kill -9 $server_pid
-    kill_gpu_processes
+    if [[ "${DRY_RUN:-0}" != "1" ]]; then
+      kill -9 "$server_pid"
+      kill_gpu_processes
+    fi
   done
 }

 main() {

   local ARCH
   ARCH=''
   if [[ "$ON_CPU" == "1" ]]; then

@@ -458,7 +488,13 @@ main() {
     check_gpus
     ARCH="$arch_suffix"
   fi
-  check_hf_token
+  # DRY_RUN does not execute vLLM; do not require HF_TOKEN.
+  if [[ "${DRY_RUN:-0}" != "1" ]]; then
+    check_hf_token
+  else
+    echo "DRY_RUN=1 -> skip HF_TOKEN validation"
+  fi

   # dependencies
   (which wget && which curl) || (apt-get update && apt-get install -y wget curl)

@@ -479,11 +515,16 @@ main() {

   # dump vllm info via vllm collect-env
   env_output=$(vllm collect-env)

   echo "$env_output" >"$RESULTS_FOLDER/vllm_env.txt"

   # benchmarking
-  run_serving_tests $QUICK_BENCHMARK_ROOT/tests/"${SERVING_JSON:-serving-tests$ARCH.json}"
+  run_serving_tests $QUICK_BENCHMARK_ROOT/tests/"${SERVING_JSON:-serving-tests$ARCH.json}" || exit $?
+
+  if [[ "${DRY_RUN:-0}" == "1" ]]; then
+    echo "DRY_RUN=1 -> skip latency/startup/throughput suites"
+    exit 0
+  fi

   run_latency_tests $QUICK_BENCHMARK_ROOT/tests/"${LATENCY_JSON:-latency-tests$ARCH.json}"
   run_startup_tests $QUICK_BENCHMARK_ROOT/tests/"${STARTUP_JSON:-startup-tests$ARCH.json}"
   run_throughput_tests $QUICK_BENCHMARK_ROOT/tests/"${THROUGHPUT_JSON:-throughput-tests$ARCH.json}"
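Taken together, the DRY_RUN hooks let the whole serving matrix be validated without GPUs, servers, or an HF token. A sketch of a dry-run invocation (the entrypoint file name is an assumption; substitute the actual benchmark script):

```bash
# Verify that a model/dtype pair is picked up by the serving test matrix;
# no server or client processes are launched in dry-run mode.
DRY_RUN=1 \
MODEL_FILTER="meta-llama/Llama-3.1-8B-Instruct" \
DTYPE_FILTER="bfloat16" \
bash run-performance-benchmarks.sh
```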
@@ -51,5 +51,56 @@
                 "max-model-len": 256,
                 "async-scheduling": ""
             }
+        },
+        {
+            "test_name": "latency_deepseek_r1",
+            "environment_variables": {
+                "PT_HPU_LAZY_MODE": 1,
+                "PT_HPU_ENABLE_LAZY_COLLECTIVES": 1,
+                "VLLM_CONTIGUOUS_PA": 1,
+                "VLLM_DEFRAG": 1
+            },
+            "parameters": {
+                "model": "deepseek-ai/DeepSeek-R1",
+                "tensor_parallel_size": 8,
+                "load_format": "dummy",
+                "max-model-len": 2048,
+                "dtype": "bfloat16"
+            }
+        },
+        {
+            "test_name": "latency_llama4_maverick_17b128e_instruct_fp8",
+            "environment_variables": {
+                "PT_HPU_LAZY_MODE": 1,
+                "PT_HPU_ENABLE_LAZY_COLLECTIVES": 1,
+                "VLLM_CONTIGUOUS_PA": 1,
+                "VLLM_DEFRAG": 1
+            },
+            "parameters": {
+                "model": "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
+                "tensor_parallel_size": 8,
+                "max-model-len": 512,
+                "max-num-seqs": 128,
+                "async-scheduling": "",
+                "gpu-memory-utilization": 0.95,
+                "enable_expert_parallel": ""
+            }
+        },
+        {
+            "test_name": "latency_qwen3_8b",
+            "environment_variables": {
+                "PT_HPU_LAZY_MODE": 1,
+                "PT_HPU_ENABLE_LAZY_COLLECTIVES": 1,
+                "VLLM_CONTIGUOUS_PA": 1,
+                "VLLM_DEFRAG": 1
+            },
+            "parameters": {
+                "model": "Qwen/Qwen3-8B",
+                "tensor_parallel_size": 1,
+                "max-model-len": 2048,
+                "max-num-seqs": 128,
+                "dtype": "bfloat16",
+                "async-scheduling": ""
+            }
         }
 ]
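Each suite file above is resolved through the `*_JSON` overrides added to `main()`, so a one-off run can point a suite at an alternative file; a hedged sketch (file and script names are illustrative):

```bash
# Point the latency suite at a custom file instead of latency-tests$ARCH.json.
LATENCY_JSON=latency-tests-hpu.json bash run-performance-benchmarks.sh
```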
@@ -0,0 +1,41 @@
{
    "defaults": {
        "qps_list": [
            "inf"
        ],
        "max_concurrency_list": [
            32,
            64,
            128
        ],
        "server_environment_variables": {
            "VLLM_RPC_TIMEOUT": 100000,
            "VLLM_ALLOW_LONG_MAX_MODEL_LEN": 1,
            "VLLM_ENGINE_ITERATION_TIMEOUT_S": 120,
            "VLLM_CPU_SGL_KERNEL": 1,
            "VLLM_CPU_KVCACHE_SPACE": 40
        },
        "server_parameters": {
            "dtype": "bfloat16",
            "model": "jinaai/jina-embeddings-v3",
            "trust_remote_code": ""
        },
        "client_parameters": {
            "model": "jinaai/jina-embeddings-v3",
            "backend": "openai-embeddings",
            "endpoint": "/v1/embeddings",
            "dataset_name": "sharegpt",
            "dataset_path": "ShareGPT_V3_unfiltered_cleaned_split.json",
            "num_prompts": 200
        }
    },
    "tests": [
        {
            "test_name": "serving_jina_embed_v3_tp1_sharegpt",
            "server_parameters": {
                "tensor_parallel_size": 1
            },
            "client_parameters": {}
        }
    ]
}
@@ -0,0 +1,283 @@
{
    "defaults": {
        "qps_list": [
            "inf"
        ],
        "max_concurrency_list": [12, 16, 24, 32, 64, 128, 200],
        "server_environment_variables": {
            "VLLM_RPC_TIMEOUT": 100000,
            "VLLM_ALLOW_LONG_MAX_MODEL_LEN": 1,
            "VLLM_ENGINE_ITERATION_TIMEOUT_S": 120,
            "VLLM_CPU_SGL_KERNEL": 1,
            "VLLM_CPU_KVCACHE_SPACE": 40
        },
        "server_parameters": {
            "model": "meta-llama/Llama-3.1-8B-Instruct",
            "tensor_parallel_size": 1,
            "dtype": "bfloat16",
            "distributed_executor_backend": "mp",
            "block_size": 128,
            "trust_remote_code": "",
            "disable_log_stats": "",
            "max_num_batched_tokens": 2048,
            "max_num_seqs": 256
        },
        "client_parameters": {
            "model": "meta-llama/Llama-3.1-8B-Instruct",
            "backend": "vllm",
            "ignore-eos": "",
            "num_prompts": 200
        }
    },
    "tests": [
        {
            "test_name": "serving_llama8B_tp1_sharegpt",
            "server_parameters": {
                "tensor_parallel_size": 1
            },
            "client_parameters": {
                "dataset_name": "sharegpt",
                "dataset_path": "./ShareGPT_V3_unfiltered_cleaned_split.json"
            }
        },
        {
            "test_name": "serving_llama8B_tp2_sharegpt",
            "server_parameters": {
                "tensor_parallel_size": 2
            },
            "client_parameters": {
                "dataset_name": "sharegpt",
                "dataset_path": "./ShareGPT_V3_unfiltered_cleaned_split.json"
            }
        },
        {
            "test_name": "serving_llama8B_tp1_random_128_128",
            "server_parameters": {
                "tensor_parallel_size": 1
            },
            "client_parameters": {
                "dataset_name": "random",
                "random-input-len": 128,
                "random-output-len": 128
            }
        },
        {
            "test_name": "serving_llama8B_tp2_random_128_128",
            "server_parameters": {
                "tensor_parallel_size": 2
            },
            "client_parameters": {
                "dataset_name": "random",
                "random-input-len": 128,
                "random-output-len": 128
            }
        },
        {
            "test_name": "serving_llama8B_tp4_random_128_128",
            "server_parameters": {
                "tensor_parallel_size": 4
            },
            "client_parameters": {
                "dataset_name": "random",
                "random-input-len": 128,
                "random-output-len": 128
            }
        },
        {
            "test_name": "serving_llama8B_tp1_random_128_2048",
            "server_parameters": {
                "tensor_parallel_size": 1
            },
            "client_parameters": {
                "dataset_name": "random",
                "random-input-len": 128,
                "random-output-len": 2048
            }
        },
        {
            "test_name": "serving_llama8B_tp2_random_128_2048",
            "server_parameters": {
                "tensor_parallel_size": 2
            },
            "client_parameters": {
                "dataset_name": "random",
                "random-input-len": 128,
                "random-output-len": 2048
            }
        },
        {
            "test_name": "serving_llama8B_tp4_random_128_2048",
            "server_parameters": {
                "tensor_parallel_size": 4
            },
            "client_parameters": {
                "dataset_name": "random",
                "random-input-len": 128,
                "random-output-len": 2048
            }
        },
        {
            "test_name": "serving_llama8B_tp1_random_2048_128",
            "server_parameters": {
                "tensor_parallel_size": 1
            },
            "client_parameters": {
                "dataset_name": "random",
                "random-input-len": 2048,
                "random-output-len": 128
            }
        },
        {
            "test_name": "serving_llama8B_tp2_random_2048_128",
            "server_parameters": {
                "tensor_parallel_size": 2
            },
            "client_parameters": {
                "dataset_name": "random",
                "random-input-len": 2048,
                "random-output-len": 128
            }
        },
        {
            "test_name": "serving_llama8B_tp4_random_2048_128",
            "server_parameters": {
                "tensor_parallel_size": 4
            },
            "client_parameters": {
                "dataset_name": "random",
                "random-input-len": 2048,
                "random-output-len": 128
            }
        },
        {
            "test_name": "serving_llama8B_int4_tp1_random_128_128",
            "server_parameters": {
                "model": "hugging-quants/Meta-Llama-3.1-8B-Instruct-AWQ-INT4",
                "tensor_parallel_size": 1
            },
            "client_parameters": {
                "model": "hugging-quants/Meta-Llama-3.1-8B-Instruct-AWQ-INT4",
                "dataset_name": "random",
                "random-input-len": 128,
                "random-output-len": 128
            }
        },
        {
            "test_name": "serving_llama8B_int4_tp2_random_128_128",
            "server_parameters": {
                "model": "hugging-quants/Meta-Llama-3.1-8B-Instruct-AWQ-INT4",
                "tensor_parallel_size": 2
            },
            "client_parameters": {
                "model": "hugging-quants/Meta-Llama-3.1-8B-Instruct-AWQ-INT4",
                "dataset_name": "random",
                "random-input-len": 128,
                "random-output-len": 128
            }
        },
        {
            "test_name": "serving_llama8B_int4_tp4_random_128_128",
            "server_parameters": {
                "model": "hugging-quants/Meta-Llama-3.1-8B-Instruct-AWQ-INT4",
                "tensor_parallel_size": 4
            },
            "client_parameters": {
                "model": "hugging-quants/Meta-Llama-3.1-8B-Instruct-AWQ-INT4",
                "dataset_name": "random",
                "random-input-len": 128,
                "random-output-len": 128
            }
        },
        {
            "test_name": "serving_llama3B_tp1_random_128_128",
            "server_parameters": {
                "model": "meta-llama/Llama-3.2-3B-Instruct",
                "tensor_parallel_size": 1
            },
            "client_parameters": {
                "model": "meta-llama/Llama-3.2-3B-Instruct",
                "dataset_name": "random",
                "random-input-len": 128,
                "random-output-len": 128
            }
        },
        {
            "test_name": "serving_granite2B_tp1_random_128_128",
            "server_parameters": {
                "model": "ibm-granite/granite-3.2-2b-instruct",
                "tensor_parallel_size": 1
            },
            "client_parameters": {
                "model": "ibm-granite/granite-3.2-2b-instruct",
                "dataset_name": "random",
                "random-input-len": 128,
                "random-output-len": 128
            }
        },
        {
            "test_name": "serving_qwen1.7B_tp1_random_128_128",
            "server_parameters": {
                "model": "Qwen/Qwen3-1.7B",
                "tensor_parallel_size": 1
            },
            "client_parameters": {
                "model": "Qwen/Qwen3-1.7B",
                "dataset_name": "random",
                "random-input-len": 128,
                "random-output-len": 128
            }
        },
        {
            "test_name": "serving_qwen4B_tp1_random_128_128",
            "server_parameters": {
                "model": "Qwen/Qwen3-4B",
                "tensor_parallel_size": 1
            },
            "client_parameters": {
                "model": "Qwen/Qwen3-4B",
                "dataset_name": "random",
                "random-input-len": 128,
                "random-output-len": 128
            }
        },
        {
            "test_name": "serving_qwen8B_tp1_random_128_128",
            "server_parameters": {
                "model": "Qwen/Qwen3-8B",
                "tensor_parallel_size": 1
            },
            "client_parameters": {
                "model": "Qwen/Qwen3-8B",
                "dataset_name": "random",
                "random-input-len": 128,
                "random-output-len": 128
            }
        },
        {
            "test_name": "serving_glm9B_tp1_random_128_128",
            "server_parameters": {
                "model": "zai-org/glm-4-9b-hf",
                "tensor_parallel_size": 1
            },
            "client_parameters": {
                "model": "zai-org/glm-4-9b-hf",
                "dataset_name": "random",
                "random-input-len": 128,
                "random-output-len": 128
            }
        },
        {
            "test_name": "serving_gemma7B_tp1_random_128_128",
            "server_parameters": {
                "model": "google/gemma-7b",
                "tensor_parallel_size": 1
            },
            "client_parameters": {
                "model": "google/gemma-7b",
                "dataset_name": "random",
                "random-input-len": 128,
                "random-output-len": 128
            }
        }
    ]
}
@@ -148,136 +148,6 @@
             "random-input-len": 2048,
             "random-output-len": 128
         }
-    },
-    {
-        "test_name": "serving_llama8B_int4_tp1_random_128_128",
-        "server_parameters": {
-            "model": "hugging-quants/Meta-Llama-3.1-8B-Instruct-AWQ-INT4",
-            "tensor_parallel_size": 1
-        },
-        "client_parameters": {
-            "model": "hugging-quants/Meta-Llama-3.1-8B-Instruct-AWQ-INT4",
-            "dataset_name": "random",
-            "random-input-len": 128,
-            "random-output-len": 128
-        }
-    },
-    {
-        "test_name": "serving_llama8B_int4_tp2_random_128_128",
-        "server_parameters": {
-            "model": "hugging-quants/Meta-Llama-3.1-8B-Instruct-AWQ-INT4",
-            "tensor_parallel_size": 2
-        },
-        "client_parameters": {
-            "model": "hugging-quants/Meta-Llama-3.1-8B-Instruct-AWQ-INT4",
-            "dataset_name": "random",
-            "random-input-len": 128,
-            "random-output-len": 128
-        }
-    },
-    {
-        "test_name": "serving_llama8B_int4_tp4_random_128_128",
-        "server_parameters": {
-            "model": "hugging-quants/Meta-Llama-3.1-8B-Instruct-AWQ-INT4",
-            "tensor_parallel_size": 4
-        },
-        "client_parameters": {
-            "model": "hugging-quants/Meta-Llama-3.1-8B-Instruct-AWQ-INT4",
-            "dataset_name": "random",
-            "random-input-len": 128,
-            "random-output-len": 128
-        }
-    },
-    {
-        "test_name": "serving_llama3B_tp1_random_128_128",
-        "server_parameters": {
-            "model": "meta-llama/Llama-3.2-3B-Instruct",
-            "tensor_parallel_size": 1
-        },
-        "client_parameters": {
-            "model": "meta-llama/Llama-3.2-3B-Instruct",
-            "dataset_name": "random",
-            "random-input-len": 128,
-            "random-output-len": 128
-        }
-    },
-    {
-        "test_name": "serving_granite2B_tp1_random_128_128",
-        "server_parameters": {
-            "model": "ibm-granite/granite-3.2-2b-instruct",
-            "tensor_parallel_size": 1
-        },
-        "client_parameters": {
-            "model": "ibm-granite/granite-3.2-2b-instruct",
-            "dataset_name": "random",
-            "random-input-len": 128,
-            "random-output-len": 128
-        }
-    },
-    {
-        "test_name": "serving_qwen1.7B_tp1_random_128_128",
-        "server_parameters": {
-            "model": "Qwen/Qwen3-1.7B",
-            "tensor_parallel_size": 1
-        },
-        "client_parameters": {
-            "model": "Qwen/Qwen3-1.7B",
-            "dataset_name": "random",
-            "random-input-len": 128,
-            "random-output-len": 128
-        }
-    },
-    {
-        "test_name": "serving_qwen4B_tp1_random_128_128",
-        "server_parameters": {
-            "model": "Qwen/Qwen3-4B",
-            "tensor_parallel_size": 1
-        },
-        "client_parameters": {
-            "model": "Qwen/Qwen3-4B",
-            "dataset_name": "random",
-            "random-input-len": 128,
-            "random-output-len": 128
-        }
-    },
-    {
-        "test_name": "serving_qwen8B_tp1_random_128_128",
-        "server_parameters": {
-            "model": "Qwen/Qwen3-8B",
-            "tensor_parallel_size": 1
-        },
-        "client_parameters": {
-            "model": "Qwen/Qwen3-8B",
-            "dataset_name": "random",
-            "random-input-len": 128,
-            "random-output-len": 128
-        }
-    },
-    {
-        "test_name": "serving_glm9B_tp1_random_128_128",
-        "server_parameters": {
-            "model": "zai-org/glm-4-9b-hf",
-            "tensor_parallel_size": 1
-        },
-        "client_parameters": {
-            "model": "zai-org/glm-4-9b-hf",
-            "dataset_name": "random",
-            "random-input-len": 128,
-            "random-output-len": 128
-        }
-    },
-    {
-        "test_name": "serving_gemma7B_tp1_random_128_128",
-        "server_parameters": {
-            "model": "google/gemma-7b",
-            "tensor_parallel_size": 1
-        },
-        "client_parameters": {
-            "model": "google/gemma-7b",
-            "dataset_name": "random",
-            "random-input-len": 128,
-            "random-output-len": 128
-        }
     }
 ]
 }
@@ -78,5 +78,84 @@
             "dataset_path": "./ShareGPT_V3_unfiltered_cleaned_split.json",
             "num_prompts": 200
         }
+    },
+    {
+        "test_name": "serving_deepseek_r1",
+        "qps_list": [1, 4, 16, "inf"],
+        "server_environment_variables": {
+            "PT_HPU_LAZY_MODE": 1,
+            "PT_HPU_ENABLE_LAZY_COLLECTIVES": 1,
+            "VLLM_CONTIGUOUS_PA": 1,
+            "VLLM_DEFRAG": 1
+        },
+        "server_parameters": {
+            "model": "deepseek-ai/DeepSeek-R1",
+            "tensor_parallel_size": 8,
+            "swap_space": 16,
+            "disable_log_stats": "",
+            "load_format": "dummy",
+            "max-model-len": 2048,
+            "max-num-seqs": 200,
+            "async-scheduling": "",
+            "dtype": "bfloat16"
+        },
+        "client_parameters": {
+            "model": "deepseek-ai/DeepSeek-R1",
+            "backend": "vllm",
+            "dataset_name": "sharegpt",
+            "dataset_path": "./ShareGPT_V3_unfiltered_cleaned_split.json",
+            "num_prompts": 200
+        }
+    },
+    {
+        "test_name": "serving_llama4_maverick_17b128e_instruct_fp8",
+        "qps_list": [1, 4, 16, "inf"],
+        "server_environment_variables": {
+            "PT_HPU_LAZY_MODE": 1,
+            "PT_HPU_ENABLE_LAZY_COLLECTIVES": 1,
+            "VLLM_CONTIGUOUS_PA": 1,
+            "VLLM_DEFRAG": 1
+        },
+        "server_parameters": {
+            "model": "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
+            "tensor_parallel_size": 8,
+            "disable_log_stats": "",
+            "max-model-len": 2048,
+            "max-num-seqs": 128,
+            "async-scheduling": "",
+            "enable_expert_parallel": "",
+            "max-num-batched-tokens": 4096
+        },
+        "client_parameters": {
+            "model": "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
+            "backend": "vllm",
+            "dataset_name": "sharegpt",
+            "dataset_path": "./ShareGPT_V3_unfiltered_cleaned_split.json",
+            "num_prompts": 200
+        }
+    },
+    {
+        "test_name": "serving_qwen3_8b",
+        "qps_list": [1, 4, 10, "inf"],
+        "server_environment_variables": {
+            "PT_HPU_LAZY_MODE": 1,
+            "PT_HPU_ENABLE_LAZY_COLLECTIVES": 1,
+            "VLLM_CONTIGUOUS_PA": 1,
+            "VLLM_DEFRAG": 1
+        },
+        "server_parameters": {
+            "model": "Qwen/Qwen3-8B",
+            "tensor_parallel_size": 1,
+            "dtype": "bfloat16",
+            "disable_log_stats": "",
+            "async-scheduling": ""
+        },
+        "client_parameters": {
+            "model": "Qwen/Qwen3-8B",
+            "backend": "vllm",
+            "dataset_name": "sharegpt",
+            "dataset_path": "./ShareGPT_V3_unfiltered_cleaned_split.json",
+            "num_prompts": 200
+        }
     }
 ]
@@ -57,5 +57,67 @@
             "max-num-seqs": 512,
             "async-scheduling": ""
         }
+    },
+    {
+        "test_name": "throughput_deepseek_r1",
+        "environment_variables": {
+            "PT_HPU_LAZY_MODE": 1,
+            "PT_HPU_ENABLE_LAZY_COLLECTIVES": 1,
+            "VLLM_CONTIGUOUS_PA": 1,
+            "VLLM_DEFRAG": 1
+        },
+        "parameters": {
+            "model": "deepseek-ai/DeepSeek-R1",
+            "tensor_parallel_size": 8,
+            "load_format": "dummy",
+            "dataset_path": "./ShareGPT_V3_unfiltered_cleaned_split.json",
+            "dataset_name": "sharegpt",
+            "num_prompts": 1000,
+            "backend": "vllm",
+            "max-model-len": 2048,
+            "max-num-seqs": 384,
+            "async-scheduling": ""
+        }
+    },
+    {
+        "test_name": "throughput_llama4_maverick_17b128e_instruct_fp8",
+        "environment_variables": {
+            "PT_HPU_LAZY_MODE": 1,
+            "PT_HPU_ENABLE_LAZY_COLLECTIVES": 1,
+            "VLLM_CONTIGUOUS_PA": 1,
+            "VLLM_DEFRAG": 1
+        },
+        "parameters": {
+            "model": "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
+            "tensor_parallel_size": 8,
+            "dataset_path": "./ShareGPT_V3_unfiltered_cleaned_split.json",
+            "dataset_name": "sharegpt",
+            "num_prompts": 1000,
+            "backend": "vllm",
+            "max-model-len": 2048,
+            "max-num-seqs": 512,
+            "async-scheduling": "",
+            "enable_expert_parallel": ""
+        }
+    },
+    {
+        "test_name": "throughput_qwen3_8b",
+        "environment_variables": {
+            "PT_HPU_LAZY_MODE": 1,
+            "PT_HPU_ENABLE_LAZY_COLLECTIVES": 1,
+            "VLLM_CONTIGUOUS_PA": 1,
+            "VLLM_DEFRAG": 1
+        },
+        "parameters": {
+            "model": "Qwen/Qwen3-8B",
+            "tensor_parallel_size": 1,
+            "load_format": "dummy",
+            "dataset_path": "./ShareGPT_V3_unfiltered_cleaned_split.json",
+            "dataset_name": "sharegpt",
+            "num_prompts": 1000,
+            "max-num-seqs": 512,
+            "backend": "vllm",
+            "async-scheduling": ""
+        }
     }
 ]
@@ -27,7 +27,7 @@ aws s3 cp s3://vllm-wheels/${BUILDKITE_COMMIT}/vllm-${RELEASE_VERSION}+cpu-cp38-
 To download and upload the image:

 ```
-Download images:
+# Download images:

 docker pull public.ecr.aws/q9t5s3a7/vllm-release-repo:${BUILDKITE_COMMIT}-x86_64
 docker pull public.ecr.aws/q9t5s3a7/vllm-release-repo:${BUILDKITE_COMMIT}-aarch64

@@ -35,8 +35,12 @@ docker pull public.ecr.aws/q9t5s3a7/vllm-release-repo:${BUILDKITE_COMMIT}-x86_64
 docker pull public.ecr.aws/q9t5s3a7/vllm-release-repo:${BUILDKITE_COMMIT}-aarch64-cu130
 docker pull public.ecr.aws/q9t5s3a7/vllm-release-repo:${BUILDKITE_COMMIT}-rocm-base
 docker pull public.ecr.aws/q9t5s3a7/vllm-release-repo:${BUILDKITE_COMMIT}-rocm
+docker pull public.ecr.aws/q9t5s3a7/vllm-cpu-release-repo:v${RELEASE_VERSION}
+docker pull public.ecr.aws/q9t5s3a7/vllm-arm64-cpu-release-repo:v${RELEASE_VERSION}

-Tag and push images:
+# Tag and push images:
+
+## CUDA

 docker tag public.ecr.aws/q9t5s3a7/vllm-release-repo:${BUILDKITE_COMMIT}-x86_64 vllm/vllm-openai:x86_64
 docker tag vllm/vllm-openai:x86_64 vllm/vllm-openai:latest-x86_64

@@ -62,34 +66,21 @@ docker tag vllm/vllm-openai:aarch64-cu130 vllm/vllm-openai:v${RELEASE_VERSION}-a
 docker push vllm/vllm-openai:latest-aarch64-cu130
 docker push vllm/vllm-openai:v${RELEASE_VERSION}-aarch64-cu130

-docker tag public.ecr.aws/q9t5s3a7/vllm-release-repo:${BUILDKITE_COMMIT}-rocm vllm/vllm-openai-rocm:${BUILDKITE_COMMIT}-rocm
-docker tag vllm/vllm-openai-rocm:${BUILDKITE_COMMIT}-rocm vllm/vllm-openai-rocm:latest
-docker tag vllm/vllm-openai-rocm:${BUILDKITE_COMMIT}-rocm vllm/vllm-openai-rocm:v${RELEASE_VERSION}-rocm
-docker push vllm/vllm-openai-rocm:latest
-docker push vllm/vllm-openai-rocm:v${RELEASE_VERSION}-rocm
+## ROCm
+
+docker tag public.ecr.aws/q9t5s3a7/vllm-release-repo:${BUILDKITE_COMMIT}-rocm vllm/vllm-openai-rocm:${BUILDKITE_COMMIT}
+docker tag vllm/vllm-openai-rocm:${BUILDKITE_COMMIT} vllm/vllm-openai-rocm:latest
+docker tag vllm/vllm-openai-rocm:${BUILDKITE_COMMIT} vllm/vllm-openai-rocm:v${RELEASE_VERSION}
+docker push vllm/vllm-openai-rocm:latest
+docker push vllm/vllm-openai-rocm:v${RELEASE_VERSION}

-Create multi-arch manifest:
-
 docker tag public.ecr.aws/q9t5s3a7/vllm-release-repo:${BUILDKITE_COMMIT}-rocm-base vllm/vllm-openai-rocm:${BUILDKITE_COMMIT}-base
 docker tag vllm/vllm-openai-rocm:${BUILDKITE_COMMIT}-base vllm/vllm-openai-rocm:latest-base
 docker tag vllm/vllm-openai-rocm:${BUILDKITE_COMMIT}-base vllm/vllm-openai-rocm:v${RELEASE_VERSION}-base
 docker push vllm/vllm-openai-rocm:latest-base
 docker push vllm/vllm-openai-rocm:v${RELEASE_VERSION}-base

-docker manifest rm vllm/vllm-openai:latest
-docker manifest create vllm/vllm-openai:latest vllm/vllm-openai:latest-x86_64 vllm/vllm-openai:latest-aarch64
-docker manifest create vllm/vllm-openai:v${RELEASE_VERSION} vllm/vllm-openai:v${RELEASE_VERSION}-x86_64 vllm/vllm-openai:v${RELEASE_VERSION}-aarch64
-docker manifest push vllm/vllm-openai:latest
-docker manifest push vllm/vllm-openai:v${RELEASE_VERSION}
-
-docker manifest rm vllm/vllm-openai:latest-cu130
-docker manifest create vllm/vllm-openai:latest-cu130 vllm/vllm-openai:latest-x86_64-cu130 vllm/vllm-openai:latest-aarch64-cu130
-docker manifest create vllm/vllm-openai:v${RELEASE_VERSION}-cu130 vllm/vllm-openai:v${RELEASE_VERSION}-x86_64-cu130 vllm/vllm-openai:v${RELEASE_VERSION}-aarch64-cu130
-docker manifest push vllm/vllm-openai:latest-cu130
-docker manifest push vllm/vllm-openai:v${RELEASE_VERSION}-cu130
-
-# CPU images (vllm/vllm-openai-cpu)
-docker pull public.ecr.aws/q9t5s3a7/vllm-cpu-release-repo:v${RELEASE_VERSION}
-docker pull public.ecr.aws/q9t5s3a7/vllm-arm64-cpu-release-repo:v${RELEASE_VERSION}
+## CPU

 docker tag public.ecr.aws/q9t5s3a7/vllm-cpu-release-repo:v${RELEASE_VERSION} vllm/vllm-openai-cpu:x86_64
 docker tag vllm/vllm-openai-cpu:x86_64 vllm/vllm-openai-cpu:latest-x86_64

@@ -103,6 +94,20 @@ docker tag vllm/vllm-openai-cpu:arm64 vllm/vllm-openai-cpu:v${RELEASE_VERSION}-a
 docker push vllm/vllm-openai-cpu:latest-arm64
 docker push vllm/vllm-openai-cpu:v${RELEASE_VERSION}-arm64

+# Create multi-arch manifest:
+
+docker manifest rm vllm/vllm-openai:latest
+docker manifest create vllm/vllm-openai:latest vllm/vllm-openai:latest-x86_64 vllm/vllm-openai:latest-aarch64
+docker manifest create vllm/vllm-openai:v${RELEASE_VERSION} vllm/vllm-openai:v${RELEASE_VERSION}-x86_64 vllm/vllm-openai:v${RELEASE_VERSION}-aarch64
+docker manifest push vllm/vllm-openai:latest
+docker manifest push vllm/vllm-openai:v${RELEASE_VERSION}
+
+docker manifest rm vllm/vllm-openai:latest-cu130
+docker manifest create vllm/vllm-openai:latest-cu130 vllm/vllm-openai:latest-x86_64-cu130 vllm/vllm-openai:latest-aarch64-cu130
+docker manifest create vllm/vllm-openai:v${RELEASE_VERSION}-cu130 vllm/vllm-openai:v${RELEASE_VERSION}-x86_64-cu130 vllm/vllm-openai:v${RELEASE_VERSION}-aarch64-cu130
+docker manifest push vllm/vllm-openai:latest-cu130
+docker manifest push vllm/vllm-openai:v${RELEASE_VERSION}-cu130
+
 docker manifest rm vllm/vllm-openai-cpu:latest || true
 docker manifest create vllm/vllm-openai-cpu:latest vllm/vllm-openai-cpu:latest-x86_64 vllm/vllm-openai-cpu:latest-arm64
 docker manifest create vllm/vllm-openai-cpu:v${RELEASE_VERSION} vllm/vllm-openai-cpu:v${RELEASE_VERSION}-x86_64 vllm/vllm-openai-cpu:v${RELEASE_VERSION}-arm64
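After pushing, the manifest lists can be spot-checked before the release is announced; a hedged verification sketch (requires a Docker client with `manifest` support, and the exact output format may vary by version):

```bash
# Confirm each multi-arch manifest references both architectures.
docker manifest inspect vllm/vllm-openai:latest | grep '"architecture"'
docker manifest inspect vllm/vllm-openai-cpu:latest | grep '"architecture"'
```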
@@ -25,7 +25,7 @@ S3_REGION="${AWS_DEFAULT_REGION:-us-west-2}"
 S3_URL="http://${S3_BUCKET}.s3-website-${S3_REGION}.amazonaws.com"

 # Format ROCm version for path (e.g., "7.1" -> "rocm710")
-ROCM_VERSION_PATH="rocm$(echo ${ROCM_VERSION} | tr -d '.')"
+ROCM_VERSION_PATH="rocm$(echo "${ROCM_VERSION}" | tr -d '.')"
 ROCM_PATH="rocm/${BUILDKITE_COMMIT}/${ROCM_VERSION_PATH}"
 buildkite-agent annotate --style 'success' --context 'rocm-release-workflow' << EOF
 ## ROCm Wheel and Docker Image Releases

@@ -68,7 +68,7 @@ aws s3 cp s3://${S3_BUCKET}/rocm/${BUILDKITE_COMMIT}/${ROCM_VERSION_PATH}/triton
 aws s3 cp s3://${S3_BUCKET}/rocm/${BUILDKITE_COMMIT}/${ROCM_VERSION_PATH}/torchvision-*.whl .
 aws s3 cp s3://${S3_BUCKET}/rocm/${BUILDKITE_COMMIT}/${ROCM_VERSION_PATH}/torchaudio-*.whl .
 aws s3 cp s3://${S3_BUCKET}/rocm/${BUILDKITE_COMMIT}/${ROCM_VERSION_PATH}/amdsmi-*.whl .
-aws s3 cp s3://${S3_BUCKET}/rocm/${BUILDKITE_COMMIT}/${ROCM_VERSION_PATH}/aiter-*.whl .
+aws s3 cp s3://${S3_BUCKET}/rocm/${BUILDKITE_COMMIT}/${ROCM_VERSION_PATH}/amd_aiter-*.whl .
 aws s3 cp s3://${S3_BUCKET}/rocm/${BUILDKITE_COMMIT}/${ROCM_VERSION_PATH}/flash-attn-*.whl .
 ```

@@ -80,7 +80,7 @@ aws s3 cp s3://${S3_BUCKET}/rocm/${BUILDKITE_COMMIT}/${ROCM_VERSION_PATH}/flash-
 - **torchvision**: TorchVision for ROCm PyTorch
 - **torchaudio**: Torchaudio for ROCm PyTorch
 - **amdsmi**: AMD SMI Python bindings
-- **aiter**: Aiter for ROCm
+- **amd_aiter**: Aiter for ROCm
 - **flash-attn**: Flash Attention for ROCm

 ### :warning: Notes
@@ -83,7 +83,7 @@ case "${1:-}" in
       exit 1
     fi

-    WHEEL_COUNT=$(ls artifacts/rocm-base-wheels/*.whl 2>/dev/null | wc -l)
+    WHEEL_COUNT=$(find artifacts/rocm-base-wheels -maxdepth 1 -name '*.whl' 2>/dev/null | wc -l)
     if [[ "$WHEEL_COUNT" -eq 0 ]]; then
       echo "ERROR: No wheels found in artifacts/rocm-base-wheels/" >&2
       exit 1

@@ -110,9 +110,9 @@ case "${1:-}" in

     echo ""
     echo "Downloaded wheels:"
-    ls -lh artifacts/rocm-base-wheels/
+    find artifacts/rocm-base-wheels -maxdepth 1 -name '*.whl' -exec ls -lh {} \;

-    WHEEL_COUNT=$(ls artifacts/rocm-base-wheels/*.whl 2>/dev/null | wc -l)
+    WHEEL_COUNT=$(find artifacts/rocm-base-wheels -maxdepth 1 -name '*.whl' 2>/dev/null | wc -l)
     echo ""
     echo "Total: $WHEEL_COUNT wheels"
     echo "========================================"
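The switch from an `ls` glob to `find` makes the count robust to empty directories and unusual file names; a quick sanity sketch under scratch paths (illustrative):

```bash
# Two wheels present -> count is 2; an empty directory -> count is 0,
# with no reliance on shell glob behavior.
mkdir -p /tmp/wheel-demo && touch /tmp/wheel-demo/a.whl /tmp/wheel-demo/b.whl
find /tmp/wheel-demo -maxdepth 1 -name '*.whl' | wc -l
```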
.buildkite/scripts/check-ray-compatibility.sh (new file, 205 lines)
@@ -0,0 +1,205 @@
#!/bin/bash
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
#
# Check if Ray LLM can generate lock files that are compatible with this
# version of vllm. Downloads Ray's requirement files and runs a full
# dependency resolution with the installed vllm's constraints to see if
# a valid lock file can be produced.
#
# See: https://github.com/vllm-project/vllm/issues/33599

set -eo pipefail

RAY_BASE_URL="https://raw.githubusercontent.com/ray-project/ray/master/python"

WORK_DIR=$(mktemp -d)
trap 'rm -rf "$WORK_DIR"' EXIT

# Fetch all Ray requirement files used in the LLM depset pipeline
echo ">>> Fetching Ray requirement files"
RAY_FILES=(
    "requirements.txt"
    "requirements/cloud-requirements.txt"
    "requirements/base-test-requirements.txt"
    "requirements/llm/llm-requirements.txt"
    "requirements/llm/llm-test-requirements.txt"
)
for FILE in "${RAY_FILES[@]}"; do
    LOCAL_PATH="${WORK_DIR}/$(basename "$FILE")"
    echo "    ${FILE}"
    curl -fsSL -o "$LOCAL_PATH" "${RAY_BASE_URL}/${FILE}"
done

# Extract installed vllm deps
echo ">>> Extracting installed vllm dependency constraints"
python3 - "${WORK_DIR}/vllm-constraints.txt" <<'PYEOF'
"""Write out the installed vllm's dependencies as pip constraint lines.

Ray uses vllm[audio], so audio-extra deps are included with their extra
markers stripped. The resolver cannot evaluate extra markers for a
package that is not itself being resolved from an index, so we activate
them manually here.
"""
import importlib.metadata
import re
import sys

out_path = sys.argv[1]
raw_reqs = importlib.metadata.requires("vllm") or []

# Ray uses vllm[audio] – activate that extra.
ACTIVE_EXTRAS = {"audio"}
EXTRA_RE = re.compile(r"""extra\s*==\s*['"]([^'"]+)['"]""")

lines = []
for r in raw_reqs:
    if ";" not in r:
        # Unconditional dep — always include.
        lines.append(r.strip())
        continue

    req_part, _, marker_part = r.partition(";")
    marker_part = marker_part.strip()

    extra_matches = EXTRA_RE.findall(marker_part)
    if not extra_matches:
        # Non-extra marker (python_version, etc.) — keep as-is.
        lines.append(r.strip())
        continue

    if not ACTIVE_EXTRAS.intersection(extra_matches):
        continue  # Skip inactive extras (tensorizer, bench, …).

    # Strip the extra== conditions but keep any remaining markers
    # (e.g. python_version).
    cleaned = EXTRA_RE.sub("", marker_part)
    cleaned = re.sub(r"\band\b\s*\band\b", "and", cleaned)
    cleaned = re.sub(r"^\s*and\s+|\s+and\s*$", "", cleaned).strip()

    if cleaned:
        lines.append(f"{req_part.strip()} ; {cleaned}")
    else:
        lines.append(req_part.strip())

with open(out_path, "w") as f:
    for line in lines:
        f.write(line + "\n")

print(f"Wrote {len(lines)} constraints to {out_path}")
PYEOF

echo ">>> Installed vllm deps (first 20 lines):"
head -20 "${WORK_DIR}/vllm-constraints.txt"

# Remove Ray's vllm pin — the installed vllm's transitive deps
# (written above) replace it in the resolution. vllm itself cannot
# be resolved from PyPI for in-development versions, so we test
# whether Ray's requirements can coexist with vllm's dependency
# constraints instead.
sed -i '/^vllm/d' "${WORK_DIR}/llm-requirements.txt"

# Install uv if needed
if ! command -v uv &>/dev/null; then
    echo ">>> Installing uv"
    pip install uv -q
fi

# Resolve: given vllm's constraints, can Ray compile a lock file?
#
# vllm's dependency constraints are the fixed side — Ray is flexible and
# can regenerate its lock files. We pass vllm's constraints via -c so
# the resolver treats them as non-negotiable bounds, then check whether
# Ray's own requirements can still be satisfied within those bounds.
echo ""
echo "============================================================"
echo ">>> Resolving: Can Ray generate compatible lock files?"
echo "============================================================"

set +e
uv pip compile \
    "${WORK_DIR}/requirements.txt" \
    "${WORK_DIR}/cloud-requirements.txt" \
    "${WORK_DIR}/base-test-requirements.txt" \
    "${WORK_DIR}/llm-requirements.txt" \
    "${WORK_DIR}/llm-test-requirements.txt" \
    -c "${WORK_DIR}/vllm-constraints.txt" \
    --python-version 3.12 \
    --python-platform x86_64-manylinux_2_31 \
    --extra-index-url https://download.pytorch.org/whl/cu129 \
    --index-strategy unsafe-best-match \
    --unsafe-package setuptools \
    --unsafe-package ray \
    --no-header \
    -o "${WORK_DIR}/resolved.txt" \
    2>&1
EXIT_CODE=$?
set -e

echo ""
echo "=========================================="
if [ $EXIT_CODE -eq 0 ]; then
    echo "SUCCESS: Ray can generate lock files compatible with this vllm."
    echo ""
    echo "Key resolved versions:"
    grep -E '^(protobuf|torch|numpy|transformers)==' \
        "${WORK_DIR}/resolved.txt" | sort || true
    echo "=========================================="
    exit 0
fi

echo "FAILURE: Ray cannot generate lock files compatible with this vllm."
echo "This means a fundamental dependency conflict exists that Ray"
echo "cannot resolve by regenerating its lock files."
echo "See: https://github.com/vllm-project/vllm/issues/33599"
echo "=========================================="

# Buildkite annotation
if [ -f /usr/bin/buildkite-agent ]; then
    buildkite-agent annotate --style 'warning' --context 'ray-compat' << EOF
### :warning: Ray Dependency Compatibility Warning
This PR introduces dependencies that **cannot** be resolved with Ray's requirements.
Ray would not be able to regenerate its lock files to accommodate this vllm version.

Please check the **Ray Dependency Compatibility Check** step logs for details.
See [issue #33599](https://github.com/vllm-project/vllm/issues/33599) for context.
EOF
fi

# Notify Slack if webhook is configured.
if [ -n "$RAY_COMPAT_SLACK_WEBHOOK_URL" ]; then
    echo ">>> Sending Slack notification"
    # Single quotes are intentional: the f-string expressions are Python, not shell.
    # shellcheck disable=SC2016
    PAYLOAD=$(python3 -c '
import json, os, sys
pr = os.getenv("BUILDKITE_PULL_REQUEST", "N/A")
branch = os.getenv("BUILDKITE_BRANCH", "unknown")
url = os.getenv("BUILDKITE_BUILD_URL", "#")
data = {
    "text": ":warning: Ray Dependency Compatibility Check Failed",
    "blocks": [{
        "type": "section",
        "text": {
            "type": "mrkdwn",
            "text": (
                "*:warning: Ray Dependency Compatibility Check Failed*\n"
                f"PR #{pr} on branch `{branch}` introduces dependencies "
                f"that cannot be resolved with Ray'\''s requirements.\n"
                f"<{url}|View Build>"
            ),
        },
    }],
}
print(json.dumps(data))
')

    HTTP_CODE=$(curl -s -o /dev/null -w "%{http_code}" -X POST "$RAY_COMPAT_SLACK_WEBHOOK_URL" \
        -H 'Content-type: application/json' \
        -d "$PAYLOAD")
    echo "  Slack webhook response: $HTTP_CODE"
else
    echo ">>> Skipping Slack notification (RAY_COMPAT_SLACK_WEBHOOK_URL not set)"
fi

exit 1
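Running the check locally mirrors the CI step; the only hard requirements are an environment where `vllm` is installed (so `importlib.metadata.requires("vllm")` resolves), plus `python3` and `curl`. A hedged local-run sketch:

```bash
# From a virtualenv with vllm installed; leave the webhook unset to skip Slack.
unset RAY_COMPAT_SLACK_WEBHOOK_URL
bash .buildkite/scripts/check-ray-compatibility.sh
echo "exit status: $?"   # 0 = Ray can regenerate compatible lock files
```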
@@ -134,7 +134,7 @@ log_info "Fetching merged PRs from milestone '${MILESTONE}'..."

 # Store PR data in a temp file
 PR_DATA=$(mktemp)
-trap "rm -f $PR_DATA" EXIT
+trap 'rm -f "$PR_DATA"' EXIT

 if ! gh pr list --state merged --search "milestone:${MILESTONE}" \
     --limit 1000 \
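The trap quoting fix is subtle: with double quotes, `$PR_DATA` is expanded when the trap is installed and the path runs unquoted at exit, so a path containing whitespace would be word-split; the single-quoted form defers expansion to exit time and keeps the path quoted. A small sketch of the failure mode (the directory name is contrived, and each `trap` line replaces the previous one, so only the last is active):

```bash
# Contrived demo: a temp file whose path contains a space.
tmp="$(mktemp -d '/tmp/demo dir.XXXXXX')/data.txt"; touch "$tmp"
trap "rm -f $tmp" EXIT    # expands now, runs unquoted -> splits on the space
trap 'rm -f "$tmp"' EXIT  # expands at exit, quoted -> removes the right file
```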
@@ -1,25 +1,57 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
|
|
||||||
# This script runs test inside the corresponding ROCm docker container.
|
# This script runs tests inside the corresponding ROCm docker container.
|
||||||
|
# It handles both single-node and multi-node test configurations.
|
||||||
|
#
|
||||||
|
# Multi-node detection: Instead of matching on fragile group names, we detect
|
||||||
|
# multi-node jobs structurally by looking for the bracket command syntax
|
||||||
|
# "[node0_cmds] && [node1_cmds]" or via the NUM_NODES environment variable.
|
||||||
|
#
|
||||||
|
###############################################################################
|
||||||
|
# QUOTING / COMMAND PASSING
|
||||||
|
#
|
||||||
|
# Passing commands as positional arguments ($*) is fragile when the command
|
||||||
|
# string itself contains double quotes, e.g.:
|
||||||
|
#
|
||||||
|
# bash run-amd-test.sh "export FLAGS="value" && pytest -m "not slow""
|
||||||
|
#
|
||||||
|
# The outer shell resolves the nested quotes *before* this script runs, so
|
||||||
|
# the script receives mangled input it cannot fully recover.
|
||||||
|
#
|
||||||
|
# Preferred: pass commands via the VLLM_TEST_COMMANDS environment variable:
|
||||||
|
#
|
||||||
|
# export VLLM_TEST_COMMANDS='export FLAGS="value" && pytest -m "not slow"'
|
||||||
|
# bash run-amd-test.sh
|
||||||
|
#
|
||||||
|
# Single-quoted assignment preserves all inner double quotes verbatim.
|
||||||
|
# The $* path is kept for backward compatibility but callers should migrate.
|
||||||
|
###############################################################################
|
||||||
set -o pipefail
|
set -o pipefail
|
||||||
|
|
||||||
# Export Python path
|
# Export Python path
|
||||||
export PYTHONPATH=".."
|
export PYTHONPATH=".."
|
||||||
|
|
||||||
# Print ROCm version
|
###############################################################################
|
||||||
echo "--- Confirming Clean Initial State"
|
# Helper Functions
|
||||||
while true; do
|
###############################################################################
|
||||||
sleep 3
|
|
||||||
if grep -q clean /opt/amdgpu/etc/gpu_state; then
|
|
||||||
echo "GPUs state is \"clean\""
|
|
||||||
break
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
|
|
||||||
echo "--- ROCm info"
|
wait_for_clean_gpus() {
|
||||||
rocminfo
|
local timeout=${1:-300}
|
||||||
|
local start=$SECONDS
|
||||||
|
echo "--- Waiting for clean GPU state (timeout: ${timeout}s)"
|
||||||
|
while true; do
|
||||||
|
if grep -q clean /opt/amdgpu/etc/gpu_state; then
|
||||||
|
echo "GPUs state is \"clean\""
|
||||||
|
return
|
||||||
|
fi
|
||||||
|
if (( SECONDS - start >= timeout )); then
|
||||||
|
echo "Error: GPUs did not reach clean state within ${timeout}s" >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
sleep 3
|
||||||
|
done
|
||||||
|
}
|
||||||
|
|
||||||
# cleanup older docker images
|
|
||||||
cleanup_docker() {
|
cleanup_docker() {
|
||||||
# Get Docker's root directory
|
# Get Docker's root directory
|
||||||
docker_root=$(docker info -f '{{.DockerRootDir}}')
|
docker_root=$(docker info -f '{{.DockerRootDir}}')
|
||||||
@@ -28,15 +60,12 @@ cleanup_docker() {
        exit 1
    fi
    echo "Docker root directory: $docker_root"
-    # Check disk usage of the filesystem where Docker's root directory is located
    disk_usage=$(df "$docker_root" | tail -1 | awk '{print $5}' | sed 's/%//')
-    # Define the threshold
    threshold=70
    if [ "$disk_usage" -gt "$threshold" ]; then
        echo "Disk usage is above $threshold%. Cleaning up Docker images and volumes..."
-        # Remove dangling images (those that are not tagged and not used by any container)
        docker image prune -f
-        # Remove unused volumes / force the system prune for old images as well.
        docker volume prune -f && docker system prune --force --filter "until=72h" --all
        echo "Docker images and volumes cleanup completed."
    else
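For readers skimming the disk_usage pipeline above: df prints the filesystem stats for Docker's root, tail -1 drops the header row, awk '{print $5}' selects the Use% column, and sed strips the percent sign, leaving a bare integer to compare against the threshold. With invented sample output:

    # $ df /var/lib/docker | tail -1
    # /dev/nvme0n1p2  491075568 362000000 104000000  78% /
    # awk '{print $5}' -> "78%"   sed 's/%//' -> 78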
@@ -45,193 +74,431 @@ cleanup_docker() {
}

cleanup_network() {
-    for node in $(seq 0 $((NUM_NODES-1))); do
-        if docker pr -a -q -f name="node${node}" | grep -q .; then
-            docker stop "node${node}"
+    local max_nodes=${NUM_NODES:-2}
+    for node in $(seq 0 $((max_nodes - 1))); do
+        if docker ps -a -q -f name="node${node}" | grep -q .; then
+            docker stop "node${node}" || true
        fi
    done
-    if docker network ls | grep docker-net; then
-        docker network rm docker-net
+    if docker network ls | grep -q docker-net; then
+        docker network rm docker-net || true
    fi
}

-# Call the cleanup docker function
+is_multi_node() {
+    local cmds="$1"
+    # Primary signal: NUM_NODES environment variable set by the pipeline
+    if [[ "${NUM_NODES:-1}" -gt 1 ]]; then
+        return 0
+    fi
+    # Fallback: detect the bracket syntax structurally
+    # Pattern: [...] && [...] (per-node command arrays)
+    if [[ "$cmds" =~ \[.*\].*\&\&.*\[.*\] ]]; then
+        return 0
+    fi
+    return 1
+}

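A sketch of the two detection paths (commands invented for illustration):

    NUM_NODES=2 is_multi_node "pytest -v -s distributed" && echo multi-node   # env signal
    is_multi_node "[cmd_a, cmd_b] && [cmd_c, cmd_d]" && echo multi-node       # bracket syntax
    is_multi_node "pytest -v -s v1/core" || echo single-node                  # neither fires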
+handle_pytest_exit() {
+    local exit_code=$1
+    if [ "$exit_code" -eq 5 ]; then
+        echo "Pytest exit code 5 (no tests collected) - treating as success."
+        exit 0
+    fi
+    exit "$exit_code"
+}

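pytest exits with code 5 when it collects zero tests, which can legitimately happen here once the ROCm --ignore filters remove every test in a shard; the helper converts exactly that case to success and propagates everything else. It is used later as:

    /bin/bash -c "${commands}"
    exit_code=$?
    handle_pytest_exit "$exit_code"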
+###############################################################################
+# Pytest marker/keyword re-quoting
+#
+# When commands are passed through Buildkite -> shell -> $* -> bash -c,
+# quotes around multi-word pytest -m/-k expressions get stripped:
+#     pytest -v -s -m 'not cpu_test' v1/core
+# becomes:
+#     pytest -v -s -m not cpu_test v1/core
+#
+# pytest then interprets "cpu_test" as a file path, not part of the marker.
+#
+# This function detects unquoted expressions after -m/-k and re-quotes them
+# by collecting tokens until a recognizable boundary is reached:
+#   - test path (contains '/')
+#   - test file (ends with '.py')
+#   - another pytest flag (--xxx or -x single-char flags)
+#   - command separator (&& || ; |)
+#   - environment variable assignment (FOO=bar)
+#
+# Single-word markers (e.g. -m cpu_test, -m hybrid_model) pass through
+# unquoted since they have no spaces and work fine.
+#
+# Already-quoted expressions (containing literal single quotes) are passed
+# through untouched to avoid double-quoting values injected by
+# apply_rocm_test_overrides.
+#
+# NOTE: This ONLY fixes -m/-k flags. It cannot recover arbitrary inner
+# double-quotes stripped by the calling shell (see header comment).
+# Use VLLM_TEST_COMMANDS to avoid the problem entirely.
+###############################################################################
+re_quote_pytest_markers() {
+    local input="$1"
+    local output=""
+    local collecting=false
+    local marker_buf=""
+
+    # Strip backslash-newline continuations, then flatten remaining newlines
+    local flat="${input//$'\\\n'/ }"
+    flat="${flat//$'\n'/ }"
+
+    # Disable globbing to prevent *.py etc. from expanding during read -ra
+    local restore_glob
+    restore_glob="$(shopt -p -o noglob 2>/dev/null || true)"
+    set -o noglob
+    local -a words
+    read -ra words <<< "$flat"
+    eval "$restore_glob"
+
+    for word in "${words[@]}"; do
+        if $collecting; then
+            # If the token we're about to collect already contains a literal
+            # single quote, the expression was already quoted upstream.
+            # Flush and stop collecting.
+            if [[ "$word" == *"'"* ]]; then
+                if [[ -n "$marker_buf" ]]; then
+                    # Should not normally happen (partial buf + quote), flush raw
+                    output+="${marker_buf} "
+                    marker_buf=""
+                fi
+                output+="${word} "
+                collecting=false
+                continue
+            fi
+
+            local is_boundary=false
+            case "$word" in
+                # Line-continuation artifact
+                "\\")
+                    is_boundary=true ;;
+                # Command separators
+                "&&"|"||"|";"|"|")
+                    is_boundary=true ;;
+                # Long flags (--ignore, --shard-id, etc.)
+                --*)
+                    is_boundary=true ;;
+                # Short flags (-v, -s, -x, etc.) but NOT negative marker tokens
+                # like "not" which don't start with "-". Also skip -k/-m which
+                # would start a new marker (handled below).
+                -[a-zA-Z])
+                    is_boundary=true ;;
+                # Test path (contains /)
+                */*)
+                    is_boundary=true ;;
+                # Test file (ends with .py, possibly with ::method)
+                *.py|*.py::*)
+                    is_boundary=true ;;
+                # Environment variable assignment preceding a command (FOO=bar)
+                *=*)
+                    # Only treat as boundary if it looks like VAR=value, not
+                    # pytest filter expressions like num_gpus=2 inside markers
+                    if [[ "$word" =~ ^[A-Z_][A-Z0-9_]*= ]]; then
+                        is_boundary=true
+                    fi
+                    ;;
+            esac
+
+            if $is_boundary; then
+                # Flush the collected marker expression
+                if [[ "$marker_buf" == *" "* || "$marker_buf" == *"("* ]]; then
+                    output+="'${marker_buf}' "
+                else
+                    output+="${marker_buf} "
+                fi
+                collecting=false
+                marker_buf=""
+                # Check if this boundary word itself starts a new -m/-k
+                if [[ "$word" == "-m" || "$word" == "-k" ]]; then
+                    output+="${word} "
+                    collecting=true
+                # Drop stray backslash tokens silently
+                elif [[ "$word" == "\\" ]]; then
+                    :
+                else
+                    output+="${word} "
+                fi
+            else
+                # Accumulate into marker buffer
+                if [[ -n "$marker_buf" ]]; then
+                    marker_buf+=" ${word}"
+                else
+                    marker_buf="${word}"
+                fi
+            fi
+        elif [[ "$word" == "-m" || "$word" == "-k" ]]; then
+            output+="${word} "
+            collecting=true
+            marker_buf=""
+        else
+            output+="${word} "
+        fi
+    done
+
+    # Flush any trailing marker expression (marker at end of command)
+    if $collecting && [[ -n "$marker_buf" ]]; then
+        if [[ "$marker_buf" == *" "* || "$marker_buf" == *"("* ]]; then
+            output+="'${marker_buf}'"
+        else
+            output+="${marker_buf}"
+        fi
+    fi
+
+    echo "${output% }"
+}

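Before/after sketch of the re-quoting (the first input is the case from the comment above; the others are illustrative):

    re_quote_pytest_markers "pytest -v -s -m not cpu_test v1/core"
    # -> pytest -v -s -m 'not cpu_test' v1/core

    re_quote_pytest_markers "pytest -k not slow and not flaky tests/test_x.py"
    # -> pytest -k 'not slow and not flaky' tests/test_x.py

    re_quote_pytest_markers "pytest -v -s -m cpu_test v1/core"
    # -> pytest -v -s -m cpu_test v1/core   (single-word marker, left alone)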
+###############################################################################
+# ROCm-specific pytest command rewrites
+#
+# These apply ignore flags and environment overrides for tests that are not
+# yet supported or behave differently on ROCm hardware. Kept as a single
+# function so new exclusions are easy to add in one place.
+###############################################################################
+
+apply_rocm_test_overrides() {
+    local cmds="$1"
+
+    # --- Model registry filter ---
+    if [[ $cmds == *"pytest -v -s models/test_registry.py"* ]]; then
+        cmds=${cmds//"pytest -v -s models/test_registry.py"/"pytest -v -s models/test_registry.py -k 'not BambaForCausalLM and not GritLM and not Mamba2ForCausalLM and not Zamba2ForCausalLM'"}
+    fi
+
+    # --- LoRA: disable custom paged attention ---
+    if [[ $cmds == *"pytest -v -s lora"* ]]; then
+        cmds=${cmds//"pytest -v -s lora"/"VLLM_ROCM_CUSTOM_PAGED_ATTN=0 pytest -v -s lora"}
+    fi
+
+    # --- Kernel ignores ---
+    if [[ $cmds == *" kernels/core"* ]]; then
+        cmds="${cmds} \
+        --ignore=kernels/core/test_fused_quant_layernorm.py \
+        --ignore=kernels/core/test_permute_cols.py"
+    fi
+
+    if [[ $cmds == *" kernels/attention"* ]]; then
+        cmds="${cmds} \
+        --ignore=kernels/attention/test_attention_selector.py \
+        --ignore=kernels/attention/test_encoder_decoder_attn.py \
+        --ignore=kernels/attention/test_flash_attn.py \
+        --ignore=kernels/attention/test_flashinfer.py \
+        --ignore=kernels/attention/test_prefix_prefill.py \
+        --ignore=kernels/attention/test_cascade_flash_attn.py \
+        --ignore=kernels/attention/test_mha_attn.py \
+        --ignore=kernels/attention/test_lightning_attn.py \
+        --ignore=kernels/attention/test_attention.py"
+    fi
+
+    if [[ $cmds == *" kernels/quantization"* ]]; then
+        cmds="${cmds} \
+        --ignore=kernels/quantization/test_int8_quant.py \
+        --ignore=kernels/quantization/test_machete_mm.py \
+        --ignore=kernels/quantization/test_block_fp8.py \
+        --ignore=kernels/quantization/test_block_int8.py \
+        --ignore=kernels/quantization/test_marlin_gemm.py \
+        --ignore=kernels/quantization/test_cutlass_scaled_mm.py \
+        --ignore=kernels/quantization/test_int8_kernel.py"
+    fi
+
+    if [[ $cmds == *" kernels/mamba"* ]]; then
+        cmds="${cmds} \
+        --ignore=kernels/mamba/test_mamba_mixer2.py \
+        --ignore=kernels/mamba/test_causal_conv1d.py \
+        --ignore=kernels/mamba/test_mamba_ssm_ssd.py"
+    fi
+
+    if [[ $cmds == *" kernels/moe"* ]]; then
+        cmds="${cmds} \
+        --ignore=kernels/moe/test_moe.py \
+        --ignore=kernels/moe/test_cutlass_moe.py \
+        --ignore=kernels/moe/test_triton_moe_ptpc_fp8.py"
+    fi
+
+    # --- Entrypoint ignores ---
+    if [[ $cmds == *" entrypoints/openai "* ]]; then
+        cmds=${cmds//" entrypoints/openai "/" entrypoints/openai \
+        --ignore=entrypoints/openai/test_audio.py \
+        --ignore=entrypoints/openai/test_shutdown.py \
+        --ignore=entrypoints/openai/test_completion.py \
+        --ignore=entrypoints/openai/test_models.py \
+        --ignore=entrypoints/openai/test_lora_adapters.py \
+        --ignore=entrypoints/openai/test_return_tokens_as_ids.py \
+        --ignore=entrypoints/openai/test_root_path.py \
+        --ignore=entrypoints/openai/test_tokenization.py \
+        --ignore=entrypoints/openai/test_prompt_validation.py "}
+    fi
+
+    if [[ $cmds == *" entrypoints/llm "* ]]; then
+        cmds=${cmds//" entrypoints/llm "/" entrypoints/llm \
+        --ignore=entrypoints/llm/test_chat.py \
+        --ignore=entrypoints/llm/test_accuracy.py \
+        --ignore=entrypoints/llm/test_init.py \
+        --ignore=entrypoints/llm/test_prompt_validation.py "}
+    fi
+
+    # Clean up escaped newlines from --ignore appends
+    cmds=$(echo "$cmds" | sed 's/ \\ / /g')
+
+    echo "$cmds"
+}

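A sketch of the rewrite in action (the input command is illustrative):

    apply_rocm_test_overrides "cd /vllm-workspace/tests && pytest -v -s lora"
    # -> cd /vllm-workspace/tests && VLLM_ROCM_CUSTOM_PAGED_ATTN=0 pytest -v -s lora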
+###############################################################################
+# Main
+###############################################################################
+
+# --- GPU initialization ---
+echo "--- Confirming Clean Initial State"
+wait_for_clean_gpus
+
+echo "--- ROCm info"
+rocminfo
+
+# --- Docker housekeeping ---
cleanup_docker

echo "--- Resetting GPUs"

echo "reset" > /opt/amdgpu/etc/gpu_state
+wait_for_clean_gpus

-while true; do
-    sleep 3
-    if grep -q clean /opt/amdgpu/etc/gpu_state; then
-        echo "GPUs state is \"clean\""
-        break
-    fi
-done
+# --- Pull test image ---

echo "--- Pulling container"
image_name="rocm/vllm-ci:${BUILDKITE_COMMIT}"
container_name="rocm_${BUILDKITE_COMMIT}_$(tr -dc A-Za-z0-9 < /dev/urandom | head -c 10; echo)"
docker pull "${image_name}"

remove_docker_container() {
    docker rm -f "${container_name}" || docker image rm -f "${image_name}" || true
}
trap remove_docker_container EXIT

+# --- Prepare commands ---
echo "--- Running container"

HF_CACHE="$(realpath ~)/huggingface"
mkdir -p "${HF_CACHE}"
HF_MOUNT="/root/.cache/huggingface"

-commands=$@
+# ---- Command source selection ----
+# Prefer VLLM_TEST_COMMANDS (preserves all inner quoting intact).
+# Fall back to $* for backward compatibility, but warn that inner
+# double-quotes will have been stripped by the calling shell.
+if [[ -n "${VLLM_TEST_COMMANDS:-}" ]]; then
+    commands="${VLLM_TEST_COMMANDS}"
+    echo "Commands sourced from VLLM_TEST_COMMANDS (quoting preserved)"
+else
+    commands="$*"
+    if [[ -z "$commands" ]]; then
+        echo "Error: No test commands provided." >&2
+        echo "Usage:" >&2
+        echo "  Preferred: VLLM_TEST_COMMANDS='...' bash $0" >&2
+        echo "  Legacy:    bash $0 \"commands here\"" >&2
+        exit 1
+    fi
+    echo "Commands sourced from positional args (legacy mode)"
+    echo "WARNING: Inner double-quotes in the command string may have been"
+    echo "  stripped by the calling shell. If you see syntax errors, switch to:"
+    echo "    export VLLM_TEST_COMMANDS='your commands here'"
+    echo "    bash $0"
+fi

echo "Raw commands: $commands"

-commands=${commands//"pytest -v -s basic_correctness/test_basic_correctness.py"/"pytest -v -s basic_correctness/test_basic_correctness.py"}
-
-if [[ $commands == *"pytest -v -s models/test_registry.py"* ]]; then
-    commands=${commands//"pytest -v -s models/test_registry.py"/"pytest -v -s models/test_registry.py -k 'not BambaForCausalLM and not GritLM and not Mamba2ForCausalLM and not Zamba2ForCausalLM'"}
-fi
-
-commands=${commands//"pytest -v -s compile/test_basic_correctness.py"/"pytest -v -s compile/test_basic_correctness.py"}
-
-if [[ $commands == *"pytest -v -s lora"* ]]; then
-    commands=${commands//"pytest -v -s lora"/"VLLM_ROCM_CUSTOM_PAGED_ATTN=0 pytest -v -s lora"}
-fi
-
-#ignore certain kernels tests
-if [[ $commands == *" kernels/core"* ]]; then
-    commands="${commands} \
-    --ignore=kernels/core/test_fused_quant_layernorm.py \
-    --ignore=kernels/core/test_permute_cols.py"
-fi
-
-if [[ $commands == *" kernels/attention"* ]]; then
-    commands="${commands} \
-    --ignore=kernels/attention/test_attention_selector.py \
-    --ignore=kernels/attention/test_encoder_decoder_attn.py \
-    --ignore=kernels/attention/test_flash_attn.py \
-    --ignore=kernels/attention/test_flashinfer.py \
-    --ignore=kernels/attention/test_prefix_prefill.py \
-    --ignore=kernels/attention/test_cascade_flash_attn.py \
-    --ignore=kernels/attention/test_mha_attn.py \
-    --ignore=kernels/attention/test_lightning_attn.py \
-    --ignore=kernels/attention/test_attention.py"
-fi
-
-if [[ $commands == *" kernels/quantization"* ]]; then
-    commands="${commands} \
-    --ignore=kernels/quantization/test_int8_quant.py \
-    --ignore=kernels/quantization/test_machete_mm.py \
-    --ignore=kernels/quantization/test_block_fp8.py \
-    --ignore=kernels/quantization/test_block_int8.py \
-    --ignore=kernels/quantization/test_marlin_gemm.py \
-    --ignore=kernels/quantization/test_cutlass_scaled_mm.py \
-    --ignore=kernels/quantization/test_int8_kernel.py"
-fi
-
-if [[ $commands == *" kernels/mamba"* ]]; then
-    commands="${commands} \
-    --ignore=kernels/mamba/test_mamba_mixer2.py \
-    --ignore=kernels/mamba/test_causal_conv1d.py \
-    --ignore=kernels/mamba/test_mamba_ssm_ssd.py"
-fi
-
-if [[ $commands == *" kernels/moe"* ]]; then
-    commands="${commands} \
-    --ignore=kernels/moe/test_moe.py \
-    --ignore=kernels/moe/test_cutlass_moe.py \
-    --ignore=kernels/moe/test_triton_moe_ptpc_fp8.py"
-fi
-
-#ignore certain Entrypoints/openai tests
-if [[ $commands == *" entrypoints/openai "* ]]; then
-    commands=${commands//" entrypoints/openai "/" entrypoints/openai \
-    --ignore=entrypoints/openai/test_audio.py \
-    --ignore=entrypoints/openai/test_shutdown.py \
-    --ignore=entrypoints/openai/test_completion.py \
-    --ignore=entrypoints/openai/test_models.py \
-    --ignore=entrypoints/openai/test_lora_adapters.py \
-    --ignore=entrypoints/openai/test_return_tokens_as_ids.py \
-    --ignore=entrypoints/openai/test_root_path.py \
-    --ignore=entrypoints/openai/test_tokenization.py \
-    --ignore=entrypoints/openai/test_prompt_validation.py "}
-fi
-
-#ignore certain Entrypoints/llm tests
-if [[ $commands == *" entrypoints/llm "* ]]; then
-    commands=${commands//" entrypoints/llm "/" entrypoints/llm \
-    --ignore=entrypoints/llm/test_chat.py \
-    --ignore=entrypoints/llm/test_accuracy.py \
-    --ignore=entrypoints/llm/test_init.py \
-    --ignore=entrypoints/llm/test_prompt_validation.py "}
-fi
-
-commands=$(echo "$commands" | sed 's/ \\ / /g')
+# Fix quoting before ROCm overrides (so overrides see correct structure)
+commands=$(re_quote_pytest_markers "$commands")
+echo "After re-quoting: $commands"
+
+commands=$(apply_rocm_test_overrides "$commands")

echo "Final commands: $commands"

-# --ignore=entrypoints/openai/test_encoder_decoder.py \
-# --ignore=entrypoints/openai/test_embedding.py \
-# --ignore=entrypoints/openai/test_oot_registration.py
-# --ignore=entrypoints/openai/test_accuracy.py \
-# --ignore=entrypoints/openai/test_models.py <= Fails on MI250 but passes on MI300 as of 2025-03-13

MYPYTHONPATH=".."

-# Test that we're launching on the machine that has
-# proper access to GPUs
+# Verify GPU access
render_gid=$(getent group render | cut -d: -f3)
if [[ -z "$render_gid" ]]; then
    echo "Error: 'render' group not found. This is required for GPU access." >&2
    exit 1
fi

-if [[ $commands == *"VLLM_TEST_GROUP_NAME=mi325_4-2-node-tests-4-gpus-in-total"* ]]; then
+# --- RDMA device passthrough (conditional) ---
+# If the host has RDMA devices, pass them through so tests like
+# test_moriio_connector can access ibverbs. On hosts without RDMA
+# hardware the tests will gracefully skip via _rdma_available().
+RDMA_FLAGS=""
+if [ -d /dev/infiniband ]; then
+    echo "RDMA devices detected on host, enabling passthrough"
+    RDMA_FLAGS="--device /dev/infiniband --cap-add=IPC_LOCK"
+else
+    echo "No RDMA devices found on host, RDMA tests will be skipped"
+fi
+
+# --- Route: multi-node vs single-node ---
+if is_multi_node "$commands"; then
+    echo "--- Multi-node job detected"
    export DCKR_VER=$(docker --version | sed 's/Docker version \(.*\), build .*/\1/')

-    if [[ "$commands" =~ ^(.*)"["(.*)"] && ["(.*)"]"$ ]]; then
-        prefix=$( echo "${BASH_REMATCH[1]}" | sed 's/;//g')
-        echo "PREFIX: ${prefix}"
-        export composite_command="(command rocm-smi || true)"
-        myIFS=$IFS
-        IFS=','
-        read -ra node0 <<< ${BASH_REMATCH[2]}
-        read -ra node1 <<< ${BASH_REMATCH[3]}
-        IFS=$myIFS
-        for i in "${!node0[@]}";do
-            command_node_0=$(echo ${node0[i]} | sed 's/\"//g')
-            command_node_1=$(echo ${node1[i]} | sed 's/\"//g')
-            export commands="./.buildkite/scripts/run-multi-node-test.sh /vllm-workspace/tests 2 2 ${image_name} '${command_node_0}' '${command_node_1}'"
-            echo "COMMANDS: ${commands}"
-            composite_command=$(echo "${composite_command} && ${commands}")
-        done
-        /bin/bash -c "${composite_command}"
-        cleanup_network
+    # Parse the bracket syntax: prefix ; [node0_cmds] && [node1_cmds]
+    # BASH_REMATCH[1] = prefix (everything before first bracket)
+    # BASH_REMATCH[2] = comma-separated node0 commands
+    # BASH_REMATCH[3] = comma-separated node1 commands
+    if [[ "$commands" =~ ^(.*)\[(.*)"] && ["(.*)\]$ ]]; then
+        prefix=$(echo "${BASH_REMATCH[1]}" | sed 's/;//g')
+        echo "PREFIX: ${prefix}"
+
+        export composite_command="(command rocm-smi || true)"
+        saved_IFS=$IFS
+        IFS=','
+        read -ra node0 <<< "${BASH_REMATCH[2]}"
+        read -ra node1 <<< "${BASH_REMATCH[3]}"
+        IFS=$saved_IFS
+
+        if [[ ${#node0[@]} -ne ${#node1[@]} ]]; then
+            echo "Warning: node0 has ${#node0[@]} commands, node1 has ${#node1[@]}. They will be paired by index."
+        fi
+
+        for i in "${!node0[@]}"; do
+            command_node_0=$(echo "${node0[i]}" | sed 's/\"//g')
+            command_node_1=$(echo "${node1[i]}" | sed 's/\"//g')
+
+            step_cmd="./.buildkite/scripts/run-multi-node-test.sh /vllm-workspace/tests 2 2 ${image_name} '${command_node_0}' '${command_node_1}'"
+            echo "COMMANDS: ${step_cmd}"
+            composite_command="${composite_command} && ${step_cmd}"
+        done
+
+        /bin/bash -c "${composite_command}"
+        exit_code=$?
+        cleanup_network
+        handle_pytest_exit "$exit_code"
    else
-        echo "Failed to parse node commands! Exiting."
-        cleanup_network
-        exit 111
+        echo "Multi-node job detected but failed to parse bracket command syntax."
+        echo "Expected format: prefix ; [node0_cmd1, node0_cmd2] && [node1_cmd1, node1_cmd2]"
+        echo "Got: $commands"
+        cleanup_network
+        exit 111
    fi
else
+    echo "--- Single-node job"
    echo "Render devices: $BUILDKITE_AGENT_META_DATA_RENDER_DEVICES"
    docker run \
        --device /dev/kfd $BUILDKITE_AGENT_META_DATA_RENDER_DEVICES \
+        $RDMA_FLAGS \
        --network=host \
        --shm-size=16gb \
        --group-add "$render_gid" \
        --rm \
        -e HF_TOKEN \
        -e AWS_ACCESS_KEY_ID \
        -e AWS_SECRET_ACCESS_KEY \
        -v "${HF_CACHE}:${HF_MOUNT}" \
        -e "HF_HOME=${HF_MOUNT}" \
        -e "PYTHONPATH=${MYPYTHONPATH}" \
        --name "${container_name}" \
        "${image_name}" \
        /bin/bash -c "${commands}"
+
+    exit_code=$?
+    handle_pytest_exit "$exit_code"
fi
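End-to-end sketch of the multi-node path above (test names invented; pairing is by index across the two bracket lists):

    export NUM_NODES=2
    export VLLM_TEST_COMMANDS='cd tests ; [pytest -v -s dist/test_a.py, pytest -v -s dist/test_b.py] && [pytest -v -s dist/test_a.py, pytest -v -s dist/test_b.py]'
    bash run-amd-test.sh
    # For i=0 the script appends to composite_command:
    #   ./.buildkite/scripts/run-multi-node-test.sh /vllm-workspace/tests 2 2 ${image_name} \
    #       'pytest -v -s dist/test_a.py' 'pytest -v -s dist/test_a.py'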
@@ -0,0 +1,43 @@
+#!/bin/bash
+set -euox pipefail
+export VLLM_CPU_CI_ENV=0
+
+echo "--- PP+TP"
+vllm serve meta-llama/Llama-3.2-3B-Instruct -tp=2 -pp=2 &
+server_pid=$!
+timeout 600 bash -c "until curl localhost:8000/v1/models > /dev/null 2>&1; do sleep 1; done" || exit 1
+vllm bench serve \
+    --backend vllm \
+    --dataset-name random \
+    --model meta-llama/Llama-3.2-3B-Instruct \
+    --num-prompts 20 \
+    --result-dir ./test_results \
+    --result-filename tp_pp.json \
+    --save-result \
+    --endpoint /v1/completions
+kill -s SIGTERM $server_pid; wait $server_pid || true
+failed_req=$(jq '.failed' ./test_results/tp_pp.json)
+if [ "$failed_req" -ne 0 ]; then
+    echo "Some requests failed!"
+    exit 1
+fi
+
+echo "--- DP+TP"
+vllm serve meta-llama/Llama-3.2-3B-Instruct -tp=2 -dp=2 &
+server_pid=$!
+timeout 600 bash -c "until curl localhost:8000/v1/models > /dev/null 2>&1; do sleep 1; done" || exit 1
+vllm bench serve \
+    --backend vllm \
+    --dataset-name random \
+    --model meta-llama/Llama-3.2-3B-Instruct \
+    --num-prompts 20 \
+    --result-dir ./test_results \
+    --result-filename dp_pp.json \
+    --save-result \
+    --endpoint /v1/completions
+kill -s SIGTERM $server_pid; wait $server_pid || true
+failed_req=$(jq '.failed' ./test_results/dp_pp.json)
+if [ "$failed_req" -ne 0 ]; then
+    echo "Some requests failed!"
+    exit 1
+fi
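The pass/fail gate keys off the JSON results file that vllm bench serve writes with --save-result; jq '.failed' extracts the failed-request counter (output shown is illustrative):

    # $ jq '.failed' ./test_results/tp_pp.json
    # 0        <- any non-zero value fails the step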
@@ -27,7 +27,7 @@ function cpu_tests() {
    podman exec -it "$container_id" bash -c "
        export TORCH_COMPILE_DISABLE=1
        set -xve
-        python3 examples/offline_inference/basic/generate.py --model facebook/opt-125m" >> $HOME/test_basic.log
+        python3 examples/offline_inference/basic/generate.py --model facebook/opt-125m" >> "$HOME"/test_basic.log

    # Run basic model test
    podman exec -it "$container_id" bash -c "
@@ -43,7 +43,7 @@ function cpu_tests() {
        pytest -v -s tests/models/language/generation/test_common.py::test_models[False-False-5-32-google/gemma-1.1-2b-it]
        pytest -v -s tests/models/language/pooling/test_classification.py::test_models[float-jason9693/Qwen2.5-1.5B-apeach]
        # TODO: Below test case tests/models/language/pooling/test_embedding.py::test_models[True-ssmits/Qwen2-7B-Instruct-embed-base] fails on ppc64le. Disabling it for time being.
-        # pytest -v -s tests/models/language/pooling/test_embedding.py -m cpu_model" >> $HOME/test_rest.log
+        # pytest -v -s tests/models/language/pooling/test_embedding.py -m cpu_model" >> "$HOME"/test_rest.log
}

# All CPU tests are expected to finish in less than 40 mins.
@@ -2,119 +2,19 @@

# This script builds the CPU docker image and runs the offline inference inside the container.
# It serves as a sanity check for compilation and basic model usage.
-set -ex
+set -euox pipefail

# allow binding to different cores
CORE_RANGE=${CORE_RANGE:-48-95}
-# used for TP/PP E2E test
-OMP_CORE_RANGE=${OMP_CORE_RANGE:-48-95}
NUMA_NODE=${NUMA_NODE:-1}
+IMAGE_NAME="cpu-test-$NUMA_NODE"
+TIMEOUT_VAL=$1
+TEST_COMMAND=$2

-export CMAKE_BUILD_PARALLEL_LEVEL=32
-
-# Setup cleanup
-remove_docker_container() {
-    set -e;
-    docker rm -f cpu-test-"$NUMA_NODE" cpu-test-"$NUMA_NODE"-avx2 || true;
-}
-trap remove_docker_container EXIT
-remove_docker_container
-
-# Try building the docker image
-numactl -C "$CORE_RANGE" -N "$NUMA_NODE" docker build --progress plain --tag cpu-test-"$NUMA_NODE" --target vllm-test -f docker/Dockerfile.cpu .
-numactl -C "$CORE_RANGE" -N "$NUMA_NODE" docker build --progress plain --build-arg VLLM_CPU_DISABLE_AVX512="true" --tag cpu-test-"$NUMA_NODE"-avx2 --target vllm-test -f docker/Dockerfile.cpu .
+# building the docker image
+echo "--- :docker: Building Docker image"
+docker build --progress plain --tag "$IMAGE_NAME" --target vllm-test -f docker/Dockerfile.cpu .

# Run the image, setting --shm-size=4g for tensor parallel.
-docker run -itd --cpuset-cpus="$CORE_RANGE" --cpuset-mems="$NUMA_NODE" --entrypoint /bin/bash -v ~/.cache/huggingface:/root/.cache/huggingface --privileged=true -e HF_TOKEN --env VLLM_CPU_KVCACHE_SPACE=16 --env VLLM_CPU_CI_ENV=1 -e E2E_OMP_THREADS="$OMP_CORE_RANGE" --shm-size=4g --name cpu-test-"$NUMA_NODE" cpu-test-"$NUMA_NODE"
-docker run -itd --cpuset-cpus="$CORE_RANGE" --cpuset-mems="$NUMA_NODE" --entrypoint /bin/bash -v ~/.cache/huggingface:/root/.cache/huggingface --privileged=true -e HF_TOKEN --env VLLM_CPU_KVCACHE_SPACE=16 --env VLLM_CPU_CI_ENV=1 -e E2E_OMP_THREADS="$OMP_CORE_RANGE" --shm-size=4g --name cpu-test-"$NUMA_NODE"-avx2 cpu-test-"$NUMA_NODE"-avx2
+docker run --rm --cpuset-cpus="$CORE_RANGE" --cpuset-mems="$NUMA_NODE" -v ~/.cache/huggingface:/root/.cache/huggingface --privileged=true -e HF_TOKEN -e VLLM_CPU_KVCACHE_SPACE=16 -e VLLM_CPU_CI_ENV=1 -e VLLM_CPU_SIM_MULTI_NUMA=1 --shm-size=4g "$IMAGE_NAME" \
+    timeout "$TIMEOUT_VAL" bash -c "set -euox pipefail; echo \"--- Print packages\"; pip list; echo \"--- Running tests\"; ${TEST_COMMAND}"

-function cpu_tests() {
-    set -e
-    export NUMA_NODE=$2
-
-    # list packages
-    docker exec cpu-test-"$NUMA_NODE"-avx2 bash -c "
-        set -e
-        pip list"
-
-    docker exec cpu-test-"$NUMA_NODE" bash -c "
-        set -e
-        pip list"
-
-    # offline inference
-    docker exec cpu-test-"$NUMA_NODE"-avx2 bash -c "
-        set -e
-        python3 examples/offline_inference/basic/generate.py --model facebook/opt-125m"
-
-    # Run kernel tests
-    docker exec cpu-test-"$NUMA_NODE" bash -c "
-        set -e
-        pytest -x -v -s tests/kernels/attention/test_cpu_attn.py
-        pytest -x -v -s tests/kernels/moe/test_cpu_fused_moe.py
-        pytest -x -v -s tests/kernels/test_onednn.py"
-
-    # Run basic model test
-    docker exec cpu-test-"$NUMA_NODE" bash -c "
-        set -e
-        # Note: disable until supports V1
-        # pytest -x -v -s tests/kernels/attention/test_cache.py -m cpu_model
-        # pytest -x -v -s tests/kernels/attention/test_mla_decode_cpu.py -m cpu_model
-
-        pytest -x -v -s tests/models/language/generation -m cpu_model
-        VLLM_CPU_SGL_KERNEL=1 pytest -x -v -s tests/models/language/generation -m cpu_model
-
-        pytest -x -v -s tests/models/language/pooling -m cpu_model
-        pytest -x -v -s tests/models/multimodal/generation \
-            --ignore=tests/models/multimodal/generation/test_pixtral.py \
-            -m cpu_model"
-
-    # Run compressed-tensor test
-    docker exec cpu-test-"$NUMA_NODE" bash -c "
-        set -e
-        pytest -x -s -v \
-            tests/quantization/test_compressed_tensors.py::test_compressed_tensors_w8a8_logprobs"
-
-    # Run AWQ/GPTQ test
-    docker exec cpu-test-"$NUMA_NODE" bash -c "
-        set -e
-        pytest -x -s -v \
-            tests/quantization/test_cpu_wna16.py"
-
-    # Run multi-lora tests
-    docker exec cpu-test-"$NUMA_NODE" bash -c "
-        set -e
-        pytest -x -s -v \
-            tests/lora/test_qwenvl.py"
-
-    # online serving: tp+pp
-    docker exec cpu-test-"$NUMA_NODE" bash -c '
-        set -e
-        VLLM_CPU_OMP_THREADS_BIND=$E2E_OMP_THREADS VLLM_CPU_SGL_KERNEL=1 vllm serve meta-llama/Llama-3.2-3B-Instruct -tp=2 -pp=2 &
-        server_pid=$!
-        timeout 600 bash -c "until curl localhost:8000/v1/models; do sleep 1; done" || exit 1
-        vllm bench serve \
-            --backend vllm \
-            --dataset-name random \
-            --model meta-llama/Llama-3.2-3B-Instruct \
-            --num-prompts 20 \
-            --endpoint /v1/completions
-        kill -s SIGTERM $server_pid &'
-
-    # online serving: tp+dp
-    docker exec cpu-test-"$NUMA_NODE" bash -c '
-        set -e
-        VLLM_CPU_OMP_THREADS_BIND=$E2E_OMP_THREADS VLLM_CPU_SGL_KERNEL=1 vllm serve meta-llama/Llama-3.2-3B-Instruct -tp=2 -dp=2 &
-        server_pid=$!
-        timeout 600 bash -c "until curl localhost:8000/v1/models; do sleep 1; done" || exit 1
-        vllm bench serve \
-            --backend vllm \
-            --dataset-name random \
-            --model meta-llama/Llama-3.2-3B-Instruct \
-            --num-prompts 20 \
-            --endpoint /v1/completions
-        kill -s SIGTERM $server_pid &'
-}
-
-# All CPU tests are expected to finish in less than 40 mins.
-export -f cpu_tests
-timeout 2.5h bash -c "cpu_tests $CORE_RANGE $NUMA_NODE"
@@ -1,17 +1,42 @@
#!/bin/bash

-# This script build the CPU docker image and run the offline inference inside the container.
+# This script builds the HPU docker image and runs the offline inference inside the container.
# It serves as a sanity check for compilation and basic model usage.
+#
+# vllm-gaudi compatibility pinning:
+# The vllm-gaudi plugin is installed on top of the vllm upstream checkout used by this CI job.
+# When upstream vllm changes its API, the plugin may break before it has been updated.
+# To handle this, the vllm-gaudi repository maintains a file:
+#     vllm/last-good-commit-for-vllm-gaudi/VLLM_COMMUNITY_COMMIT
+# The first line of that file controls what version of vllm is used inside the Docker image:
+#   - "latest"       : no checkout override; the current Buildkite CI commit is used as-is.
+#   - "<commit SHA>" : vllm is checked out to that specific commit before building, pinning
+#                      the test to a known-compatible baseline.
+# To unpin (resume testing against the live vllm tip), set the file content back to "latest".
set -exuo pipefail

+# Fetch the vllm community commit reference from vllm-gaudi (first line only).
+VLLM_COMMUNITY_COMMIT=$(curl -s \
+    https://raw.githubusercontent.com/vllm-project/vllm-gaudi/vllm/last-good-commit-for-vllm-gaudi/VLLM_COMMUNITY_COMMIT \
+    | head -1 | tr -d '\n')
+
+echo "Using vllm community commit: ${VLLM_COMMUNITY_COMMIT}"
+
# Try building the docker image
image_name="hpu/upstream-vllm-ci:${BUILDKITE_COMMIT}"
container_name="hpu-upstream-vllm-ci-${BUILDKITE_COMMIT}-container"
-cat <<EOF | docker build -t ${image_name} -f - .
+cat <<EOF | docker build -t "${image_name}" -f - .
FROM gaudi-base-image:latest

COPY ./ /workspace/vllm

+# If VLLM_COMMUNITY_COMMIT is a specific commit (not "latest"), check it out to pin vllm
+# to the version known to be compatible with vllm-gaudi. When the value is "latest",
+# the current checkout (the Buildkite CI commit) is used unchanged.
+RUN if [ "${VLLM_COMMUNITY_COMMIT}" != "latest" ]; then \
+        cd /workspace/vllm && git fetch --unshallow 2>/dev/null || true && git checkout ${VLLM_COMMUNITY_COMMIT}; \
+    fi
+
WORKDIR /workspace/vllm

ENV no_proxy=localhost,127.0.0.1
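Worked example of the pinning contract (SHA is a placeholder): when the first line of VLLM_COMMUNITY_COMMIT names a commit, the image build rewinds the vllm checkout; when it is the string latest, the CI commit is built as-is.

    # vllm-gaudi: vllm/last-good-commit-for-vllm-gaudi/VLLM_COMMUNITY_COMMIT
    #   0123456789abcdef0123456789abcdef01234567    -> pinned build at that commit
    #   latest                                      -> build the Buildkite CI commit unchanged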
@@ -39,12 +64,12 @@ EOF
# functions, while other platforms only need one remove_docker_container
# function.
EXITCODE=1
-remove_docker_containers() { docker rm -f ${container_name} || true; }
+remove_docker_containers() { docker rm -f "${container_name}" || true; }
trap 'remove_docker_containers; exit $EXITCODE;' EXIT
remove_docker_containers

echo "Running HPU plugin v1 test"
-docker run --rm --runtime=habana --name=${container_name} --network=host \
+docker run --rm --runtime=habana --name="${container_name}" --network=host \
    -e HABANA_VISIBLE_DEVICES=all \
    -e VLLM_SKIP_WARMUP=true \
    -e PT_HPU_ENABLE_LAZY_COLLECTIVES=true \
@@ -41,6 +41,7 @@ get_config() {
    echo "Error: file '${TEST_RUN_CONFIG_FILE}' does not exist in the warehouse" >&2
    exit 1
fi
+# shellcheck source=/dev/null
source "${TEST_RUN_CONFIG_FILE}"
echo "Base docker image name read from configuration: ${BASE_IMAGE_NAME}"
return 0
@@ -48,9 +49,8 @@ get_config() {

# get test running configuration.
fetch_vllm_test_cfg
-get_config
# Check if the function call was successful. If not, exit the script.
-if [ $? -ne 0 ]; then
+if ! get_config; then
    exit 1
fi

@@ -62,14 +62,14 @@ agent_idx=$(echo "${BUILDKITE_AGENT_NAME}" | awk -F'-' '{print $(NF-1)}')
echo "agent_idx: ${agent_idx}"
builder_name="cachebuilder${agent_idx}"
builder_cache_dir="/mnt/docker-cache${agent_idx}"
-mkdir -p ${builder_cache_dir}
+mkdir -p "${builder_cache_dir}"

# Try building the docker image
cat <<EOF | DOCKER_BUILDKIT=1 docker build \
-    --add-host cache-service-vllm.nginx-pypi-cache.svc.cluster.local:${PYPI_CACHE_HOST} \
-    --builder ${builder_name} --cache-from type=local,src=${builder_cache_dir} \
-    --cache-to type=local,dest=${builder_cache_dir},mode=max \
-    --progress=plain --load -t ${image_name} -f - .
+    --add-host cache-service-vllm.nginx-pypi-cache.svc.cluster.local:"${PYPI_CACHE_HOST}" \
+    --builder "${builder_name}" --cache-from type=local,src="${builder_cache_dir}" \
+    --cache-to type=local,dest="${builder_cache_dir}",mode=max \
+    --progress=plain --load -t "${image_name}" -f - .
FROM ${BASE_IMAGE_NAME}

# Define environments
@@ -116,7 +116,7 @@ RUN --mount=type=cache,target=/root/.cache/pip \
    export PIP_EXTRA_INDEX_URL=https://mirrors.huaweicloud.com/ascend/repos/pypi && \
    source /usr/local/Ascend/ascend-toolkit/set_env.sh && \
    source /usr/local/Ascend/nnal/atb/set_env.sh && \
-    export LD_LIBRARY_PATH=\$LD_LIBRARY_PATH:/usr/local/Ascend/ascend-toolkit/latest/`uname -i`-linux/devlib && \
+    export LD_LIBRARY_PATH=\$LD_LIBRARY_PATH:/usr/local/Ascend/ascend-toolkit/latest/$(uname -i)-linux/devlib && \
    python3 -m pip install -v -e /workspace/vllm-ascend/ --extra-index https://download.pytorch.org/whl/cpu/

ENV VLLM_WORKER_MULTIPROC_METHOD=spawn
@@ -139,7 +139,7 @@ trap remove_docker_container EXIT
# Generate corresponding --device args based on BUILDKITE_AGENT_NAME
# Ascend NPU BUILDKITE_AGENT_NAME format is {hostname}-{agent_idx}-{npu_card_num}cards, and agent_idx starts from 1.
# e.g. atlas-a2-001-1-2cards means this is the first agent on the atlas-a2-001 host, and it has 2 NPU cards.
-# returns --device /dev/davinci0 --device /dev/davinci1
+# returns one argument per line: --device, /dev/davinciX, ...
parse_and_gen_devices() {
    local input="$1"
    local index cards_num
@@ -151,29 +151,24 @@ parse_and_gen_devices() {
        return 1
    fi

-    local devices=""
    local i=0
    while (( i < cards_num )); do
        local dev_idx=$(((index - 1)*cards_num + i ))
-        devices="$devices --device /dev/davinci${dev_idx}"
+        printf '%s\n' "--device"
+        printf '%s\n' "/dev/davinci${dev_idx}"
        ((i++))
    done
-
-    # trim leading space
-    devices="${devices#"${devices%%[![:space:]]*}"}"
-    # Output devices: assigned to the caller variable
-    printf '%s' "$devices"
}

-devices=$(parse_and_gen_devices "${BUILDKITE_AGENT_NAME}") || exit 1
+mapfile -t device_args < <(parse_and_gen_devices "${BUILDKITE_AGENT_NAME}") || exit 1
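Why mapfile: the function now emits one argv element per line, and mapfile -t collects those lines into an array so "${device_args[@]}" expands into clean, unsplit docker arguments. With an invented agent name:

    # BUILDKITE_AGENT_NAME=atlas-a2-001-2-2cards  ->  index=2, cards_num=2
    # parse_and_gen_devices prints, one per line:
    #   --device
    #   /dev/davinci2
    #   --device
    #   /dev/davinci3
    # giving device_args=(--device /dev/davinci2 --device /dev/davinci3)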

# Run the image and execute the Out-Of-Tree (OOT) platform interface test case on Ascend NPU hardware.
# This test checks whether the OOT platform interface is functioning properly in conjunction with
# the hardware plugin vllm-ascend.
model_cache_dir=/mnt/modelscope${agent_idx}
-mkdir -p ${model_cache_dir}
+mkdir -p "${model_cache_dir}"
docker run \
-    ${devices} \
+    "${device_args[@]}" \
    --device /dev/davinci_manager \
    --device /dev/devmm_svm \
    --device /dev/hisi_hdc \
@@ -182,7 +177,7 @@ docker run \
    -v /usr/local/Ascend/driver/lib64/:/usr/local/Ascend/driver/lib64/ \
    -v /usr/local/Ascend/driver/version.info:/usr/local/Ascend/driver/version.info \
    -v /etc/ascend_install.info:/etc/ascend_install.info \
-    -v ${model_cache_dir}:/root/.cache/modelscope \
+    -v "${model_cache_dir}":/root/.cache/modelscope \
    --entrypoint="" \
    --name "${container_name}" \
    "${image_name}" \
@@ -61,7 +61,7 @@ echo "Results will be stored in: $RESULTS_DIR"
echo "--- Installing Python dependencies ---"
python3 -m pip install --progress-bar off git+https://github.com/thuml/depyf.git \
    && python3 -m pip install --progress-bar off pytest pytest-asyncio tpu-info \
-    && python3 -m pip install --progress-bar off "lm-eval[api]>=0.4.9.2" \
+    && python3 -m pip install --progress-bar off "lm-eval[api]>=0.4.11" \
    && python3 -m pip install --progress-bar off hf-transfer tblib==3.1.0
echo "--- Python dependencies installed ---"

@@ -61,7 +61,7 @@ echo "Results will be stored in: $RESULTS_DIR"
echo "--- Installing Python dependencies ---"
python3 -m pip install --progress-bar off git+https://github.com/thuml/depyf.git \
    && python3 -m pip install --progress-bar off pytest pytest-asyncio tpu-info \
-    && python3 -m pip install --progress-bar off "lm-eval[api]>=0.4.9.2" \
+    && python3 -m pip install --progress-bar off "lm-eval[api]>=0.4.11" \
    && python3 -m pip install --progress-bar off hf-transfer tblib==3.1.0
echo "--- Python dependencies installed ---"

@@ -8,7 +8,7 @@ image_name="xpu/vllm-ci:${BUILDKITE_COMMIT}"
container_name="xpu_${BUILDKITE_COMMIT}_$(tr -dc A-Za-z0-9 < /dev/urandom | head -c 10; echo)"

# Try building the docker image
-docker build -t ${image_name} -f docker/Dockerfile.xpu .
+docker build -t "${image_name}" -f docker/Dockerfile.xpu .

# Setup cleanup
remove_docker_container() {
@@ -39,6 +39,8 @@ docker run \
    python3 examples/offline_inference/basic/generate.py --model facebook/opt-125m --block-size 64 --enforce-eager -tp 2 --distributed-executor-backend ray
    python3 examples/offline_inference/basic/generate.py --model facebook/opt-125m --block-size 64 --enforce-eager -tp 2 --distributed-executor-backend mp
    python3 examples/offline_inference/basic/generate.py --model facebook/opt-125m --block-size 64 --enforce-eager --attention-backend=TRITON_ATTN
+    python3 examples/offline_inference/basic/generate.py --model facebook/opt-125m --block-size 64 --enforce-eager --quantization fp8
+    python3 examples/offline_inference/basic/generate.py --model superjob/Qwen3-4B-Instruct-2507-GPTQ-Int4 --block-size 64 --enforce-eager
    python3 examples/offline_inference/basic/generate.py --model ibm-research/PowerMoE-3b --block-size 64 --enforce-eager -tp 2
    python3 examples/offline_inference/basic/generate.py --model ibm-research/PowerMoE-3b --block-size 64 --enforce-eager -tp 2 --enable-expert-parallel
    cd tests
@@ -21,16 +21,16 @@ echo "Pushing original tag $ORIG_TAG_NAME$ORIG_TAG_SUFFIX to new nightly tag nam

# pull original arch-dependent images from AWS ECR Public
aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin public.ecr.aws/q9t5s3a7
-docker pull public.ecr.aws/q9t5s3a7/vllm-release-repo:$ORIG_TAG_NAME-x86_64$ORIG_TAG_SUFFIX
-docker pull public.ecr.aws/q9t5s3a7/vllm-release-repo:$ORIG_TAG_NAME-aarch64$ORIG_TAG_SUFFIX
+docker pull public.ecr.aws/q9t5s3a7/vllm-release-repo:"$ORIG_TAG_NAME"-x86_64"$ORIG_TAG_SUFFIX"
+docker pull public.ecr.aws/q9t5s3a7/vllm-release-repo:"$ORIG_TAG_NAME"-aarch64"$ORIG_TAG_SUFFIX"
# tag arch-dependent images
-docker tag public.ecr.aws/q9t5s3a7/vllm-release-repo:$ORIG_TAG_NAME-x86_64$ORIG_TAG_SUFFIX vllm/vllm-openai:$TAG_NAME-x86_64
-docker tag public.ecr.aws/q9t5s3a7/vllm-release-repo:$ORIG_TAG_NAME-aarch64$ORIG_TAG_SUFFIX vllm/vllm-openai:$TAG_NAME-aarch64
+docker tag public.ecr.aws/q9t5s3a7/vllm-release-repo:"$ORIG_TAG_NAME"-x86_64"$ORIG_TAG_SUFFIX" vllm/vllm-openai:"$TAG_NAME"-x86_64
+docker tag public.ecr.aws/q9t5s3a7/vllm-release-repo:"$ORIG_TAG_NAME"-aarch64"$ORIG_TAG_SUFFIX" vllm/vllm-openai:"$TAG_NAME"-aarch64
# push arch-dependent images to DockerHub
-docker push vllm/vllm-openai:$TAG_NAME-x86_64
-docker push vllm/vllm-openai:$TAG_NAME-aarch64
+docker push vllm/vllm-openai:"$TAG_NAME"-x86_64
+docker push vllm/vllm-openai:"$TAG_NAME"-aarch64
# push arch-independent manifest to DockerHub
-docker manifest create vllm/vllm-openai:$TAG_NAME vllm/vllm-openai:$TAG_NAME-x86_64 vllm/vllm-openai:$TAG_NAME-aarch64 --amend
-docker manifest create vllm/vllm-openai:$TAG_NAME-$BUILDKITE_COMMIT vllm/vllm-openai:$TAG_NAME-x86_64 vllm/vllm-openai:$TAG_NAME-aarch64 --amend
-docker manifest push vllm/vllm-openai:$TAG_NAME
-docker manifest push vllm/vllm-openai:$TAG_NAME-$BUILDKITE_COMMIT
+docker manifest create vllm/vllm-openai:"$TAG_NAME" vllm/vllm-openai:"$TAG_NAME"-x86_64 vllm/vllm-openai:"$TAG_NAME"-aarch64 --amend
+docker manifest create vllm/vllm-openai:"$TAG_NAME"-"$BUILDKITE_COMMIT" vllm/vllm-openai:"$TAG_NAME"-x86_64 vllm/vllm-openai:"$TAG_NAME"-aarch64 --amend
+docker manifest push vllm/vllm-openai:"$TAG_NAME"
+docker manifest push vllm/vllm-openai:"$TAG_NAME"-"$BUILDKITE_COMMIT"
@@ -1,64 +0,0 @@
-#!/bin/bash
-# SPDX-License-Identifier: Apache-2.0
-# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
-
-# Setup script for Prime-RL integration tests
-# This script prepares the environment for running Prime-RL tests with nightly vLLM
-
-set -euo pipefail
-
-SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
-REPO_ROOT="$(cd "${SCRIPT_DIR}/../.." && pwd)"
-PRIME_RL_REPO="https://github.com/PrimeIntellect-ai/prime-rl.git"
-PRIME_RL_DIR="${REPO_ROOT}/prime-rl"
-
-if command -v rocm-smi &> /dev/null || command -v rocminfo &> /dev/null; then
-    echo "AMD GPU detected. Prime-RL currently only supports NVIDIA. Skipping..."
-    exit 0
-fi
-
-echo "Setting up Prime-RL integration test environment..."
-
-# Clean up any existing Prime-RL directory
-if [ -d "${PRIME_RL_DIR}" ]; then
-    echo "Removing existing Prime-RL directory..."
-    rm -rf "${PRIME_RL_DIR}"
-fi
-
-# Install UV if not available
-if ! command -v uv &> /dev/null; then
-    echo "Installing UV package manager..."
-    curl -LsSf https://astral.sh/uv/install.sh | sh
-    source $HOME/.local/bin/env
-fi
-
-# Clone Prime-RL repository at specific branch for reproducible tests
-PRIME_RL_BRANCH="integ-vllm-main"
-echo "Cloning Prime-RL repository at branch: ${PRIME_RL_BRANCH}..."
-git clone --branch "${PRIME_RL_BRANCH}" --single-branch "${PRIME_RL_REPO}" "${PRIME_RL_DIR}"
-cd "${PRIME_RL_DIR}"
-
-echo "Setting up UV project environment..."
-export UV_PROJECT_ENVIRONMENT=/usr/local
-ln -s /usr/bin/python3 /usr/local/bin/python
-
-# Remove vllm pin from pyproject.toml
-echo "Removing vllm pin from pyproject.toml..."
-sed -i '/vllm==/d' pyproject.toml
-
-# Sync Prime-RL dependencies
-echo "Installing Prime-RL dependencies..."
-uv sync --inexact && uv sync --inexact --all-extras
-
-# Verify installation
-echo "Verifying installations..."
-uv run python -c "import vllm; print(f'vLLM version: {vllm.__version__}')"
-uv run python -c "import prime_rl; print('Prime-RL imported successfully')"
-
-echo "Prime-RL integration test environment setup complete!"
-
-echo "Running Prime-RL integration tests..."
-export WANDB_MODE=offline # this makes this test not require a WANDB_API_KEY
-uv run pytest -vs tests/integration/test_rl.py -m gpu
-
-echo "Prime-RL integration tests completed!"
@@ -51,14 +51,14 @@ for BACK in "${BACKENDS[@]}"; do
     --enable-eplb \
     --trust-remote-code \
     --max-model-len 2048 \
-    --all2all-backend $BACK \
+    --all2all-backend "$BACK" \
-    --port $PORT &
+    --port "$PORT" &
   SERVER_PID=$!
-  wait_for_server $PORT
+  wait_for_server "$PORT"
 
   TAG=$(echo "$MODEL" | tr '/: \\n' '_____')
   OUT="${OUT_DIR}/${TAG}_${BACK}.json"
-  python3 tests/evals/gsm8k/gsm8k_eval.py --host http://127.0.0.1 --port $PORT --num-questions ${NUM_Q} --save-results ${OUT}
+  python3 tests/evals/gsm8k/gsm8k_eval.py --host http://127.0.0.1 --port "$PORT" --num-questions "${NUM_Q}" --save-results "${OUT}"
   python3 - <<PY
 import json; acc=json.load(open('${OUT}'))['accuracy']
 print(f"${MODEL} ${BACK}: accuracy {acc:.3f}")
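The `python3 - <<PY` blocks above work because the heredoc delimiter is unquoted, so the shell substitutes `${OUT}`, `${MODEL}`, and friends into the Python source before the interpreter sees it. A self-contained sketch of the idiom, with hypothetical values:

#!/bin/bash
set -euo pipefail
OUT=/tmp/results.json   # hypothetical results file
MODEL=demo-model        # hypothetical model name
THRESHOLD=0.25          # hypothetical accuracy floor
echo '{"accuracy": 0.31}' > "$OUT"

# Unquoted delimiter => ${OUT}/${MODEL}/${THRESHOLD} are expanded by
# the shell; {acc:.3f} survives untouched because it has no leading $.
python3 - <<PY
import json
acc = json.load(open('${OUT}'))['accuracy']
print(f"${MODEL}: accuracy {acc:.3f}")
assert acc >= ${THRESHOLD}, f"accuracy {acc} below threshold"
PY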
@@ -0,0 +1,57 @@
+#!/usr/bin/env bash
+set -euxo pipefail
+
+# Nightly e2e test for prefetch offloading with a MoE model.
+# Runs DeepSeek-V2-Lite with prefetch offloading of MoE expert weights
+# and validates GSM8K accuracy matches baseline (no offloading).
+#
+# args: [THRESHOLD] [NUM_QUESTIONS] [START_PORT]
+THRESHOLD=${1:-0.25}
+NUM_Q=${2:-1319}
+PORT=${3:-8030}
+OUT_DIR=${OUT_DIR:-/tmp/vllm-scheduled}
+mkdir -p "${OUT_DIR}"
+
+wait_for_server() {
+  local port=$1
+  timeout 600 bash -c '
+    until curl -sf "http://127.0.0.1:'"$port"'/health" > /dev/null; do
+      sleep 1
+    done'
+}
+
+MODEL="deepseek-ai/DeepSeek-V2-Lite"
+
+cleanup() {
+  if [[ -n "${SERVER_PID:-}" ]] && kill -0 "${SERVER_PID}" 2>/dev/null; then
+    kill "${SERVER_PID}" 2>/dev/null || true
+    for _ in {1..20}; do
+      kill -0 "${SERVER_PID}" 2>/dev/null || break
+      sleep 0.5
+    done
+    kill -9 "${SERVER_PID}" 2>/dev/null || true
+  fi
+}
+trap cleanup EXIT
+
+vllm serve "$MODEL" \
+  --max-model-len 2048 \
+  --offload-group-size 8 \
+  --offload-num-in-group 2 \
+  --offload-prefetch-step 1 \
+  --offload-params w13_weight w2_weight \
+  --port "$PORT" &
+SERVER_PID=$!
+wait_for_server "$PORT"
+
+TAG=$(echo "$MODEL" | tr '/: \\n' '_____')
+OUT="${OUT_DIR}/${TAG}_prefetch_offload.json"
+python3 tests/evals/gsm8k/gsm8k_eval.py --host http://127.0.0.1 --port "$PORT" --num-questions "${NUM_Q}" --save-results "${OUT}"
+python3 - <<PY
+import json; acc=json.load(open('${OUT}'))['accuracy']
+print(f"${MODEL} prefetch_offload: accuracy {acc:.3f}")
+assert acc >= ${THRESHOLD}, f"${MODEL} prefetch_offload accuracy {acc}"
+PY
+
+cleanup
+SERVER_PID=
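The new script's `cleanup` function plus `trap cleanup EXIT` is the standard pattern for making sure a background server dies even when the eval step fails: SIGTERM first, poll with `kill -0`, then escalate to SIGKILL. A standalone sketch (a `sleep` stands in for the real server):

#!/bin/bash
set -euo pipefail

sleep 600 &             # stand-in for the long-running server
SERVER_PID=$!

cleanup() {
  if [[ -n "${SERVER_PID:-}" ]] && kill -0 "${SERVER_PID}" 2>/dev/null; then
    kill "${SERVER_PID}" 2>/dev/null || true      # polite SIGTERM first
    for _ in {1..20}; do                          # wait up to ~10 seconds
      kill -0 "${SERVER_PID}" 2>/dev/null || break
      sleep 0.5
    done
    kill -9 "${SERVER_PID}" 2>/dev/null || true   # then force-kill
  fi
}
trap cleanup EXIT       # fires on normal exit and on set -e failures

echo "doing work..."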
@@ -47,20 +47,20 @@ for BACK in "${BACKENDS[@]}"; do
   vllm serve "$MODEL" \
     --enforce-eager \
     --enable-eplb \
-    --all2all-backend $BACK \
+    --all2all-backend "$BACK" \
     --eplb-config '{"window_size":10, "step_interval":100, "num_redundant_experts":0, "log_balancedness":true}' \
-    --tensor-parallel-size ${TENSOR_PARALLEL_SIZE} \
+    --tensor-parallel-size "${TENSOR_PARALLEL_SIZE}" \
-    --data-parallel-size ${DATA_PARALLEL_SIZE} \
+    --data-parallel-size "${DATA_PARALLEL_SIZE}" \
     --enable-expert-parallel \
     --trust-remote-code \
     --max-model-len 2048 \
-    --port $PORT &
+    --port "$PORT" &
   SERVER_PID=$!
-  wait_for_server $PORT
+  wait_for_server "$PORT"
 
   TAG=$(echo "$MODEL" | tr '/: \\n' '_____')
   OUT="${OUT_DIR}/${TAG}_${BACK}.json"
-  python3 tests/evals/gsm8k/gsm8k_eval.py --host http://127.0.0.1 --port $PORT --num-questions ${NUM_Q} --save-results ${OUT}
+  python3 tests/evals/gsm8k/gsm8k_eval.py --host http://127.0.0.1 --port "$PORT" --num-questions "${NUM_Q}" --save-results "${OUT}"
   python3 - <<PY
 import json; acc=json.load(open('${OUT}'))['accuracy']
 print(f"${MODEL} ${BACK}: accuracy {acc:.3f}")
@@ -51,20 +51,20 @@ for BACK in "${BACKENDS[@]}"; do
     --tensor-parallel-size 4 \
     --enable-expert-parallel \
     --enable-eplb \
-    --all2all-backend $BACK \
+    --all2all-backend "$BACK" \
     --eplb-config '{"window_size":200,"step_interval":600,"use_async":true}' \
     --speculative-config '{"method":"qwen3_next_mtp","num_speculative_tokens":1}' \
     --trust-remote-code \
     --max-model-len 2048 \
     --gpu-memory-utilization 0.9 \
     "${PLATFORM_ARGS[@]}" \
-    --port $PORT &
+    --port "$PORT" &
   SERVER_PID=$!
-  wait_for_server $PORT
+  wait_for_server "$PORT"
 
   TAG=$(echo "$MODEL" | tr '/: \\n' '_____')
   OUT="${OUT_DIR}/${TAG}_${BACK}.json"
-  python3 tests/evals/gsm8k/gsm8k_eval.py --host http://127.0.0.1 --port $PORT --num-questions ${NUM_Q} --save-results ${OUT}
+  python3 tests/evals/gsm8k/gsm8k_eval.py --host http://127.0.0.1 --port "$PORT" --num-questions "${NUM_Q}" --save-results "${OUT}"
   python3 - <<PY
 import json; acc=json.load(open('${OUT}'))['accuracy']
 print(f"${MODEL} ${BACK}: accuracy {acc:.3f}")
@@ -9,10 +9,11 @@ ENV_FILE=$1
 
 # For testing on local vm, use `set -a` to export all variables
 source /etc/environment
-source $ENV_FILE
+# shellcheck source=/dev/null
+source "$ENV_FILE"
 
 remove_docker_container() {
-  docker rm -f $CONTAINER_NAME || true;
+  docker rm -f "$CONTAINER_NAME" || true;
 }
 
 trap remove_docker_container EXIT
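The added `# shellcheck source=/dev/null` directive addresses SC1090/SC1091: shellcheck cannot follow a `source` whose target is only known at runtime, and the directive tells it not to try. A minimal sketch with a hypothetical env file:

#!/bin/bash
ENV_FILE=$1              # e.g. ./staging.env, only known at runtime

# shellcheck source=/dev/null
source "$ENV_FILE"       # quoted so paths containing spaces still work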
@@ -41,13 +42,13 @@ echo
 echo "starting docker...$CONTAINER_NAME"
 echo
 docker run \
-  -v $DOWNLOAD_DIR:$DOWNLOAD_DIR \
+  -v "$DOWNLOAD_DIR":"$DOWNLOAD_DIR" \
-  --env-file $ENV_FILE \
+  --env-file "$ENV_FILE" \
   -e HF_TOKEN="$HF_TOKEN" \
-  -e TARGET_COMMIT=$BUILDKITE_COMMIT \
+  -e TARGET_COMMIT="$BUILDKITE_COMMIT" \
-  -e MODEL=$MODEL \
+  -e MODEL="$MODEL" \
   -e WORKSPACE=/workspace \
-  --name $CONTAINER_NAME \
+  --name "$CONTAINER_NAME" \
   -d \
   --privileged \
   --network host \
@@ -42,21 +42,21 @@ echo "lanching vllm..."
|
|||||||
echo "logging to $VLLM_LOG"
|
echo "logging to $VLLM_LOG"
|
||||||
echo
|
echo
|
||||||
|
|
||||||
vllm serve $MODEL \
|
vllm serve "$MODEL" \
|
||||||
--seed 42 \
|
--seed 42 \
|
||||||
--max-num-seqs $MAX_NUM_SEQS \
|
--max-num-seqs "$MAX_NUM_SEQS" \
|
||||||
--max-num-batched-tokens $MAX_NUM_BATCHED_TOKENS \
|
--max-num-batched-tokens "$MAX_NUM_BATCHED_TOKENS" \
|
||||||
--tensor-parallel-size $TENSOR_PARALLEL_SIZE \
|
--tensor-parallel-size "$TENSOR_PARALLEL_SIZE" \
|
||||||
--no-enable-prefix-caching \
|
--no-enable-prefix-caching \
|
||||||
--download_dir $DOWNLOAD_DIR \
|
--download_dir "$DOWNLOAD_DIR" \
|
||||||
--max-model-len $MAX_MODEL_LEN > "$VLLM_LOG" 2>&1 &
|
--max-model-len "$MAX_MODEL_LEN" > "$VLLM_LOG" 2>&1 &
|
||||||
|
|
||||||
|
|
||||||
echo "wait for 20 minutes.."
|
echo "wait for 20 minutes.."
|
||||||
echo
|
echo
|
||||||
# sleep 1200
|
# sleep 1200
|
||||||
# wait for 10 minutes...
|
# wait for 10 minutes...
|
||||||
for i in {1..120}; do
|
for _ in {1..120}; do
|
||||||
# TODO: detect other type of errors.
|
# TODO: detect other type of errors.
|
||||||
if grep -Fq "raise RuntimeError" "$VLLM_LOG"; then
|
if grep -Fq "raise RuntimeError" "$VLLM_LOG"; then
|
||||||
echo "Detected RuntimeError, exiting."
|
echo "Detected RuntimeError, exiting."
|
||||||
@@ -78,11 +78,11 @@ echo "logging to $BM_LOG"
|
|||||||
echo
|
echo
|
||||||
vllm bench serve \
|
vllm bench serve \
|
||||||
--backend vllm \
|
--backend vllm \
|
||||||
--model $MODEL \
|
--model "$MODEL" \
|
||||||
--dataset-name sonnet \
|
--dataset-name sonnet \
|
||||||
--dataset-path benchmarks/sonnet_4x.txt \
|
--dataset-path benchmarks/sonnet_4x.txt \
|
||||||
--sonnet-input-len $INPUT_LEN \
|
--sonnet-input-len "$INPUT_LEN" \
|
||||||
--sonnet-output-len $OUTPUT_LEN \
|
--sonnet-output-len "$OUTPUT_LEN" \
|
||||||
--ignore-eos > "$BM_LOG"
|
--ignore-eos > "$BM_LOG"
|
||||||
|
|
||||||
echo "completed..."
|
echo "completed..."
|
||||||
|
|||||||
@@ -76,16 +76,15 @@ mkdir -p "$INDICES_OUTPUT_DIR"
 # this indices have relative paths that could work as long as it is next to the wheel directory in s3
 # i.e., the wheels are always in s3://vllm-wheels/<commit>/
 # and indices can be placed in /<commit>/, or /nightly/, or /<version>/
-if [[ ! -z "$DEFAULT_VARIANT_ALIAS" ]]; then
-  alias_arg="--alias-to-default $DEFAULT_VARIANT_ALIAS"
-else
-  alias_arg=""
-fi
+alias_args=()
+if [[ -n "$DEFAULT_VARIANT_ALIAS" ]]; then
+  alias_args=(--alias-to-default "$DEFAULT_VARIANT_ALIAS")
+fi
 
 # HACK: we do not need regex module here, but it is required by pre-commit hook
 # To avoid any external dependency, we simply replace it back to the stdlib re module
 sed -i 's/import regex as re/import re/g' .buildkite/scripts/generate-nightly-index.py
-$PYTHON .buildkite/scripts/generate-nightly-index.py --version "$SUBPATH" --current-objects "$obj_json" --output-dir "$INDICES_OUTPUT_DIR" --comment "commit $BUILDKITE_COMMIT" $alias_arg
+$PYTHON .buildkite/scripts/generate-nightly-index.py --version "$SUBPATH" --current-objects "$obj_json" --output-dir "$INDICES_OUTPUT_DIR" --comment "commit $BUILDKITE_COMMIT" "${alias_args[@]}"
 
 # copy indices to /<commit>/ unconditionally
 echo "Uploading indices to $S3_COMMIT_PREFIX"
@@ -100,9 +99,9 @@ fi
 # re-generate and copy to /<pure_version>/ only if it does not have "dev" in the version
 if [[ "$version" != *"dev"* ]]; then
   echo "Re-generating indices for /$pure_version/"
-  rm -rf "$INDICES_OUTPUT_DIR/*"
+  rm -rf "${INDICES_OUTPUT_DIR:?}/*"
   mkdir -p "$INDICES_OUTPUT_DIR"
   # wheel-dir is overridden to be the commit directory, so that the indices point to the correct wheel path
-  $PYTHON .buildkite/scripts/generate-nightly-index.py --version "$pure_version" --wheel-dir "$SUBPATH" --current-objects "$obj_json" --output-dir "$INDICES_OUTPUT_DIR" --comment "version $pure_version" $alias_arg
+  $PYTHON .buildkite/scripts/generate-nightly-index.py --version "$pure_version" --wheel-dir "$SUBPATH" --current-objects "$obj_json" --output-dir "$INDICES_OUTPUT_DIR" --comment "version $pure_version" "${alias_args[@]}"
   aws s3 cp --recursive "$INDICES_OUTPUT_DIR/" "s3://$BUCKET/$pure_version/"
 fi
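Two bash idioms do the heavy lifting in this hunk. Holding an optional flag in an array (`alias_args=()`) means `"${alias_args[@]}"` expands to zero words when the flag is absent, where a quoted empty string would inject a spurious empty argument. And `"${VAR:?}"` aborts if the variable is unset or empty, so the `rm -rf` can never collapse toward the filesystem root. A sketch with hypothetical names:

#!/bin/bash
set -euo pipefail

# Optional flag as an array: zero words when empty, two when set.
extra_args=()
if [[ -n "${DEFAULT_ALIAS:-}" ]]; then
  extra_args=(--alias-to-default "$DEFAULT_ALIAS")
fi
show() { printf 'argv:'; printf ' %q' "$@"; echo; }   # stand-in for the generator
show --version "1.0" "${extra_args[@]}"

# ${VAR:?} turns an empty variable into a hard error instead of
# letting the glob expand against the root directory.
OUT_DIR=/tmp/indices-demo
mkdir -p "$OUT_DIR"
rm -rf "${OUT_DIR:?}"/*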
@@ -7,7 +7,7 @@ SUBPATH=$BUILDKITE_COMMIT
 S3_COMMIT_PREFIX="s3://$BUCKET/$SUBPATH/"
 
 RELEASE_VERSION=$(buildkite-agent meta-data get release-version)
-GIT_VERSION=$(git describe --exact-match --tags $BUILDKITE_COMMIT 2>/dev/null)
+GIT_VERSION=$(git describe --exact-match --tags "$BUILDKITE_COMMIT" 2>/dev/null)
 
 echo "Release version from Buildkite: $RELEASE_VERSION"
 
@@ -55,7 +55,7 @@ mkdir -p $DIST_DIR
 aws s3 cp --recursive --exclude "*" --include "vllm-${PURE_VERSION}*.whl" --exclude "*dev*" --exclude "*rc[0-9]*" "$S3_COMMIT_PREFIX" $DIST_DIR
 echo "Wheels copied to local directory"
 # generate source tarball
-git archive --format=tar.gz --output="$DIST_DIR/vllm-${PURE_VERSION}.tar.gz" $BUILDKITE_COMMIT
+git archive --format=tar.gz --output="$DIST_DIR/vllm-${PURE_VERSION}.tar.gz" "$BUILDKITE_COMMIT"
 ls -la $DIST_DIR
 
 # upload wheels to PyPI (only default variant, i.e. files without '+' in the name)
@@ -65,6 +65,6 @@ if [[ -z "$PYPI_WHEEL_FILES" ]]; then
   exit 1
 fi
 
-python3 -m twine check $PYPI_WHEEL_FILES
+python3 -m twine check "$PYPI_WHEEL_FILES"
-python3 -m twine upload --non-interactive --verbose $PYPI_WHEEL_FILES
+python3 -m twine upload --non-interactive --verbose "$PYPI_WHEEL_FILES"
 echo "Wheels uploaded to PyPI"
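One caveat worth noting: quoting `"$PYPI_WHEEL_FILES"` is only correct while the variable holds a single path; if it ever carries several wheels, the quotes would glue them into one argument. A hedged sketch of the more robust array form (the `dist/` layout here is hypothetical):

#!/bin/bash
set -euo pipefail

# Collect default-variant wheels (no '+' in the name) into an array
# so each filename stays a separate, intact argument.
wheels=()
for whl in dist/vllm-*.whl; do
  [[ -e "$whl" ]] || continue       # glob matched nothing
  [[ "$whl" == *+* ]] && continue   # skip variant builds
  wheels+=("$whl")
done

if [[ ${#wheels[@]} -eq 0 ]]; then
  echo "no wheels to upload" >&2
  exit 1
fi

python3 -m twine check "${wheels[@]}"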
@@ -55,7 +55,7 @@ mkdir -p all-rocm-wheels
 cp artifacts/rocm-base-wheels/*.whl all-rocm-wheels/ 2>/dev/null || true
 cp artifacts/rocm-vllm-wheel/*.whl all-rocm-wheels/ 2>/dev/null || true
 
-WHEEL_COUNT=$(ls all-rocm-wheels/*.whl 2>/dev/null | wc -l)
+WHEEL_COUNT=$(find all-rocm-wheels -maxdepth 1 -name '*.whl' 2>/dev/null | wc -l)
 echo "Total wheels to upload: $WHEEL_COUNT"
 
 if [ "$WHEEL_COUNT" -eq 0 ]; then
@@ -115,7 +115,7 @@ if [[ "$BUILDKITE_BRANCH" == "main" && "$BUILDKITE_PULL_REQUEST" == "false" ]] |
 fi
 
 # Extract version from vLLM wheel and update version-specific index
-VLLM_WHEEL=$(ls all-rocm-wheels/vllm*.whl 2>/dev/null | head -1)
+VLLM_WHEEL=$(find all-rocm-wheels -maxdepth 1 -name 'vllm*.whl' 2>/dev/null | head -1)
 if [ -n "$VLLM_WHEEL" ]; then
   VERSION=$(unzip -p "$VLLM_WHEEL" '**/METADATA' | grep '^Version: ' | cut -d' ' -f2)
   echo "Version in wheel: $VERSION"
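Replacing `ls ... | wc -l` with `find -maxdepth 1 -name '*.whl'` follows shellcheck SC2012: `ls` formats output for humans and can mangle unusual filenames, while `find` prints one entry per line and degrades to an empty list when nothing matches. A minimal sketch:

#!/bin/bash
set -euo pipefail
mkdir -p /tmp/wheels-demo
touch /tmp/wheels-demo/a.whl /tmp/wheels-demo/b.whl

# Count matching files without parsing ls output.
count=$(find /tmp/wheels-demo -maxdepth 1 -name '*.whl' | wc -l)
echo "wheels: $count"

# First match, or empty if none.
first=$(find /tmp/wheels-demo -maxdepth 1 -name '*.whl' | head -1)
echo "first: ${first:-<none>}"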
File diff suppressed because it is too large
@@ -14,3 +14,8 @@ steps:
   - pytest -v -s basic_correctness/test_cumem.py
   - pytest -v -s basic_correctness/test_basic_correctness.py
   - pytest -v -s basic_correctness/test_cpu_offload.py
+  mirror:
+    amd:
+      device: mi325_1
+      depends_on:
+      - image-build-amd
@@ -17,3 +17,15 @@ steps:
   - tests/benchmarks/
   commands:
   - pytest -v -s benchmarks/
+
+- label: Attention Benchmarks Smoke Test (B200)
+  device: b200
+  num_gpus: 2
+  optional: true
+  working_dir: "/vllm-workspace/"
+  timeout_in_minutes: 10
+  source_file_dependencies:
+  - benchmarks/attention_benchmarks/
+  - vllm/v1/attention/
+  commands:
+  - python3 benchmarks/attention_benchmarks/benchmark.py --backends flash flashinfer --batch-specs "8q1s1k" --repeats 1 --warmup-iters 1
@@ -2,7 +2,7 @@ group: Compile
 depends_on:
 - image-build
 steps:
-- label: Sequence Parallel Tests (2 GPUs)
+- label: Sequence Parallel Correctness Tests (2 GPUs)
   timeout_in_minutes: 50
   working_dir: "/vllm-workspace/"
   num_devices: 2
@@ -11,12 +11,12 @@ steps:
   - vllm/compilation/
   - vllm/v1/worker/
   - vllm/v1/cudagraph_dispatcher.py
-  - tests/distributed/test_sequence_parallel.py
+  - tests/compile/correctness_e2e/test_sequence_parallel.py
   commands:
   - export VLLM_TEST_CLEAN_GPU_MEMORY=1
-  - pytest -v -s tests/distributed/test_sequence_parallel.py
+  - pytest -v -s tests/compile/correctness_e2e/test_sequence_parallel.py
 
-- label: Sequence Parallel Tests (2xH100)
+- label: Sequence Parallel Correctness Tests (2xH100)
   timeout_in_minutes: 50
   working_dir: "/vllm-workspace/"
   device: h100
@@ -24,24 +24,30 @@ steps:
   num_devices: 2
   commands:
   - export VLLM_TEST_CLEAN_GPU_MEMORY=1
-  - pytest -v -s tests/distributed/test_sequence_parallel.py
+  - pytest -v -s tests/compile/correctness_e2e/test_sequence_parallel.py
 
+- label: AsyncTP Correctness Tests (2xH100)
+  timeout_in_minutes: 50
+  working_dir: "/vllm-workspace/"
+  device: h100
+  optional: true
+  num_devices: 2
+  commands:
+  - export VLLM_TEST_CLEAN_GPU_MEMORY=1
+  - pytest -v -s tests/compile/correctness_e2e/test_async_tp.py
+
 - label: Distributed Compile Unit Tests (2xH100)
-  timeout_in_minutes: 40
+  timeout_in_minutes: 20
   working_dir: "/vllm-workspace/"
   device: h100
   num_devices: 2
   source_file_dependencies:
   - vllm/compilation/
   - vllm/model_executor/layers
-  - tests/compile/distributed/test_fusion_all_reduce.py
-  - tests/compile/distributed/test_sequence_parallelism.py
-  - tests/compile/distributed/test_async_tp.py
+  - tests/compile/passes/distributed/
   commands:
   - export VLLM_TEST_CLEAN_GPU_MEMORY=1
-  - pytest -v -s tests/compile/distributed/test_fusion_all_reduce.py
-  - pytest -v -s tests/compile/distributed/test_sequence_parallelism.py
-  - pytest -v -s tests/compile/distributed/test_async_tp.py
+  - pytest -s -v tests/compile/passes/distributed
 
 - label: Fusion and Compile Unit Tests (B200)
   timeout_in_minutes: 20
@@ -55,17 +61,17 @@ steps:
   - vllm/model_executor/layers/attention/attention.py
   - vllm/v1/attention/backends/flashinfer.py
   - vllm/compilation/ # TODO(luka) limit to vllm/compilation/passes
-  - tests/compile/test_fusion_attn.py
+  - tests/compile/passes/test_fusion_attn.py
-  - tests/compile/test_silu_mul_quant_fusion.py
+  - tests/compile/passes/test_silu_mul_quant_fusion.py
-  - tests/compile/distributed/test_fusion_all_reduce.py
+  - tests/compile/passes/distributed/test_fusion_all_reduce.py
   - tests/compile/fullgraph/test_full_graph.py
   commands:
   # b200 runners are limited, so we limit the tests to the minimum set only supported on Blackwell
   - nvidia-smi
-  - pytest -v -s tests/compile/test_fusion_attn.py -k FLASHINFER
+  - pytest -v -s tests/compile/passes/test_fusion_attn.py -k FLASHINFER
-  - pytest -v -s tests/compile/test_silu_mul_quant_fusion.py
+  - pytest -v -s tests/compile/passes/test_silu_mul_quant_fusion.py
   # this runner has 2 GPUs available even though num_devices=2 is not set
-  - pytest -v -s tests/compile/distributed/test_fusion_all_reduce.py
+  - pytest -v -s tests/compile/passes/distributed/test_fusion_all_reduce.py
   # test_fp8_kv_scale_compile requires FlashAttention (not supported on default L4/L40)
   # TODO(luka) move to H100 once pass tests run on H100
   - pytest -v -s tests/compile/fullgraph/test_full_graph.py::test_fp8_kv_scale_compile
@@ -115,13 +121,10 @@ steps:
   optional: true
   commands:
   - nvidia-smi
-  # Run all models and attn backends but only Inductor partition and native custom ops
+  # Run all models but only FLASHINFER, Inductor partition and native custom ops
-  # -k "inductor_partition and not +rms_norm and not +quant_fp8"
   # Qwen requires +quant_fp8 as -quant_fp8 rms+quant fusion is not supported
-  # -k "inductor_partition and not +rms_norm and +quant_fp8 and qwen3"
-  # Run just llama3 (fp8 & fp4) for all config combinations
-  # -k "llama-3"
-  - pytest -v -s tests/compile/fusions_e2e/test_tp1_quant.py -k "inductor_partition and not +rms_norm and not +quant_fp8" -k "inductor_partition and not +rms_norm and +quant_fp8 and qwen3" -k "llama-3"
+  # Run just llama3 (fp8 & fp4) for all config combinations (only inductor partition)
+  - pytest -v -s tests/compile/fusions_e2e/test_tp1_quant.py -k "inductor_partition and (FLASHINFER and not +rms_norm and (not +quant_fp8 or +quant_fp8 and qwen3) or llama-3)"
 
 - label: Fusion E2E TP2 Quick (H100)
   timeout_in_minutes: 20
@@ -156,7 +159,7 @@ steps:
   - tests/compile/fusions_e2e/
   commands:
   - nvidia-smi
-  # Run just llama3 (fp4 & fp8 & bf16) for all config combinations
+  # Run just llama3 (fp8 & bf16) for all config combinations
   - pytest -v -s tests/compile/fusions_e2e/test_tp2_ar_rms.py -k "llama-3"
 
 - label: Fusion E2E TP2 AsyncTP Config Sweep (H100)
@@ -191,7 +194,8 @@ steps:
   - tests/compile/fusions_e2e/
   commands:
   - nvidia-smi
-  # Run all models and attn backends but only Inductor partition and native custom ops
+  # Run all models but only FLASHINFER, Inductor partition and native custom ops
+  # include qwen with +quant_fp8 as -quant_fp8 rms+quant fusion is not supported
   # for ar-rms-quant-fp4, also sweep llama3
-  - pytest -v -s tests/compile/fusions_e2e/test_tp2_ar_rms.py -k "inductor_partition and not +rms_norm and not +quant_fp8" -k "Llama-3.1-8B-Instruct-FP4"
+  - pytest -v -s tests/compile/fusions_e2e/test_tp2_ar_rms.py -k "(FLASHINFER and inductor_partition and not +rms_norm and (not +quant_fp8 or +quant_fp8 and qwen3)) or Llama-3.1-8B-Instruct-FP4"
-  - pytest -v -s tests/compile/fusions_e2e/test_tp2_async_tp.py -k "inductor_partition and not +rms_norm and not +quant_fp8"
+  - pytest -v -s tests/compile/fusions_e2e/test_tp2_async_tp.py -k "FLASHINFER and inductor_partition and not +rms_norm and (not +quant_fp8 or +quant_fp8 and qwen3)"
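The `-k` strings in these commands are pytest keyword expressions: boolean filters over test IDs supporting `and`, `or`, `not`, and parentheses, which is how one CI step can sweep FLASHINFER inductor-partition configs while still picking up every llama-3 case. A toy invocation with the same shape (the test path is hypothetical):

pytest -v tests/example_test.py \
  -k "(FLASHINFER and inductor_partition and not +rms_norm) or llama-3"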
@@ -9,6 +9,7 @@ steps:
   - tests/cuda
   commands:
   - pytest -v -s cuda/test_cuda_context.py
+  - pytest -v -s cuda/test_platform_no_cuda_init.py
 
 - label: Cudagraph
   timeout_in_minutes: 20
@@ -62,6 +62,7 @@ steps:
   - tests/compile/fullgraph/test_basic_correctness.py
   - examples/offline_inference/rlhf.py
   - examples/offline_inference/rlhf_colocate.py
+  - examples/offline_inference/new_weight_syncing/
   - tests/examples/offline_inference/data_parallel.py
   - tests/v1/distributed
   - tests/v1/engine/test_engine_core_client.py
@@ -96,9 +97,14 @@ steps:
   - pytest -v -s distributed/test_symm_mem_allreduce.py
   # TODO: create a dedicated test section for multi-GPU example tests
   # when we have multiple distributed example tests
+  # OLD rlhf examples
   - cd ../examples/offline_inference
   - VLLM_ALLOW_INSECURE_SERIALIZATION=1 python3 rlhf.py
   - VLLM_ALLOW_INSECURE_SERIALIZATION=1 RAY_DEDUP_LOGS=0 python3 rlhf_colocate.py
+  # NEW rlhf examples
+  - cd new_weight_syncing
+  - VLLM_ALLOW_INSECURE_SERIALIZATION=1 python3 rlhf_nccl.py
+  - VLLM_ALLOW_INSECURE_SERIALIZATION=1 python3 rlhf_ipc.py
 
 - label: Distributed Tests (8 GPUs)(H100)
   timeout_in_minutes: 10
@@ -140,6 +146,7 @@ steps:
   num_devices: 2
   commands:
   - pytest -v -s tests/distributed/test_context_parallel.py
+  # - VLLM_ALLOW_INSECURE_SERIALIZATION=1 python3 examples/offline_inference/new_weight_syncing/rlhf_async_new_apis.py --- failing, need to re-enable
   - VLLM_USE_DEEP_GEMM=1 VLLM_LOGGING_LEVEL=DEBUG python3 examples/offline_inference/data_parallel.py --model=Qwen/Qwen1.5-MoE-A2.7B -tp=1 -dp=2 --max-model-len=2048 --all2all-backend=deepep_high_throughput
   - pytest -v -s tests/v1/distributed/test_dbo.py
 
@@ -159,6 +166,7 @@ steps:
   num_devices: 2
   num_nodes: 2
   no_plugin: true
+  optional: true # TODO: revert once infra issue solved
   source_file_dependencies:
   - vllm/distributed/
   - vllm/engine/
@@ -191,7 +199,18 @@ steps:
   - uv pip install --system -r /vllm-workspace/requirements/kv_connectors.txt
   - DP_EP=1 bash v1/kv_connector/nixl_integration/config_sweep_accuracy_test.sh
 
-- label: Pipeline + Context Parallelism (4 GPUs))
+- label: CrossLayer KV layout Distributed NixlConnector PD accuracy tests (4 GPUs)
+  timeout_in_minutes: 30
+  working_dir: "/vllm-workspace/tests"
+  num_devices: 4
+  source_file_dependencies:
+  - vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py
+  - tests/v1/kv_connector/nixl_integration/
+  commands:
+  - uv pip install --system -r /vllm-workspace/requirements/kv_connectors.txt
+  - CROSS_LAYERS_BLOCKS=True bash v1/kv_connector/nixl_integration/config_sweep_accuracy_test.sh
+
+- label: Pipeline + Context Parallelism (4 GPUs)
   timeout_in_minutes: 60
   working_dir: "/vllm-workspace/tests"
   num_devices: 4
@@ -29,15 +29,11 @@ steps:
   commands:
   - bash .buildkite/scripts/scheduled_integration_test/qwen30b_a3b_fp8_block_ep_eplb.sh 0.8 200 8020 2 1
 
-- label: Prime-RL Integration (2 GPUs)
+- label: DeepSeek V2-Lite Prefetch Offload Accuracy (H100)
-  timeout_in_minutes: 30
+  timeout_in_minutes: 60
+  device: h100
   optional: true
-  soft_fail: true
+  num_devices: 1
-  num_devices: 2
   working_dir: "/vllm-workspace"
-  source_file_dependencies:
-  - vllm/
-  - .buildkite/scripts/run-prime-rl-test.sh
   commands:
-  - nvidia-smi
+  - bash .buildkite/scripts/scheduled_integration_test/deepseek_v2_lite_prefetch_offload.sh 0.25 200 8030
-  - bash .buildkite/scripts/run-prime-rl-test.sh
@@ -14,7 +14,7 @@ steps:
   commands:
   - pytest -v -s engine test_sequence.py test_config.py test_logger.py test_vllm_port.py
 
-- label: V1 e2e + engine
+- label: V1 e2e + engine (1 GPU)
   timeout_in_minutes: 45
   source_file_dependencies:
   - vllm/
@@ -28,3 +28,43 @@ steps:
   - pytest -v -s v1/engine/test_preprocess_error_handling.py
   # Run the rest of v1/engine tests
   - pytest -v -s v1/engine --ignore v1/engine/test_preprocess_error_handling.py
+  mirror:
+    amd:
+      device: mi325_1
+      depends_on:
+      - image-build-amd
+      commands:
+      - pytest -v -s v1/e2e
+      - pytest -v -s v1/engine
+
+- label: V1 e2e (2 GPUs)
+  timeout_in_minutes: 60 # TODO: Fix timeout after we have more confidence in the test stability
+  optional: true
+  num_devices: 2
+  source_file_dependencies:
+  - vllm/
+  - tests/v1/e2e
+  commands:
+  # Only run tests that need exactly 2 GPUs
+  - pytest -v -s v1/e2e/test_spec_decode.py -k "tensor_parallelism"
+  mirror:
+    amd:
+      device: mi325_2
+      depends_on:
+      - image-build-amd
+
+- label: V1 e2e (4 GPUs)
+  timeout_in_minutes: 60 # TODO: Fix timeout after we have more confidence in the test stability
+  optional: true
+  num_devices: 4
+  source_file_dependencies:
+  - vllm/
+  - tests/v1/e2e
+  commands:
+  # Only run tests that need 4 GPUs
+  - pytest -v -s v1/e2e/test_spec_decode.py -k "eagle_correctness_heavy"
+  mirror:
+    amd:
+      device: mi325_4
+      depends_on:
+      - image-build-amd
@@ -24,6 +24,11 @@ steps:
   - pytest -v -s entrypoints/llm --ignore=entrypoints/llm/test_generate.py --ignore=entrypoints/llm/test_collective_rpc.py
   - pytest -v -s entrypoints/llm/test_generate.py # it needs a clean process
   - pytest -v -s entrypoints/offline_mode # Needs to avoid interference with other tests
+  mirror:
+    amd:
+      device: mi325_1
+      depends_on:
+      - image-build-amd
 
 - label: Entrypoints Integration (API Server 1)
   timeout_in_minutes: 130
@@ -42,15 +47,13 @@ steps:
   working_dir: "/vllm-workspace/tests"
   source_file_dependencies:
   - vllm/
-  - tests/tool_use
-  - tests/entrypoints/sleep
-  - tests/entrypoints/instrumentator
   - tests/entrypoints/rpc
+  - tests/entrypoints/instrumentator
+  - tests/tool_use
   commands:
   - export VLLM_WORKER_MULTIPROC_METHOD=spawn
-  - PYTHONPATH=/vllm-workspace pytest -v -s entrypoints/rpc
   - pytest -v -s entrypoints/instrumentator
-  - pytest -v -s entrypoints/sleep
+  - PYTHONPATH=/vllm-workspace pytest -v -s entrypoints/rpc
   - pytest -v -s tool_use
 
 - label: Entrypoints Integration (Pooling)
@@ -62,6 +65,11 @@ steps:
   commands:
   - export VLLM_WORKER_MULTIPROC_METHOD=spawn
   - pytest -v -s entrypoints/pooling
+  mirror:
+    amd:
+      device: mi325_1
+      depends_on:
+      - image-build-amd
 
 - label: Entrypoints Integration (Responses API)
   timeout_in_minutes: 50
@@ -20,4 +20,19 @@ steps:
   - tests/distributed/test_eplb_execute.py
   commands:
   - pytest -v -s distributed/test_eplb_execute.py
   - pytest -v -s distributed/test_eplb_spec_decode.py
+
+- label: Elastic EP Scaling Test
+  timeout_in_minutes: 20
+  device: b200
+  optional: true
+  working_dir: "/vllm-workspace/tests"
+  num_devices: 4
+  source_file_dependencies:
+  - vllm/distributed/
+  - vllm/engine/
+  - vllm/executor/
+  - vllm/compilation/
+  - tests/distributed/
+  commands:
+  - pytest -v -s distributed/test_elastic_ep.py
@@ -44,7 +44,8 @@ steps:
   - vllm/envs.py
   - vllm/config
   commands:
-  - pytest -v -s kernels/moe --shard-id=$$BUILDKITE_PARALLEL_JOB --num-shards=$$BUILDKITE_PARALLEL_JOB_COUNT
+  - pytest -v -s kernels/moe --ignore=kernels/moe/test_modular_oai_triton_moe.py --shard-id=$$BUILDKITE_PARALLEL_JOB --num-shards=$$BUILDKITE_PARALLEL_JOB_COUNT
+  - pytest -v -s kernels/moe/test_modular_oai_triton_moe.py --shard-id=$$BUILDKITE_PARALLEL_JOB --num-shards=$$BUILDKITE_PARALLEL_JOB_COUNT
   parallelism: 2
 
 - label: Kernels Mamba Test
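A note on the doubled dollar signs above: in Buildkite pipeline files, `$$BUILDKITE_PARALLEL_JOB` escapes the `$` so the agent's upload-time interpolation leaves it alone and the runner's shell expands the variable instead. After interpolation, the shell sees roughly this (a sketch, not captured agent output):

pytest -v -s kernels/moe --ignore=kernels/moe/test_modular_oai_triton_moe.py \
  --shard-id=$BUILDKITE_PARALLEL_JOB --num-shards=$BUILDKITE_PARALLEL_JOB_COUNT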
@@ -70,7 +71,7 @@ steps:
   - tests/kernels/moe/test_batched_deepgemm.py
   - tests/kernels/attention/test_deepgemm_attention.py
   commands:
-  - pytest -v -s kernels/quantization/test_block_fp8.py -k deep_gemm
+  - pytest -v -s kernels/quantization/test_block_fp8.py
   - pytest -v -s kernels/moe/test_deepgemm.py
   - pytest -v -s kernels/moe/test_batched_deepgemm.py
   - pytest -v -s kernels/attention/test_deepgemm_attention.py
@@ -115,6 +116,7 @@ steps:
   - pytest -v -s tests/kernels/moe/test_nvfp4_moe.py
   - pytest -v -s tests/kernels/moe/test_ocp_mx_moe.py
   - pytest -v -s tests/kernels/moe/test_flashinfer.py
+  - pytest -v -s tests/kernels/moe/test_flashinfer_moe.py
   - pytest -v -s tests/kernels/moe/test_cutedsl_moe.py
   # e2e
   - pytest -v -s tests/models/quantization/test_nvfp4.py
@@ -154,9 +156,7 @@ steps:
   commands:
   - pytest -v -s kernels/moe/test_deepep_deepgemm_moe.py
   - pytest -v -s kernels/moe/test_deepep_moe.py
-  - pytest -v -s kernels/moe/test_pplx_cutlass_moe.py
-  # - pytest -v -s kernels/moe/test_pplx_moe.py - failing on main
 
 - label: Kernels Fp4 MoE Test (B200)
   timeout_in_minutes: 60
   device: b200
@@ -11,17 +11,17 @@ steps:
   commands:
   - pytest -s -v evals/gsm8k/test_gsm8k_correctness.py --config-list-file=configs/models-small.txt
 
-- label: LM Eval Large Models (4 GPUs)(A100)
-  device: a100
-  optional: true
-  num_devices: 4
-  working_dir: "/vllm-workspace/.buildkite/lm-eval-harness"
-  source_file_dependencies:
-  - csrc/
-  - vllm/model_executor/layers/quantization
-  commands:
-  - export VLLM_WORKER_MULTIPROC_METHOD=spawn
-  - pytest -s -v test_lm_eval_correctness.py --config-list-file=configs/models-large.txt --tp-size=4
+# - label: LM Eval Large Models (4 GPUs)(A100)
+#   device: a100
+#   optional: true
+#   num_devices: 4
+#   working_dir: "/vllm-workspace/.buildkite/lm-eval-harness"
+#   source_file_dependencies:
+#   - csrc/
+#   - vllm/model_executor/layers/quantization
+#   commands:
+#   - export VLLM_WORKER_MULTIPROC_METHOD=spawn
+#   - pytest -s -v test_lm_eval_correctness.py --config-list-file=configs/models-large.txt --tp-size=4
 
 - label: LM Eval Large Models (4 GPUs)(H100)
   device: h100
@@ -73,3 +73,29 @@ steps:
   num_devices: 2
   commands:
   - pytest -s -v evals/gsm8k/test_gsm8k_correctness.py --config-list-file=evals/gsm8k/configs/moe-refactor-dp-ep/config-b200.txt
+
+- label: GPQA Eval (GPT-OSS) (H100)
+  timeout_in_minutes: 120
+  device: h100
+  optional: true
+  num_devices: 2
+  source_file_dependencies:
+  - csrc/
+  - vllm/model_executor/layers/quantization
+  - tests/evals/gpt_oss/
+  commands:
+  - uv pip install --system 'gpt-oss[eval]==0.0.5'
+  - pytest -s -v evals/gpt_oss/test_gpqa_correctness.py --config-list-file=configs/models-h100.txt
+
+- label: GPQA Eval (GPT-OSS) (B200)
+  timeout_in_minutes: 120
+  device: b200
+  optional: true
+  num_devices: 2
+  source_file_dependencies:
+  - csrc/
+  - vllm/model_executor/layers/quantization
+  - tests/evals/gpt_oss/
+  commands:
+  - uv pip install --system 'gpt-oss[eval]==0.0.5'
+  - pytest -s -v evals/gpt_oss/test_gpqa_correctness.py --config-list-file=configs/models-b200.txt
@@ -9,6 +9,7 @@ steps:
   - tests/v1
   commands:
   - uv pip install --system -r /vllm-workspace/requirements/kv_connectors.txt
+  - export VLLM_WORKER_MULTIPROC_METHOD=spawn
   # split the test to avoid interference
   - pytest -v -s -m 'not cpu_test' v1/core
   - pytest -v -s v1/executor
@@ -16,6 +17,7 @@ steps:
   - pytest -v -s v1/sample
   - pytest -v -s v1/logits_processors
   - pytest -v -s v1/worker
+  # TODO: create another `optional` test group for slow tests
   - pytest -v -s -m 'not slow_test' v1/spec_decode
   - pytest -v -s -m 'not cpu_test' v1/kv_connector/unit
   - pytest -v -s -m 'not cpu_test' v1/metrics
@@ -25,6 +27,11 @@ steps:
   # Integration test for streaming correctness (requires special branch).
   - pip install -U git+https://github.com/robertgshaw2-redhat/lm-evaluation-harness.git@streaming-api
   - pytest -v -s entrypoints/openai/correctness/test_lmeval.py::test_lm_eval_accuracy_v1_engine
+  mirror:
+    amd:
+      device: mi325_1
+      depends_on:
+      - image-build-amd
 
 - label: V1 Others (CPU)
   depends_on:
@@ -72,7 +79,7 @@ steps:
   - python3 offline_inference/vision_language_multi_image.py --seed 0
   - python3 offline_inference/encoder_decoder_multimodal.py --model-type whisper --seed 0
   # for pooling models
-  - python3 pooling/pooling/vision_language_pooling.py --seed 0
+  - python3 pooling/embed/vision_embedding_offline.py --seed 0
   # for features demo
   - python3 offline_inference/prefix_caching.py
   - python3 offline_inference/llm_engine_example.py
@@ -108,9 +115,11 @@ steps:
   timeout_in_minutes: 50
   source_file_dependencies:
   - vllm/
+  - tests/detokenizer
   - tests/multimodal
   - tests/utils_
   commands:
+  - pytest -v -s detokenizer
   - pytest -v -s -m 'not cpu_test' multimodal
   - pytest -v -s utils_
 
@@ -122,6 +131,8 @@ steps:
   - vllm/
   - tests/test_inputs.py
   - tests/test_outputs.py
+  - tests/test_pooling_params.py
+  - tests/test_ray_env.py
   - tests/multimodal
   - tests/renderers
   - tests/standalone_tests/lazy_imports.py
@@ -134,6 +145,8 @@ steps:
   - python3 standalone_tests/lazy_imports.py
   - pytest -v -s test_inputs.py
   - pytest -v -s test_outputs.py
+  - pytest -v -s test_pooling_params.py
+  - pytest -v -s test_ray_env.py
   - pytest -v -s -m 'cpu_test' multimodal
   - pytest -v -s renderers
   - pytest -v -s tokenizers_
@@ -141,20 +154,6 @@ steps:
   - pytest -v -s transformers_utils
   - pytest -v -s config
 
-- label: GPT-OSS Eval (B200)
-  timeout_in_minutes: 60
-  working_dir: "/vllm-workspace/"
-  device: b200
-  optional: true
-  source_file_dependencies:
-  - tests/evals/gpt_oss
-  - vllm/model_executor/models/gpt_oss.py
-  - vllm/model_executor/layers/quantization/mxfp4.py
-  - vllm/v1/attention/backends/flashinfer.py
-  commands:
-  - uv pip install --system 'gpt-oss[eval]==0.0.5'
-  - pytest -s -v tests/evals/gpt_oss/test_gpqa_correctness.py --model openai/gpt-oss-20b --metric 0.58
-
 - label: Batch Invariance (H100)
   timeout_in_minutes: 25
   device: h100
@@ -4,7 +4,6 @@ depends_on:
 steps:
 - label: Basic Models Tests (Initialization)
   timeout_in_minutes: 45
-  mirror_hardwares: [amdexperimental]
   torch_nightly: true
   source_file_dependencies:
   - vllm/
@@ -16,7 +15,6 @@ steps:
 
 - label: Basic Models Tests (Extra Initialization) %N
   timeout_in_minutes: 45
-  mirror_hardwares: [amdexperimental]
   torch_nightly: true
   source_file_dependencies:
   - vllm/model_executor/models/
@@ -33,10 +31,17 @@ steps:
   timeout_in_minutes: 45
   source_file_dependencies:
   - vllm/
+  - tests/models/test_terratorch.py
   - tests/models/test_transformers.py
   - tests/models/test_registry.py
   commands:
-  - pytest -v -s models/test_transformers.py models/test_registry.py
+  - pytest -v -s models/test_terratorch.py models/test_transformers.py models/test_registry.py
+  mirror:
+    amd:
+      device: mi325_1
+      depends_on:
+      - image-build-amd
+
 
 - label: Basic Models Test (Other CPU) # 5min
   depends_on:
@@ -4,7 +4,6 @@ depends_on:
 steps:
 - label: Language Models Tests (Standard)
   timeout_in_minutes: 25
-  mirror_hardwares: [amdexperimental]
   torch_nightly: true
   source_file_dependencies:
   - vllm/
@@ -16,7 +15,6 @@ steps:
 
 - label: Language Models Tests (Extra Standard) %N
   timeout_in_minutes: 45
-  mirror_hardwares: [amdexperimental]
   torch_nightly: true
   source_file_dependencies:
   - vllm/model_executor/models/
@@ -32,7 +30,6 @@ steps:
 
 - label: Language Models Tests (Hybrid) %N
   timeout_in_minutes: 75
-  mirror_hardwares: [amdexperimental]
   torch_nightly: true
   source_file_dependencies:
   - vllm/
@@ -40,7 +37,7 @@ steps:
   commands:
   # Install fast path packages for testing against transformers
   # Note: also needed to run plamo2 model in vLLM
-  - uv pip install --system --no-build-isolation 'git+https://github.com/state-spaces/mamba@v2.2.5'
+  - uv pip install --system --no-build-isolation 'git+https://github.com/state-spaces/mamba@v2.3.0'
   - uv pip install --system --no-build-isolation 'git+https://github.com/Dao-AILab/causal-conv1d@v1.5.2'
   # Shard hybrid language model tests
   - pytest -v -s models/language/generation -m hybrid_model --num-shards=$$BUILDKITE_PARALLEL_JOB_COUNT --shard-id=$$BUILDKITE_PARALLEL_JOB
@@ -48,7 +45,6 @@ steps:
 
 - label: Language Models Test (Extended Generation) # 80min
   timeout_in_minutes: 110
-  mirror_hardwares: [amdexperimental]
   optional: true
   source_file_dependencies:
   - vllm/
@@ -56,13 +52,21 @@ steps:
   commands:
   # Install fast path packages for testing against transformers
   # Note: also needed to run plamo2 model in vLLM
-  - uv pip install --system --no-build-isolation 'git+https://github.com/state-spaces/mamba@v2.2.5'
+  - uv pip install --system --no-build-isolation 'git+https://github.com/state-spaces/mamba@v2.3.0'
   - uv pip install --system --no-build-isolation 'git+https://github.com/Dao-AILab/causal-conv1d@v1.5.2'
   - pytest -v -s models/language/generation -m '(not core_model) and (not hybrid_model)'
+  mirror:
+    amd:
+      device: mi325_1
+      depends_on:
+      - image-build-amd
+      commands:
+      - uv pip install --system --no-build-isolation 'git+https://github.com/AndreasKaratzas/mamba@fix-rocm-7.0-warp-size-constexpr'
+      - uv pip install --system --no-build-isolation 'git+https://github.com/Dao-AILab/causal-conv1d@v1.5.2'
+      - pytest -v -s models/language/generation -m '(not core_model) and (not hybrid_model)'
 
 - label: Language Models Test (PPL)
   timeout_in_minutes: 110
-  mirror_hardwares: [amdexperimental]
   optional: true
   source_file_dependencies:
   - vllm/
@@ -72,17 +76,20 @@ steps:
 
 - label: Language Models Test (Extended Pooling) # 36min
   timeout_in_minutes: 50
-  mirror_hardwares: [amdexperimental]
   optional: true
   source_file_dependencies:
   - vllm/
   - tests/models/language/pooling
   commands:
   - pytest -v -s models/language/pooling -m 'not core_model'
+  mirror:
+    amd:
+      device: mi325_1
+      depends_on:
+      - image-build-amd
 
 - label: Language Models Test (MTEB)
   timeout_in_minutes: 110
-  mirror_hardwares: [amdexperimental]
   optional: true
   source_file_dependencies:
   - vllm/
@@ -20,6 +20,7 @@ steps:
   source_file_dependencies:
   - vllm/
   - tests/models/multimodal
+  - tests/models/registry.py
   device: cpu
   commands:
   - pip install git+https://github.com/TIGER-AI-Lab/Mantis.git
@@ -30,6 +31,7 @@ steps:
   source_file_dependencies:
   - vllm/
   - tests/models/multimodal
+  - tests/models/registry.py
   commands:
   - pip install git+https://github.com/TIGER-AI-Lab/Mantis.git
   - pytest -v -s models/multimodal/processing/test_tensor_schema.py
@@ -70,12 +72,3 @@ steps:
   commands:
   - pip install git+https://github.com/TIGER-AI-Lab/Mantis.git
   - pytest -v -s models/multimodal/generation/test_common.py -m 'split(group=1) and not core_model'
-
-# This test is used only in PR development phase to test individual models and should never run on main
-- label: Custom Models
-  optional: true
-  commands:
-  - echo 'Testing custom models...'
-  # PR authors can temporarily add commands below to test individual models
-  # e.g. pytest -v -s models/encoder_decoder/vision_language/test_mllama.py
-  # *To avoid merge conflicts, remember to REMOVE (not just comment out) them before merging the PR*
@@ -19,6 +19,10 @@ steps:
   - pip install -e ./plugins/prithvi_io_processor_plugin
   - pytest -v -s plugins_tests/test_io_processor_plugins.py
   - pip uninstall prithvi_io_processor_plugin -y
+  # test bge_m3_sparse io_processor plugin
+  - pip install -e ./plugins/bge_m3_sparse_plugin
+  - pytest -v -s plugins_tests/test_bge_m3_sparse_io_processor_plugins.py
+  - pip uninstall bge_m3_sparse_plugin -y
   # end io_processor plugins test
   # begin stat_logger plugins test
   - pip install -e ./plugins/vllm_add_dummy_stat_logger
@@ -3,7 +3,7 @@ depends_on:
 - image-build
 steps:
 - label: PyTorch Compilation Unit Tests
-  timeout_in_minutes: 30
+  timeout_in_minutes: 10
   source_file_dependencies:
   - vllm/
   - tests/compile
@@ -17,6 +17,14 @@ steps:
   # (using -0 for proper path handling)
   - "find compile/ -maxdepth 1 -name 'test_*.py' -print0 | xargs -0 -n1 -I{} pytest -s -v '{}'"
+
+- label: PyTorch Compilation Passes Unit Tests
+  timeout_in_minutes: 20
+  source_file_dependencies:
+  - vllm/
+  - tests/compile/passes
+  commands:
+  - pytest -s -v compile/passes --ignore compile/passes/distributed

 - label: PyTorch Fullgraph Smoke Test
   timeout_in_minutes: 35
   source_file_dependencies:
.buildkite/test_areas/ray_compat.yaml  (new file, 16 lines)
@@ -0,0 +1,16 @@
+group: Ray Compatibility
+depends_on:
+- image-build
+steps:
+- label: Ray Dependency Compatibility Check
+  # Informational only — does not block the pipeline.
+  # If this fails, it means the PR introduces a dependency that
+  # conflicts with Ray's dependency constraints.
+  # See https://github.com/vllm-project/vllm/issues/33599
+  soft_fail: true
+  timeout_in_minutes: 10
+  source_file_dependencies:
+  - requirements/
+  - setup.py
+  commands:
+  - bash /vllm-workspace/.buildkite/scripts/check-ray-compatibility.sh
@@ -12,3 +12,10 @@ steps:
   commands:
   - pytest -v -s samplers
   - VLLM_USE_FLASHINFER_SAMPLER=1 pytest -v -s samplers
+  mirror:
+    amd:
+      device: mi325_1
+      depends_on:
+      - image-build-amd
+      commands:
+      - pytest -v -s samplers
@@ -13,13 +13,13 @@ steps:
   commands:
   - bash weight_loading/run_model_weight_loading_test.sh -c weight_loading/models.txt

-- label: Weight Loading Multiple GPU - Large Models # optional
-  working_dir: "/vllm-workspace/tests"
-  num_devices: 2
-  device: a100
-  optional: true
-  source_file_dependencies:
-  - vllm/
-  - tests/weight_loading
-  commands:
-  - bash weight_loading/run_model_weight_loading_test.sh -c weight_loading/models-large.txt
+# - label: Weight Loading Multiple GPU - Large Models # optional
+#   working_dir: "/vllm-workspace/tests"
+#   num_devices: 2
+#   device: a100
+#   optional: true
+#   source_file_dependencies:
+#   - vllm/
+#   - tests/weight_loading
+#   commands:
+#   - bash weight_loading/run_model_weight_loading_test.sh -c weight_loading/models-large.txt
.github/.bc-linter.yml  (vendored, file deleted)
@@ -1,24 +0,0 @@
-# doc: https://github.com/pytorch/test-infra/blob/main/tools/stronghold/docs/bc_linter_config.md
-version: 1
-paths:
-  # We temporarily disable globally, and will only enable with `annotations.include`
-  # include:
-  #   - "vllm/v1/attetion/*.py"
-  #   - "vllm/v1/core/*.py"
-  exclude:
-    - "**/*.py"
-
-scan:
-  functions: true  # check free functions and methods
-  classes: true  # check classes/dataclasses
-  public_only: true  # ignore names starting with "_" at any level
-
-annotations:
-  include:  # decorators that force-include a symbol
-    - name: "bc_linter_include"  # matched by simple name or dotted suffix
-      propagate_to_members: false  # for classes, include methods/inner classes
-  exclude:  # decorators that force-exclude a symbol
-    - name: "bc_linter_skip"  # matched by simple name or dotted suffix
-      propagate_to_members: true  # for classes, exclude methods/inner classes
-
-excluded_violations: []  # e.g. ["ParameterRenamed", "FieldTypeChanged"]
.github/CODEOWNERS  (vendored, 55 lines changed)
@@ -2,45 +2,66 @@
 # for more info about CODEOWNERS file

 # This lists cover the "core" components of vLLM that require careful review
-/vllm/executor/executor_base.py @zhuohan123 @youkaichao @alexm-redhat @njhill @22quinn
-/vllm/model_executor/layers/attention @LucasWilkinson
+/vllm/compilation @zou3519 @youkaichao @ProExpertProg @BoyuanFeng
+/vllm/distributed/kv_transfer @NickLucche @ApostaC @orozery
+/vllm/lora @jeejeelee
+/vllm/model_executor/layers/attention @LucasWilkinson @MatthewBonanni
 /vllm/model_executor/layers/fused_moe @mgoin @pavanimajety
 /vllm/model_executor/layers/quantization @mgoin @robertgshaw2-redhat @tlrmchlsmth @yewentao256 @pavanimajety
 /vllm/model_executor/layers/mamba @tdoublep
 /vllm/model_executor/model_loader @22quinn
 /vllm/model_executor/layers/batch_invariant.py @yewentao256
 /vllm/multimodal @DarkLight1337 @ywang96 @NickLucche @tjtanaa
-/vllm/vllm_flash_attn @LucasWilkinson
-/vllm/lora @jeejeelee
-/vllm/reasoning @aarnphm @chaunceyjiang
-/vllm/entrypoints @aarnphm @chaunceyjiang
-/vllm/tool_parsers @aarnphm @chaunceyjiang
-/vllm/compilation @zou3519 @youkaichao @ProExpertProg
-/vllm/distributed/kv_transfer @NickLucche @ApostaC @orozery
+/vllm/vllm_flash_attn @LucasWilkinson @MatthewBonanni
 CMakeLists.txt @tlrmchlsmth @LucasWilkinson

 # Any change to the VllmConfig changes can have a large user-facing impact,
 # so spam a lot of people
 /vllm/config @WoosukKwon @youkaichao @robertgshaw2-redhat @mgoin @tlrmchlsmth @houseroad @hmellor @yewentao256 @ProExpertProg
-/vllm/config/cache.py @WoosukKwon @youkaichao @robertgshaw2-redhat @mgoin @tlrmchlsmth @houseroad @hmellor @yewentao256 @ProExpertProg @heheda12345
+/vllm/config/cache.py @heheda12345
+
+# Entrypoints
+/vllm/entrypoints/anthropic @mgoin @DarkLight1337
+/vllm/entrypoints/cli @hmellor @mgoin @DarkLight1337 @russellb
+/vllm/entrypoints/mcp @heheda12345
+/vllm/entrypoints/openai @aarnphm @chaunceyjiang @DarkLight1337 @russellb
+/vllm/entrypoints/openai/realtime @njhill
+/vllm/entrypoints/openai/speech_to_text @NickLucche
+/vllm/entrypoints/pooling @noooop
+/vllm/entrypoints/sagemaker @DarkLight1337
+/vllm/entrypoints/serve @njhill
+/vllm/entrypoints/*.py @njhill
+/vllm/entrypoints/chat_utils.py @DarkLight1337
+/vllm/entrypoints/llm.py @DarkLight1337
+
+# Input/Output Processing
+/vllm/sampling_params.py @njhill @NickLucche
+/vllm/pooling_params.py @noooop @DarkLight1337
+/vllm/tokenizers @DarkLight1337 @njhill
+/vllm/renderers @DarkLight1337 @njhill
+/vllm/reasoning @aarnphm @chaunceyjiang
+/vllm/tool_parsers @aarnphm @chaunceyjiang

 # vLLM V1
-/vllm/v1/attention @LucasWilkinson
+/vllm/v1/attention @LucasWilkinson @MatthewBonanni
 /vllm/v1/attention/backend.py @WoosukKwon @zhuohan123 @youkaichao @alexm-redhat @njhill
 /vllm/v1/attention/backends/mla @pavanimajety
 /vllm/v1/attention/backends/flashinfer.py @mgoin @pavanimajety
 /vllm/v1/attention/backends/triton_attn.py @tdoublep
 /vllm/v1/core @WoosukKwon @robertgshaw2-redhat @njhill @ywang96 @alexm-redhat @heheda12345 @ApostaC @orozery
 /vllm/v1/sample @22quinn @houseroad @njhill
-/vllm/v1/spec_decode @benchislett @luccafong
+/vllm/v1/spec_decode @benchislett @luccafong @MatthewBonanni
 /vllm/v1/structured_output @mgoin @russellb @aarnphm @benchislett
 /vllm/v1/kv_cache_interface.py @heheda12345
 /vllm/v1/kv_offload @ApostaC @orozery
-/vllm/v1/worker/gpu/kv_connector.py @orozery
-/vllm/v1/worker/kv_connector_model_runner_mixin.py @orozery
+/vllm/v1/engine @njhill
+/vllm/v1/executor @njhill
+/vllm/v1/worker @njhill
+/vllm/v1/worker/kv_connector_model_runner_mixin.py @orozery @NickLucche

 # Model runner V2
-/vllm/v1/worker/gpu @WoosukKwon
+/vllm/v1/worker/gpu @WoosukKwon @njhill
+/vllm/v1/worker/gpu/kv_connector.py @orozery

 # Test ownership
 /.buildkite/lm-eval-harness @mgoin
@@ -115,8 +136,8 @@ mkdocs.yaml @hmellor
 /vllm/model_executor/models/mixtral*.py @patrickvonplaten
 /vllm/model_executor/models/voxtral*.py @patrickvonplaten
 /vllm/model_executor/models/pixtral*.py @patrickvonplaten
+/vllm/tokenizers/mistral.py @patrickvonplaten
 /vllm/transformers_utils/configs/mistral.py @patrickvonplaten
-/vllm/transformers_utils/tokenizers/mistral.py @patrickvonplaten

 # Kernels
 /vllm/v1/attention/ops/chunked_prefill_paged_decode.py @tdoublep
@@ -152,9 +173,7 @@ mkdocs.yaml @hmellor
 /examples/pooling @noooop
 /tests/models/*/pooling* @noooop
 /tests/entrypoints/pooling @noooop
-/vllm/entrypoints/pooling @noooop
 /vllm/config/pooler.py @noooop
-/vllm/pooling_params.py @noooop
 /vllm/model_executor/layers/pooler @noooop

 # Security guide and policies
.github/mergify.yml  (vendored, 3 lines changed)
@@ -259,8 +259,7 @@ pull_request_rules:
       - files=benchmarks/run_structured_output_benchmark.sh
       - files=docs/features/structured_outputs.md
      - files=examples/offline_inference/structured_outputs.py
-      - files=examples/online_serving/openai_chat_completion_structured_outputs.py
-      - files=examples/online_serving/openai_chat_completion_structured_outputs_with_reasoning.py
+      - files=examples/online_serving/structured_outputs/structured_outputs.py
      - files~=^tests/v1/structured_output/
      - files=tests/v1/entrypoints/llm/test_struct_output_generate.py
      - files~=^vllm/v1/structured_output/
.github/workflows/bc-lint.yml  (vendored, file deleted)
@@ -1,29 +0,0 @@
-name: BC Lint
-
-on:
-  pull_request:
-    types:
-      - opened
-      - synchronize
-      - reopened
-      - labeled
-      - unlabeled
-
-jobs:
-  bc_lint:
-    if: github.repository_owner == 'vllm-project'
-    runs-on: ubuntu-latest
-    steps:
-      - name: Run BC Lint Action
-        uses: pytorch/test-infra/.github/actions/bc-lint@main
-        with:
-          repo: ${{ github.event.pull_request.head.repo.full_name }}
-          base_sha: ${{ github.event.pull_request.base.sha }}
-          head_sha: ${{ github.event.pull_request.head.sha }}
-          suppression: ${{ contains(github.event.pull_request.labels.*.name, 'suppress-bc-linter') }}
-          docs_link: 'https://github.com/pytorch/test-infra/wiki/BC-Linter'
-          config_dir: .github
-
-concurrency:
-  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}
-  cancel-in-progress: true
.github/workflows/cleanup_pr_body.yml  (vendored, 1 line changed)
@@ -19,6 +19,7 @@ jobs:
         uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
         with:
           python-version: '3.12'
+          cache: 'pip'

       - name: Install Python dependencies
         run: |
.gitignore  (vendored, 5 lines changed)
@@ -3,6 +3,8 @@

 # vllm-flash-attn built from source
 vllm/vllm_flash_attn/*
+!vllm/vllm_flash_attn/__init__.py
+!vllm/vllm_flash_attn/flash_attn_interface.py

 # OpenAI triton kernels copied from source
 vllm/third_party/triton_kernels/*
@@ -238,3 +240,6 @@ ep_kernels_workspace/
 vllm/grpc/vllm_engine_pb2.py
 vllm/grpc/vllm_engine_pb2_grpc.py
 vllm/grpc/vllm_engine_pb2.pyi
+
+# Ignore generated cpu headers
+csrc/cpu/cpu_attn_dispatch_generated.h
@@ -121,24 +121,9 @@ repos:
       name: Update Dockerfile dependency graph
       entry: tools/pre_commit/update-dockerfile-graph.sh
       language: script
-    - id: enforce-import-regex-instead-of-re
-      name: Enforce import regex as re
-      entry: python tools/pre_commit/enforce_regex_import.py
-      language: python
-      types: [python]
-      pass_filenames: false
-      additional_dependencies: [regex]
-    # forbid directly import triton
-    - id: forbid-direct-triton-import
-      name: "Forbid direct 'import triton'"
-      entry: python tools/pre_commit/check_triton_import.py
-      language: python
-      types: [python]
-      pass_filenames: false
-      additional_dependencies: [regex]
-    - id: check-pickle-imports
-      name: Prevent new pickle/cloudpickle imports
-      entry: python tools/pre_commit/check_pickle_imports.py
+    - id: check-forbidden-imports
+      name: Check for forbidden imports
+      entry: python tools/pre_commit/check_forbidden_imports.py
       language: python
       types: [python]
       additional_dependencies: [regex]
@@ -158,6 +143,11 @@ repos:
       name: Check attention backend documentation is up to date
      entry: python tools/pre_commit/generate_attention_backend_docs.py --check
      language: python
+    - id: check-boolean-context-manager
+      name: Check for boolean ops in with-statements
+      entry: python tools/pre_commit/check_boolean_context_manager.py
+      language: python
+      types: [python]
     # Keep `suggestion` last
     - id: suggestion
       name: Suggestion
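For context on what the new check-boolean-context-manager hook presumably guards against (my own illustration, not taken from the hook's sources): a boolean operator inside a with statement evaluates to a single operand, so only one context manager is actually entered.

    from contextlib import contextmanager

    @contextmanager
    def tag(name):
        print("enter", name)
        yield
        print("exit", name)

    # Bug class: `and` returns its right operand here (the left one is truthy),
    # so only "b" is entered and "a" is silently skipped.
    with tag("a") and tag("b"):
        pass
    # Correct form: with tag("a"), tag("b"): ...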
@@ -9,13 +9,14 @@ build:
   python: "3.12"
   jobs:
     post_checkout:
-      - git fetch --unshallow || true
+      - git fetch origin main --unshallow --no-tags --filter=blob:none || true
+    pre_create_environment:
+      - pip install uv
+    create_environment:
+      - uv venv $READTHEDOCS_VIRTUALENV_PATH
+    install:
+      - uv pip install --python $READTHEDOCS_VIRTUALENV_PATH/bin/python --no-cache-dir -r requirements/docs.txt

 mkdocs:
   configuration: mkdocs.yaml
   fail_on_warning: true
-
-# Optionally declare the Python requirements required to build your docs
-python:
-  install:
-    - requirements: requirements/docs.txt
@@ -56,8 +56,8 @@ endif()
 # requirements.txt files and should be kept consistent. The ROCm torch
 # versions are derived from docker/Dockerfile.rocm
 #
-set(TORCH_SUPPORTED_VERSION_CUDA "2.9.1")
-set(TORCH_SUPPORTED_VERSION_ROCM "2.9.1")
+set(TORCH_SUPPORTED_VERSION_CUDA "2.10.0")
+set(TORCH_SUPPORTED_VERSION_ROCM "2.10.0")

 #
 # Try to find python package with an executable that exactly matches
@@ -293,6 +293,7 @@ set(VLLM_EXT_SRC
   "csrc/fused_qknorm_rope_kernel.cu"
   "csrc/layernorm_quant_kernels.cu"
   "csrc/sampler.cu"
+  "csrc/topk.cu"
   "csrc/cuda_view.cu"
   "csrc/quantization/gptq/q_gemm.cu"
   "csrc/quantization/w8a8/int8/scaled_quant.cu"
@@ -433,7 +434,7 @@ if(VLLM_GPU_LANG STREQUAL "CUDA")
     list(APPEND VLLM_EXT_SRC ${MARLIN_TEMPLATE_BF16_KERNEL_SRC})
   endif()

   if (MARLIN_SM75_ARCHS)
     file(GLOB MARLIN_TEMPLATE_SM75_KERNEL_SRC "csrc/quantization/marlin/sm75_kernel_*.cu")
     set_gencode_flags_for_srcs(
       SRCS "${MARLIN_TEMPLATE_SM75_KERNEL_SRC}"
@@ -445,7 +446,7 @@ if(VLLM_GPU_LANG STREQUAL "CUDA")
     list(APPEND VLLM_EXT_SRC ${MARLIN_TEMPLATE_SM75_KERNEL_SRC})
   endif()

   if (MARLIN_FP8_ARCHS)
     file(GLOB MARLIN_TEMPLATE_FP8_KERNEL_SRC "csrc/quantization/marlin/sm89_kernel_*.cu")
     set_gencode_flags_for_srcs(
       SRCS "${MARLIN_TEMPLATE_FP8_KERNEL_SRC}"
@@ -724,7 +725,7 @@ if(VLLM_GPU_LANG STREQUAL "CUDA")
   # CUTLASS MoE kernels

   # The MoE kernel cutlass_moe_mm requires CUDA 12.3 or later (and ONLY works
-  # on Hopper). get_cutlass_(pplx_)moe_mm_data should only be compiled
+  # on Hopper). get_cutlass_(batched_)moe_mm_data should only be compiled
   # if it's possible to compile MoE kernels that use its output.
   cuda_archs_loose_intersection(SCALED_MM_ARCHS "9.0a" "${CUDA_ARCHS}")
   if(${CMAKE_CUDA_COMPILER_VERSION} VERSION_GREATER_EQUAL 12.3 AND SCALED_MM_ARCHS)
@@ -770,6 +771,51 @@ if(VLLM_GPU_LANG STREQUAL "CUDA")
     endif()
   endif()

+  # Expert-specialization MXFP8 blockscaled grouped kernels (SM100+).
+  if(${CMAKE_CUDA_COMPILER_VERSION} VERSION_GREATER_EQUAL 13.0)
+    cuda_archs_loose_intersection(ES_MXFP8_GROUPED_MM_ARCHS "10.0f;11.0f" "${CUDA_ARCHS}")
+  else()
+    cuda_archs_loose_intersection(ES_MXFP8_GROUPED_MM_ARCHS "10.0a;10.1a;10.3a" "${CUDA_ARCHS}")
+  endif()
+  if(${CMAKE_CUDA_COMPILER_VERSION} VERSION_GREATER_EQUAL 12.8 AND ES_MXFP8_GROUPED_MM_ARCHS)
+    set(SRCS
+      "csrc/moe/mxfp8_moe/cutlass_mxfp8_grouped_mm.cu"
+      "csrc/moe/mxfp8_moe/mxfp8_experts_quant.cu")
+    set_gencode_flags_for_srcs(
+      SRCS "${SRCS}"
+      CUDA_ARCHS "${ES_MXFP8_GROUPED_MM_ARCHS}")
+    list(APPEND VLLM_EXT_SRC "${SRCS}")
+    list(APPEND VLLM_GPU_FLAGS "-DENABLE_ES_MXFP8_GROUPED_MM_SM100=1")
+    message(STATUS "Building ES MXFP8 grouped kernels for archs: ${ES_MXFP8_GROUPED_MM_ARCHS}")
+  else()
+    if (NOT ${CMAKE_CUDA_COMPILER_VERSION} VERSION_GREATER_EQUAL 12.8
+        AND ES_MXFP8_GROUPED_MM_ARCHS)
+      message(STATUS "Not building ES MXFP8 grouped kernels as CUDA Compiler version is "
+                     "not >= 12.8.")
+    else()
+      message(STATUS "Not building ES MXFP8 grouped kernels as no compatible archs found "
+                     "in CUDA target architectures.")
+    endif()
+  endif()
+
+  # DeepSeek V3 fused A GEMM kernel (requires SM 9.0+, Hopper and later)
+  if(${CMAKE_CUDA_COMPILER_VERSION} VERSION_GREATER_EQUAL 13.0)
+    cuda_archs_loose_intersection(DSV3_FUSED_A_GEMM_ARCHS "9.0a;10.0f;11.0f" "${CUDA_ARCHS}")
+  else()
+    cuda_archs_loose_intersection(DSV3_FUSED_A_GEMM_ARCHS "9.0a;10.0a;10.1a;10.3a" "${CUDA_ARCHS}")
+  endif()
+  if(${CMAKE_CUDA_COMPILER_VERSION} VERSION_GREATER_EQUAL 12.0 AND DSV3_FUSED_A_GEMM_ARCHS)
+    set(DSV3_FUSED_A_GEMM_SRC "csrc/dsv3_fused_a_gemm.cu")
+    set_gencode_flags_for_srcs(
+      SRCS "${DSV3_FUSED_A_GEMM_SRC}"
+      CUDA_ARCHS "${DSV3_FUSED_A_GEMM_ARCHS}")
+    list(APPEND VLLM_EXT_SRC ${DSV3_FUSED_A_GEMM_SRC})
+    message(STATUS "Building dsv3_fused_a_gemm for archs: ${DSV3_FUSED_A_GEMM_ARCHS}")
+  else()
+    message(STATUS "Not building dsv3_fused_a_gemm as no compatible archs found "
+                   "in CUDA target architectures.")
+  endif()
+
   # moe_data.cu is used by all CUTLASS MoE kernels.
   if(${CMAKE_CUDA_COMPILER_VERSION} VERSION_GREATER_EQUAL 13.0)
     cuda_archs_loose_intersection(CUTLASS_MOE_DATA_ARCHS "9.0a;10.0f;11.0f;12.0f" "${CUDA_ARCHS}")
@@ -952,7 +998,8 @@ set(VLLM_MOE_EXT_SRC
 if(VLLM_GPU_LANG STREQUAL "CUDA")
   list(APPEND VLLM_MOE_EXT_SRC
     "csrc/moe/moe_wna16.cu"
-    "csrc/moe/grouped_topk_kernels.cu")
+    "csrc/moe/grouped_topk_kernels.cu"
+    "csrc/moe/router_gemm.cu")
 endif()

 if(VLLM_GPU_LANG STREQUAL "CUDA")
@@ -1042,7 +1089,7 @@ if(VLLM_GPU_LANG STREQUAL "CUDA")
     list(APPEND VLLM_MOE_EXT_SRC ${MARLIN_MOE_SRC})
   endif()

   if (MARLIN_MOE_SM75_ARCHS)
     file(GLOB MARLIN_MOE_SM75_SRC "csrc/moe/marlin_moe_wna16/sm75_kernel_*.cu")
     set_gencode_flags_for_srcs(
       SRCS "${MARLIN_MOE_SM75_SRC}"
@@ -1081,6 +1128,27 @@ if(VLLM_GPU_LANG STREQUAL "CUDA")
     message(STATUS "Not building Marlin MOE kernels as no compatible archs found"
                    " in CUDA target architectures")
   endif()

+  # DeepSeek V3 router GEMM kernel - requires SM90+
+  if(${CMAKE_CUDA_COMPILER_VERSION} VERSION_GREATER_EQUAL 13.0)
+    cuda_archs_loose_intersection(DSV3_ROUTER_GEMM_ARCHS "9.0a;10.0f;11.0f" "${CUDA_ARCHS}")
+  else()
+    cuda_archs_loose_intersection(DSV3_ROUTER_GEMM_ARCHS "9.0a;10.0a;10.1a;10.3a" "${CUDA_ARCHS}")
+  endif()
+  if(${CMAKE_CUDA_COMPILER_VERSION} VERSION_GREATER_EQUAL 12.0 AND DSV3_ROUTER_GEMM_ARCHS)
+    set(DSV3_ROUTER_GEMM_SRC
+      "csrc/moe/dsv3_router_gemm_entry.cu"
+      "csrc/moe/dsv3_router_gemm_float_out.cu"
+      "csrc/moe/dsv3_router_gemm_bf16_out.cu")
+    set_gencode_flags_for_srcs(
+      SRCS "${DSV3_ROUTER_GEMM_SRC}"
+      CUDA_ARCHS "${DSV3_ROUTER_GEMM_ARCHS}")
+    list(APPEND VLLM_MOE_EXT_SRC "${DSV3_ROUTER_GEMM_SRC}")
+    message(STATUS "Building DSV3 router GEMM kernel for archs: ${DSV3_ROUTER_GEMM_ARCHS}")
+  else()
+    message(STATUS "Not building DSV3 router GEMM kernel as no compatible archs found"
+                   " (requires SM90+ and CUDA >= 12.0)")
+  endif()
 endif()

 message(STATUS "Enabling moe extension.")
@@ -11,7 +11,7 @@ This directory used to contain vLLM's benchmark scripts and utilities for perfor

 ## Usage

-For detailed usage instructions, examples, and dataset information, see the [Benchmark CLI documentation](https://docs.vllm.ai/en/latest/contributing/benchmarks.html#benchmark-cli).
+For detailed usage instructions, examples, and dataset information, see the [Benchmark CLI documentation](https://docs.vllm.ai/en/latest/benchmarking/cli/#benchmark-cli).

 For full CLI reference see:
@@ -15,7 +15,6 @@ from .common import (
     BenchmarkConfig,
     BenchmarkResult,
     MockLayer,
-    MockModelConfig,
     ResultsFormatter,
     get_attention_scale,
     is_mla_backend,
@@ -36,7 +35,6 @@ __all__ = [
     "ResultsFormatter",
     # Mock objects
     "MockLayer",
-    "MockModelConfig",
     # Utilities
     "setup_mla_dims",
     "get_attention_scale",
@@ -229,3 +229,40 @@ def get_batch_stats(requests: list[BatchRequest]) -> dict:
             sum(r.kv_len for r in requests) / len(requests) if requests else 0
         ),
     }
+
+
+def get_batch_type(batch_spec: str, spec_decode_threshold: int = 8) -> str:
+    """
+    Classify a batch spec into a type string.
+
+    Args:
+        batch_spec: Batch specification string (e.g., "q2k", "8q1s1k", "2q2k_8q1s1k")
+        spec_decode_threshold: Max q_len to be considered spec-decode vs extend
+
+    Returns:
+        Type string: "prefill", "decode", "spec-decode", "extend", or "mixed (types...)"
+    """
+    requests = parse_batch_spec(batch_spec)
+
+    # Classify each request
+    types_present = set()
+    for req in requests:
+        if req.is_decode:
+            types_present.add("decode")
+        elif req.is_prefill:
+            types_present.add("prefill")
+        elif req.is_extend:
+            # Distinguish spec-decode (small q_len) from extend (chunked prefill)
+            if req.q_len <= spec_decode_threshold:
+                types_present.add("spec-decode")
+            else:
+                types_present.add("extend")
+
+    if len(types_present) == 1:
+        return types_present.pop()
+    elif len(types_present) > 1:
+        # Sort for consistent output
+        sorted_types = sorted(types_present)
+        return f"mixed ({'+'.join(sorted_types)})"
+    else:
+        return "unknown"
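A minimal usage sketch for the classifier above, using the spec strings from its own docstring (it assumes get_batch_type is importable from the benchmark's batch_spec module, as the import hunks further below suggest; the expected labels follow the function's rules):

    from batch_spec import get_batch_type

    print(get_batch_type("q2k"))          # "prefill": one 2k-token prefill request
    print(get_batch_type("8q1s1k"))       # "decode": q_len == 1 for all 8 requests
    print(get_batch_type("16q4s1k"))      # "spec-decode": q_len 4 <= threshold 8
    print(get_batch_type("2q2k_8q1s1k"))  # "mixed (decode+prefill)"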
@@ -43,6 +43,7 @@ from common import (
     ModelParameterSweep,
     ParameterSweep,
     ResultsFormatter,
+    batch_spec_sort_key,
     is_mla_backend,
 )

@@ -218,10 +219,13 @@ def run_model_parameter_sweep(
                 by_param_and_spec[key].append(r)
                 break

-    # Sort by param value then spec
+    # Sort by param value then spec (batch_size, q_len, kv_len)
     sorted_keys = sorted(
         by_param_and_spec.keys(),
-        key=lambda x: (int(x[0]) if x[0].isdigit() else x[0], x[1]),
+        key=lambda x: (
+            int(x[0]) if x[0].isdigit() else x[0],
+            batch_spec_sort_key(x[1]),
+        ),
     )

     current_param_value = None
@@ -330,7 +334,7 @@ def run_parameter_sweep(
             by_spec[spec] = []
         by_spec[spec].append(r)

-    for spec in sorted(by_spec.keys()):
+    for spec in sorted(by_spec.keys(), key=batch_spec_sort_key):
         results = by_spec[spec]
         best = min(results, key=lambda r: r.mean_time)
         console.print(
@@ -496,15 +500,18 @@ def main():
     if "description" in yaml_config:
         console.print(f"[dim]{yaml_config['description']}[/]")

-    # Override args with YAML values
-    # (YAML takes precedence unless CLI arg was explicitly set)
-    # Backend(s)
-    if "backend" in yaml_config:
-        args.backend = yaml_config["backend"]
-        args.backends = None
-    elif "backends" in yaml_config:
-        args.backends = yaml_config["backends"]
-        args.backend = None
+    # Override args with YAML values, but CLI args take precedence
+    # Check if CLI provided backends (they would be non-None and not default)
+    cli_backends_provided = args.backends is not None or args.backend is not None
+
+    # Backend(s) - only use YAML if CLI didn't specify
+    if not cli_backends_provided:
+        if "backend" in yaml_config:
+            args.backend = yaml_config["backend"]
+            args.backends = None
+        elif "backends" in yaml_config:
+            args.backends = yaml_config["backends"]
+            args.backend = None

     # Check for special modes
     if "mode" in yaml_config:
@@ -544,13 +551,15 @@ def main():
         args.num_kv_heads = model.get("num_kv_heads", args.num_kv_heads)
         args.block_size = model.get("block_size", args.block_size)

-    # Benchmark settings
-    if "benchmark" in yaml_config:
-        bench = yaml_config["benchmark"]
-        args.device = bench.get("device", args.device)
-        args.repeats = bench.get("repeats", args.repeats)
-        args.warmup_iters = bench.get("warmup_iters", args.warmup_iters)
-        args.profile_memory = bench.get("profile_memory", args.profile_memory)
+    # Benchmark settings (top-level keys)
+    if "device" in yaml_config:
+        args.device = yaml_config["device"]
+    if "repeats" in yaml_config:
+        args.repeats = yaml_config["repeats"]
+    if "warmup_iters" in yaml_config:
+        args.warmup_iters = yaml_config["warmup_iters"]
+    if "profile_memory" in yaml_config:
+        args.profile_memory = yaml_config["profile_memory"]

     # Parameter sweep configuration
     if "parameter_sweep" in yaml_config:
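A runnable sanity sketch of the new precedence rule (the Namespace stand-in is hypothetical; the real args come from argparse):

    import argparse

    args = argparse.Namespace(backend=None, backends=["FLASHMLA"])  # CLI chose a backend
    yaml_config = {"backends": ["CUTLASS_MLA", "FLASHINFER_MLA"]}

    cli_backends_provided = args.backends is not None or args.backend is not None
    if not cli_backends_provided:
        if "backend" in yaml_config:
            args.backend, args.backends = yaml_config["backend"], None
        elif "backends" in yaml_config:
            args.backends, args.backend = yaml_config["backends"], None

    print(args.backends)  # ['FLASHMLA']: the CLI choice wins over the YAML list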
@@ -10,18 +10,37 @@ from dataclasses import asdict, dataclass
 from pathlib import Path
 from typing import Any

-import numpy as np
 import torch
+from batch_spec import get_batch_type, parse_batch_spec
 from rich.console import Console
 from rich.table import Table


+def batch_spec_sort_key(spec: str) -> tuple[int, int, int]:
+    """
+    Extract sorting key from batch spec: (batch_size, max_q_len, max_kv_len).
+
+    This ensures results are sorted by batch size first, then query length,
+    then sequence length, rather than alphabetically.
+    """
+    try:
+        requests = parse_batch_spec(spec)
+        batch_size = len(requests)
+        max_q_len = max(r.q_len for r in requests) if requests else 0
+        max_kv_len = max(r.kv_len for r in requests) if requests else 0
+        return (batch_size, max_q_len, max_kv_len)
+    except Exception:
+        # Fallback for unparseable specs
+        return (0, 0, 0)
+
+
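Why the numeric key matters, in two lines (spec strings taken from the configs in this diff; batch_spec_sort_key as defined above):

    from common import batch_spec_sort_key

    specs = ["128q1s1k", "16q1s512", "32q1s16k"]
    print(sorted(specs))
    # ['128q1s1k', '16q1s512', '32q1s16k'] -- string order, "12" < "16"
    print(sorted(specs, key=batch_spec_sort_key))
    # ['16q1s512', '32q1s16k', '128q1s1k'] -- by (batch_size, q_len, kv_len)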
 # Mock classes for vLLM attention infrastructure


 class MockHfConfig:
     """Mock HuggingFace config that satisfies vLLM's requirements."""

-    def __init__(self, mla_dims: dict):
+    def __init__(self, mla_dims: dict, index_topk: int | None = None):
         self.num_attention_heads = mla_dims["num_q_heads"]
         self.num_key_value_heads = mla_dims["num_kv_heads"]
         self.hidden_size = mla_dims["head_dim"] * mla_dims["num_q_heads"]
@@ -32,6 +51,8 @@ class MockHfConfig:
         self.qk_rope_head_dim = mla_dims["qk_rope_head_dim"]
         self.v_head_dim = mla_dims["v_head_dim"]
         self.qk_head_dim = mla_dims["qk_nope_head_dim"] + mla_dims["qk_rope_head_dim"]
+        if index_topk is not None:
+            self.index_topk = index_topk

     def get_text_config(self):
         return self
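A sketch of the new constructor path. The mla_dims values mirror the DeepSeek-V3 numbers used in the YAML configs below; treat the exact key set as an assumption, since part of __init__ sits outside these hunks, and the index_topk value is purely illustrative:

    from common import MockHfConfig

    mla_dims = {
        "num_q_heads": 128, "num_kv_heads": 1, "head_dim": 576,
        "kv_lora_rank": 512, "qk_nope_head_dim": 128,
        "qk_rope_head_dim": 64, "v_head_dim": 128,
    }
    hf = MockHfConfig(mla_dims, index_topk=2048)  # index_topk chosen for illustration
    assert hf.qk_head_dim == 128 + 64 and hf.index_topk == 2048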
@@ -40,10 +61,7 @@ class MockHfConfig:
 # Import AttentionLayerBase at module level to avoid circular dependencies
 try:
     from vllm.model_executor.layers.attention_layer_base import AttentionLayerBase
-
-    _HAS_ATTENTION_LAYER_BASE = True
 except ImportError:
-    _HAS_ATTENTION_LAYER_BASE = False
     AttentionLayerBase = object  # Fallback


@@ -82,6 +100,38 @@ class MockKVBProj:
         return (result,)  # Return as tuple to match ColumnParallelLinear API


+class MockIndexer:
+    """Mock Indexer for sparse MLA backends.
+
+    Provides topk_indices_buffer that sparse MLA backends use to determine
+    which KV cache slots to attend to for each token.
+    """
+
+    def __init__(
+        self,
+        max_num_tokens: int,
+        topk_tokens: int,
+        device: torch.device,
+    ):
+        self.topk_tokens = topk_tokens
+        self.topk_indices_buffer = torch.zeros(
+            (max_num_tokens, topk_tokens),
+            dtype=torch.int32,
+            device=device,
+        )
+
+    def fill_random_indices(self, num_tokens: int, max_kv_len: int):
+        """Fill topk_indices_buffer with random valid indices for benchmarking."""
+        indices = torch.randint(
+            0,
+            max_kv_len,
+            (num_tokens, self.topk_tokens),
+            dtype=torch.int32,
+            device=self.topk_indices_buffer.device,
+        )
+        self.topk_indices_buffer[:num_tokens] = indices
+
+
 class MockLayer(AttentionLayerBase):
     """Mock attention layer with scale parameters and impl.
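Usage sketch for the MockIndexer added above (the sizes and the CUDA device are illustrative assumptions):

    import torch
    from common import MockIndexer

    indexer = MockIndexer(max_num_tokens=256, topk_tokens=64,
                          device=torch.device("cuda"))
    indexer.fill_random_indices(num_tokens=128, max_kv_len=4096)
    assert indexer.topk_indices_buffer.shape == (256, 64)
    assert int(indexer.topk_indices_buffer[:128].max()) < 4096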
@@ -113,95 +163,6 @@ class MockLayer(AttentionLayerBase):
         return self._kv_cache_spec


-class MockModelConfig:
-    """Mock model configuration."""
-
-    def __init__(
-        self,
-        num_q_heads: int,
-        num_kv_heads: int,
-        head_dim: int,
-        dtype: torch.dtype = torch.float16,
-        max_model_len: int = 32768,
-    ):
-        self._n_q = num_q_heads
-        self._n_kv = num_kv_heads
-        self._d = head_dim
-        self.dtype = dtype
-        self.max_model_len = max_model_len
-
-    def get_num_attention_heads(self, _=None) -> int:
-        return self._n_q
-
-    def get_num_kv_heads(self, _=None) -> int:
-        return self._n_kv
-
-    def get_head_size(self) -> int:
-        return self._d
-
-    def get_num_layers(self) -> int:
-        """Mock method for layer count queries."""
-        return 1
-
-    def get_sliding_window_for_layer(self, _layer_idx: int):
-        """Mock method for sliding window queries."""
-        return None
-
-    def get_logits_soft_cap_for_layer(self, _layer_idx: int):
-        """Mock method for logits soft cap queries."""
-        return None
-
-    def get_sm_scale_for_layer(self, _layer_idx: int) -> float:
-        """Mock method for SM scale queries."""
-        return 1.0 / (self.get_head_size() ** 0.5)
-
-
-class MockParallelConfig:
-    """Mock parallel configuration."""
-
-    pass
-
-
-class MockCompilationConfig:
-    """Mock compilation configuration."""
-
-    def __init__(self):
-        self.full_cuda_graph = False
-        self.static_forward_context = {}
-
-
-class MockVLLMConfig:
-    """Mock VLLM configuration."""
-
-    def __init__(self):
-        self.compilation_config = MockCompilationConfig()
-
-
-class MockRunner:
-    """Mock GPU runner for metadata builders."""
-
-    def __init__(
-        self,
-        seq_lens: np.ndarray,
-        query_start_locs: np.ndarray,
-        device: torch.device,
-        num_q_heads: int,
-        num_kv_heads: int,
-        head_dim: int,
-        dtype: torch.dtype,
-    ):
-        self.model_config = MockModelConfig(num_q_heads, num_kv_heads, head_dim, dtype)
-        self.parallel_config = MockParallelConfig()
-        self.vllm_config = MockVLLMConfig()
-        self.seq_lens_np = seq_lens
-        self.query_start_loc_np = query_start_locs
-        self.device = device
-        self.attention_chunk_size = None
-        self.num_query_heads = num_q_heads
-        self.num_kv_heads = num_kv_heads
-        self.dtype = dtype
-
-
 @dataclass
 class ParameterSweep:
     """Configuration for sweeping a backend parameter."""
@@ -316,14 +277,19 @@ class ResultsFormatter:
             backends: List of backend names being compared
             compare_to_fastest: Show percentage comparison to fastest
         """
-        # Group by batch spec
+        # Group by batch spec, preserving first-occurrence order
         by_spec = {}
+        specs_order = []
         for r in results:
             spec = r.config.batch_spec
             if spec not in by_spec:
                 by_spec[spec] = {}
+                specs_order.append(spec)
             by_spec[spec][r.config.backend] = r

+        # Sort specs by (batch_size, q_len, kv_len) instead of alphabetically
+        specs_order = sorted(by_spec.keys(), key=batch_spec_sort_key)
+
         # Create shortened backend names for display
         def shorten_backend_name(name: str) -> str:
             """Shorten long backend names for table display."""
@@ -337,6 +303,8 @@ class ResultsFormatter:

         table = Table(title="Attention Benchmark Results")
         table.add_column("Batch\nSpec", no_wrap=True)
+        table.add_column("Type", no_wrap=True)
+        table.add_column("Batch\nSize", justify="right", no_wrap=True)

         multi = len(backends) > 1
         for backend in backends:
@@ -350,12 +318,14 @@ class ResultsFormatter:
                 table.add_column(col_rel, justify="right", no_wrap=False)

         # Add rows
-        for spec in sorted(by_spec.keys()):
+        for spec in specs_order:
             spec_results = by_spec[spec]
             times = {b: r.mean_time for b, r in spec_results.items() if r.success}
             best_time = min(times.values()) if times else 0.0

-            row = [spec]
+            batch_type = get_batch_type(spec)
+            batch_size = len(parse_batch_spec(spec))
+            row = [spec, batch_type, str(batch_size)]
             for backend in backends:
                 if backend in spec_results:
                     r = spec_results[backend]
@@ -486,10 +456,11 @@ def get_attention_scale(head_dim: int) -> float:

 def is_mla_backend(backend: str) -> bool:
     """
-    Check if backend is an MLA backend using the backend's is_mla() property.
+    Check if backend is an MLA backend using the AttentionBackendEnum.

     Args:
-        backend: Backend name (e.g., "CUTLASS_MLA", "FLASHINFER_MLA")
+        backend: Backend name matching AttentionBackendEnum exactly
+            (e.g., "FLASHMLA_SPARSE")

     Returns:
         True if the backend is an MLA backend, False otherwise
@@ -497,7 +468,8 @@ def is_mla_backend(backend: str) -> bool:
     from vllm.v1.attention.backends.registry import AttentionBackendEnum

     try:
-        backend_class = AttentionBackendEnum[backend.upper()].get_class()
+        backend_enum = AttentionBackendEnum[backend]
+        backend_class = backend_enum.get_class()
         return backend_class.is_mla()
-    except (KeyError, ValueError, ImportError):
+    except (KeyError, ValueError, ImportError, AttributeError):
         return False
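One behavioral consequence worth noting: dropping the .upper() normalization makes the lookup case-sensitive, which is why the YAML configs below switch to the exact AttentionBackendEnum member names. A sketch (the first result depends on the backends available in a given build):

    from common import is_mla_backend

    print(is_mla_backend("FLASH_ATTN_MLA"))  # True where the backend is available
    print(is_mla_backend("flash_attn_mla"))  # False: the KeyError is caught and swallowed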
@@ -3,7 +3,7 @@
 model:
   name: "deepseek-v3"
   num_layers: 60
-  num_q_heads: 128
+  num_q_heads: 128 # Base value, can be swept for TP simulation
   num_kv_heads: 1 # MLA uses single latent KV
   head_dim: 576
   kv_lora_rank: 512
@@ -12,6 +12,13 @@ model:
   v_head_dim: 128
   block_size: 128 # CUTLASS MLA and FlashAttn MLA use 128

+# Model parameter sweep: simulate tensor parallelism by varying num_q_heads
+# TP=1: 128 heads, TP=2: 64 heads, TP=4: 32 heads, TP=8: 16 heads
+model_parameter_sweep:
+  param_name: "num_q_heads"
+  values: [128, 64, 32, 16]
+  label_format: "{backend}_{value}h"
+
 batch_specs:
   # Small batches, varying sequence lengths
   - "16q1s512" # 16 requests, 512 KV cache
@@ -34,28 +41,30 @@ batch_specs:
   # Very large batches
   - "128q1s1k" # 128 requests, 1k KV cache
   - "128q1s2k" # 128 requests, 2k KV cache
+  - "128q1s4k" # 128 requests, 4k KV cache
+  - "128q1s8k" # 128 requests, 8k KV cache

   # Long context
   - "32q1s16k" # 32 requests, 16k KV cache
   - "32q1s32k" # 32 requests, 32k KV cache

 backends:
-  - cutlass_mla
-  - flashinfer_mla
-  - flashattn_mla # Hopper only
-  - flashmla # Hopper only
+  - CUTLASS_MLA
+  - FLASHINFER_MLA
+  - FLASH_ATTN_MLA # Hopper only
+  - FLASHMLA # Hopper only

 device: "cuda:0"
-repeats: 5
-warmup_iters: 3
+repeats: 100
+warmup_iters: 10
 profile_memory: true

 # Backend-specific tuning
-cutlass_mla:
+CUTLASS_MLA:
   num_kv_splits: auto # or specific value like 4, 8, 16

-flashattn_mla:
+FLASH_ATTN_MLA:
   reorder_batch_threshold: 512

-flashmla:
+FLASHMLA:
   reorder_batch_threshold: 1
@@ -45,10 +45,10 @@ batch_specs:
   - "4q4k_60q1s4k" # 4 prefill + 60 decode

 backends:
-  - cutlass_mla
-  - flashinfer_mla
-  - flashattn_mla # Hopper only
-  - flashmla # Hopper only
+  - CUTLASS_MLA
+  - FLASHINFER_MLA
+  - FLASH_ATTN_MLA # Hopper only
+  - FLASHMLA # Hopper only

 device: "cuda:0"
 repeats: 5
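The sweep labels expand via plain str.format, so the values listed above would render as follows (a sketch; the actual formatting call lives in code not shown in this diff):

    label_format = "{backend}_{value}h"
    for value in [128, 64, 32, 16]:
        print(label_format.format(backend="FLASHMLA", value=value))
    # FLASHMLA_128h, FLASHMLA_64h, FLASHMLA_32h, FLASHMLA_16h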
benchmarks/attention_benchmarks/configs/mla_prefill.yaml  (new file, 62 lines)
@@ -0,0 +1,62 @@
+# MLA prefill-only benchmark configuration for sparse backends
+
+model:
+  name: "deepseek-v3"
+  num_layers: 60
+  num_q_heads: 128
+  num_kv_heads: 1
+  head_dim: 576
+  kv_lora_rank: 512
+  qk_nope_head_dim: 128
+  qk_rope_head_dim: 64
+  v_head_dim: 128
+  block_size: 128
+
+# Model parameter sweep: simulate tensor parallelism by varying num_q_heads
+# TP=1: 128 heads, TP=2: 64 heads, TP=4: 32 heads, TP=8: 16 heads
+model_parameter_sweep:
+  param_name: "num_q_heads"
+  values: [128, 64, 32, 16]
+  label_format: "{backend}_{value}h"
+
+batch_specs:
+  # Pure prefill
+  - "1q512"
+  - "1q1k"
+  - "1q2k"
+  - "1q4k"
+  - "1q8k"
+
+  # Batched pure prefill
+  - "2q512"
+  - "2q1k"
+  - "2q2k"
+  - "2q4k"
+  - "2q8k"
+  - "4q512"
+  - "4q1k"
+  - "4q2k"
+  - "4q4k"
+  - "4q8k"
+  - "8q512"
+  - "8q1k"
+  - "8q2k"
+  - "8q4k"
+  - "8q8k"
+
+  # Extend
+  - "1q512s4k"
+  - "1q512s8k"
+  - "1q1ks8k"
+  - "1q2ks8k"
+  - "1q2ks16k"
+  - "1q4ks16k"
+
+backends:
+  - FLASHMLA_SPARSE
+  - FLASHINFER_MLA_SPARSE
+
+device: "cuda:0"
+repeats: 10
+warmup_iters: 3
+profile_memory: true
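Decoding the spec notation used throughout this file, per its inline comments: "1q2ks16k" reads as 1 request with a 2k-token query against a 16k-token total sequence. A sketch, assuming parse_batch_spec exposes q_len/kv_len as the sort-key code above indicates:

    from batch_spec import parse_batch_spec

    (req,) = parse_batch_spec("1q2ks16k")
    print(req.q_len, req.kv_len)  # expected: 2048 16384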
@@ -6,7 +6,7 @@
 description: "Decode vs Prefill pipeline crossover analysis"

 # Test FlashAttn MLA
-backend: flashattn_mla
+backend: FLASH_ATTN_MLA

 # Mode: decode_vs_prefill comparison (special sweep mode)
 # For each batch spec, we'll test both decode and prefill pipelines
@@ -62,11 +62,10 @@ model:
   block_size: 128

 # Benchmark settings
-benchmark:
-  device: "cuda:0"
-  repeats: 15 # More repeats for spec decode variance
-  warmup_iters: 5
-  profile_memory: false
+device: "cuda:0"
+repeats: 15 # More repeats for spec decode variance
+warmup_iters: 5
+profile_memory: false

 # Output
 output:
@@ -41,18 +41,17 @@ batch_specs:

 # Backends that support query length > 1
 backends:
-  - flashattn_mla # reorder_batch_threshold = 512
-  - flashmla # reorder_batch_threshold = 1 (tunable)
+  - FLASH_ATTN_MLA # reorder_batch_threshold = 512
+  - FLASHMLA # reorder_batch_threshold = 1 (tunable)

 # FlashInfer-MLA also supports uniform spec-as-decode but with different mechanism
-# - flashinfer_mla
+# - FLASHINFER_MLA

 # Benchmark settings
-benchmark:
-  device: "cuda:0"
-  repeats: 10 # More repeats for statistical significance
-  warmup_iters: 5
-  profile_memory: false
+device: "cuda:0"
+repeats: 10 # More repeats for statistical significance
+warmup_iters: 5
+profile_memory: false

 # Test these threshold values for optimization
 parameter_sweep:
@@ -25,14 +25,22 @@ batch_specs:
   - "4q1k_16q1s2k"   # 4 prefill + 16 decode
   - "2q4k_32q1s1k"   # 2 large prefill + 32 decode
 
-  # Context extension
-  - "q1ks2k"    # 1k query, 2k sequence (chunked prefill)
+  # Speculative decode (q <= 8)
+  - "16q2s1k"   # 16 requests, 2 spec tokens, 1k KV cache
+  - "16q4s1k"   # 16 requests, 4 spec tokens, 1k KV cache
+  - "16q8s1k"   # 16 requests, 8 spec tokens, 1k KV cache
+  - "32q4s2k"   # 32 requests, 4 spec tokens, 2k KV cache
+  - "8q8s4k"    # 8 requests, 8 spec tokens, 4k KV cache
+
+  # Context extension (chunked prefill)
+  - "q1ks2k"    # 1k query, 2k sequence
   - "2q1ks4k"   # 2 requests: 1k query, 4k sequence
 
+# Available backends: FLASH_ATTN, TRITON_ATTN, FLASHINFER
 backends:
-  - flash
-  - triton
-  - flashinfer
+  - FLASH_ATTN
+  - TRITON_ATTN
+  - FLASHINFER
 
 device: "cuda:0"
 repeats: 5
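The batch-spec strings above follow a compact grammar: an optional request count, q plus a query length, and an optional s plus a sequence (KV) length, with a k suffix meaning x1024 and groups joined by "_". The repository's real parser is parse_batch_spec; the regex version below is only an illustrative sketch of that assumed grammar:

import re

# Illustrative parser for specs like "16q4s1k" or "4q4k_60q1s4k" (assumed grammar).
_GROUP = re.compile(r"(?P<n>\d*)q(?P<q>\d+k?)(?:s(?P<s>\d+k?))?")

def _size(tok: str) -> int:
    return int(tok[:-1]) * 1024 if tok.endswith("k") else int(tok)

def parse_spec(spec: str) -> list[tuple[int, int]]:
    """Return one (q_len, kv_len) pair per request."""
    requests = []
    for group in spec.split("_"):
        m = _GROUP.fullmatch(group)
        if not m:
            raise ValueError(f"bad batch spec group: {group}")
        n = int(m.group("n") or 1)
        q = _size(m.group("q"))
        kv = _size(m.group("s")) if m.group("s") else q  # pure prefill: kv == q
        requests.extend([(q, kv)] * n)
    return requests

assert parse_spec("2q1ks4k") == [(1024, 4096), (1024, 4096)]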
@@ -8,14 +8,13 @@ This module provides helpers for running MLA backends without
 needing full VllmConfig integration.
 """
 
-import importlib
-
 import numpy as np
 import torch
 from batch_spec import parse_batch_spec
 from common import (
     BenchmarkResult,
     MockHfConfig,
+    MockIndexer,
     MockKVBProj,
     MockLayer,
     setup_mla_dims,
@@ -62,6 +61,7 @@ def create_minimal_vllm_config(
     block_size: int = 128,
     max_num_seqs: int = 256,
     mla_dims: dict | None = None,
+    index_topk: int | None = None,
 ) -> VllmConfig:
     """
     Create minimal VllmConfig for MLA benchmarks.
@@ -73,6 +73,8 @@ def create_minimal_vllm_config(
         max_num_seqs: Maximum number of sequences
         mla_dims: Optional custom MLA dimensions dict. If not provided, uses
             setup_mla_dims(model_name)
+        index_topk: Optional topk value for sparse MLA backends. If provided,
+            the config will include index_topk for sparse attention.
 
     Returns:
         VllmConfig for benchmarking
@@ -82,7 +84,7 @@ def create_minimal_vllm_config(
         mla_dims = setup_mla_dims(model_name)
 
     # Create mock HF config first (avoids downloading from HuggingFace)
-    mock_hf_config = MockHfConfig(mla_dims)
+    mock_hf_config = MockHfConfig(mla_dims, index_topk=index_topk)
 
     # Create a temporary minimal config.json to avoid HF downloads
     # This ensures consistent ModelConfig construction without network access
@@ -120,16 +122,12 @@ def create_minimal_vllm_config(
             seed=0,
             max_model_len=32768,
             quantization=None,
-            quantization_param_path=None,
             enforce_eager=False,
-            max_context_len_to_capture=None,
-            max_seq_len_to_capture=8192,
             max_logprobs=20,
             disable_sliding_window=False,
             skip_tokenizer_init=True,
             served_model_name=None,
             limit_mm_per_prompt=None,
-            use_async_output_proc=True,
             config_format="auto",
         )
     finally:
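The dropped keywords are arguments that no longer exist on ModelConfig in this vLLM version. If a harness had to run against several versions, one defensive option (hypothetical, not what this diff does) is to filter kwargs against the live signature:

import inspect

def filter_kwargs(cls, kwargs: dict) -> dict:
    # Keep only keyword arguments the installed version's __init__ accepts.
    allowed = set(inspect.signature(cls.__init__).parameters) - {"self"}
    return {k: v for k, v in kwargs.items() if k in allowed}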
@@ -180,56 +178,65 @@ def create_minimal_vllm_config(
 # ============================================================================
 
 
-# Backend name to class name prefix mapping
-_BACKEND_NAME_MAP = {
-    "flashattn_mla": "FlashAttnMLA",
-    "flashmla": "FlashMLA",
-    "flashinfer_mla": "FlashInferMLA",
-    "cutlass_mla": "CutlassMLA",
-}
-
-# Special properties that differ from defaults
+# Backend-specific properties that can't be inferred from the backend class
+# Keys are AttentionBackendEnum names (uppercase)
 _BACKEND_PROPERTIES = {
-    "flashmla": {
+    "FLASHMLA": {
         "query_format": "concat",  # Single concatenated tensor (vs tuple)
-        "block_size": 64,  # FlashMLA uses fixed block size
     },
-    "flashinfer_mla": {
-        "block_size": 64,  # FlashInfer MLA only supports 32 or 64
+    "FLASHMLA_SPARSE": {
+        "query_format": "concat",  # Single concatenated tensor (vs tuple)
     },
 }
 
 
 def _get_backend_config(backend: str) -> dict:
     """
-    Get backend configuration using naming conventions.
+    Get backend configuration from AttentionBackendEnum.
 
-    All MLA backends follow the pattern:
-    - Module: vllm.v1.attention.backends.mla.{backend}
-    - Impl: {Name}Impl
-    - Metadata: {Name}Metadata (or MLACommonMetadata)
-    - DecodeMetadata: {Name}DecodeMetadata (or MLACommonDecodeMetadata)
-    - MetadataBuilder: {Name}MetadataBuilder
+    Uses the registry to get the backend class and extract configuration
+    from its methods (get_impl_cls, get_builder_cls, is_sparse, etc.).
+
+    Args:
+        backend: Backend name matching AttentionBackendEnum exactly
+            (e.g., "FLASHMLA_SPARSE")
+
+    Returns:
+        Dict with backend configuration
     """
-    if backend not in _BACKEND_NAME_MAP:
-        raise ValueError(f"Unknown backend: {backend}")
+    from vllm.v1.attention.backends.registry import AttentionBackendEnum
 
-    name = _BACKEND_NAME_MAP[backend]
+    try:
+        backend_enum = AttentionBackendEnum[backend]
+        backend_class = backend_enum.get_class()
+    except (KeyError, ValueError) as e:
+        valid_backends = [e.name for e in AttentionBackendEnum if e.name != "CUSTOM"]
+        raise ValueError(
+            f"Unknown backend: {backend}. "
+            f"Valid MLA backends: {[b for b in valid_backends if 'MLA' in b]}"
+        ) from e
 
+    # Get block size from backend class
+    block_sizes = backend_class.get_supported_kernel_block_sizes()
+    # Use first supported block size (backends typically support one for MLA)
+    block_size = block_sizes[0] if block_sizes else None
+    if hasattr(block_size, "value"):
+        # Handle MultipleOf enum
+        block_size = None
+
+    # Check if sparse via class method if available
+    is_sparse = getattr(backend_class, "is_sparse", lambda: False)()
+
+    # Get properties that can't be inferred
     props = _BACKEND_PROPERTIES.get(backend, {})
 
-    # Check if backend uses common metadata (FlashInfer, CUTLASS)
-    uses_common = backend in ("flashinfer_mla", "cutlass_mla")
-
     return {
-        "module": f"vllm.v1.attention.backends.mla.{backend}",
-        "impl_class": f"{name}Impl",
-        "metadata_class": "MLACommonMetadata" if uses_common else f"{name}Metadata",
-        "decode_metadata_class": "MLACommonDecodeMetadata"
-        if uses_common
-        else f"{name}DecodeMetadata",
-        "builder_class": f"{name}MetadataBuilder",
+        "backend_class": backend_class,
+        "impl_class": backend_class.get_impl_cls(),
+        "builder_class": backend_class.get_builder_cls(),
         "query_format": props.get("query_format", "tuple"),
-        "block_size": props.get("block_size", None),
+        "block_size": block_size,
+        "is_sparse": is_sparse,
     }
 
 
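For orientation, the registry lookup that replaces the name map can be exercised directly. This sketch uses only calls the new code itself makes (AttentionBackendEnum indexing, get_class(), get_impl_cls(), get_supported_kernel_block_sizes()), so it should behave as shown on a vLLM build that has this registry:

from vllm.v1.attention.backends.registry import AttentionBackendEnum

backend_class = AttentionBackendEnum["FLASHMLA"].get_class()
print(backend_class.get_impl_cls().__name__)             # backend implementation class
print(backend_class.get_supported_kernel_block_sizes())  # supported kernel block sizes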
@@ -447,22 +454,26 @@ def _create_backend_impl(
     mla_dims: dict,
     vllm_config: VllmConfig,
     device: torch.device,
+    max_num_tokens: int = 8192,
+    index_topk: int | None = None,
 ):
     """
     Create backend implementation instance.
 
     Args:
-        backend_cfg: Backend configuration dict
+        backend_cfg: Backend configuration dict from _get_backend_config()
         mla_dims: MLA dimension configuration
         vllm_config: VllmConfig instance
         device: Target device
+        max_num_tokens: Maximum number of tokens for sparse indexer buffer
+        index_topk: Topk value for sparse MLA backends
 
     Returns:
-        Tuple of (impl, layer, builder_instance)
+        Tuple of (impl, layer, builder_instance, indexer)
     """
-    # Import backend classes
-    backend_module = importlib.import_module(backend_cfg["module"])
-    impl_class = getattr(backend_module, backend_cfg["impl_class"])
+    # Get classes from backend config (already resolved by _get_backend_config)
+    impl_class = backend_cfg["impl_class"]
+    builder_class = backend_cfg["builder_class"]
 
     # Calculate scale
     scale = 1.0 / np.sqrt(mla_dims["qk_nope_head_dim"] + mla_dims["qk_rope_head_dim"])
@@ -474,26 +485,44 @@ def _create_backend_impl(
         v_head_dim=mla_dims["v_head_dim"],
     )
 
+    # Create indexer for sparse backends
+    indexer = None
+    if backend_cfg.get("is_sparse", False):
+        if index_topk is None:
+            index_topk = 2048  # Default topk for sparse MLA
+        indexer = MockIndexer(
+            max_num_tokens=max_num_tokens,
+            topk_tokens=index_topk,
+            device=device,
+        )
+
+    # Build impl kwargs
+    impl_kwargs = {
+        "num_heads": mla_dims["num_q_heads"],
+        "head_size": mla_dims["head_dim"],
+        "scale": scale,
+        "num_kv_heads": mla_dims["num_kv_heads"],
+        "alibi_slopes": None,
+        "sliding_window": None,
+        "kv_cache_dtype": "auto",
+        "logits_soft_cap": None,
+        "attn_type": "decoder",
+        "kv_sharing_target_layer_name": None,
+        "q_lora_rank": None,
+        "kv_lora_rank": mla_dims["kv_lora_rank"],
+        "qk_nope_head_dim": mla_dims["qk_nope_head_dim"],
+        "qk_rope_head_dim": mla_dims["qk_rope_head_dim"],
+        "qk_head_dim": mla_dims["qk_nope_head_dim"] + mla_dims["qk_rope_head_dim"],
+        "v_head_dim": mla_dims["v_head_dim"],
+        "kv_b_proj": mock_kv_b_proj,
+    }
+
+    # Add indexer for sparse backends
+    if indexer is not None:
+        impl_kwargs["indexer"] = indexer
+
     # Create impl
-    impl = impl_class(
-        num_heads=mla_dims["num_q_heads"],
-        head_size=mla_dims["head_dim"],
-        scale=scale,
-        num_kv_heads=mla_dims["num_kv_heads"],
-        alibi_slopes=None,
-        sliding_window=None,
-        kv_cache_dtype="auto",
-        logits_soft_cap=None,
-        attn_type="decoder",
-        kv_sharing_target_layer_name=None,
-        q_lora_rank=None,
-        kv_lora_rank=mla_dims["kv_lora_rank"],
-        qk_nope_head_dim=mla_dims["qk_nope_head_dim"],
-        qk_rope_head_dim=mla_dims["qk_rope_head_dim"],
-        qk_head_dim=mla_dims["qk_nope_head_dim"] + mla_dims["qk_rope_head_dim"],
-        v_head_dim=mla_dims["v_head_dim"],
-        kv_b_proj=mock_kv_b_proj,
-    )
+    impl = impl_class(**impl_kwargs)
 
     # Initialize DCP attributes
     if not hasattr(impl, "dcp_world_size") or impl.dcp_world_size in (None, -1):
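The kwargs-dict refactor above is the usual way to keep a single constructor call when one argument (indexer) only applies to some implementations. A generic sketch of the pattern, independent of vLLM:

def construct(cls, base_kwargs: dict, indexer=None):
    kwargs = dict(base_kwargs)       # copy so the caller's dict is untouched
    if indexer is not None:
        kwargs["indexer"] = indexer  # passed only when the impl accepts it
    return cls(**kwargs)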
@@ -515,9 +544,7 @@ def _create_backend_impl(
 
     # Create builder instance if needed
     builder_instance = None
-    if backend_cfg["builder_class"]:
-        builder_class = getattr(backend_module, backend_cfg["builder_class"])
-
+    if builder_class:
         # Populate static_forward_context so builder can find the layer
         # MockLayer inherits from AttentionLayerBase, so isinstance checks pass
         vllm_config.compilation_config.static_forward_context = {"placeholder": layer}
@@ -529,7 +556,7 @@ def _create_backend_impl(
             device=device,
         )
 
-    return impl, layer, builder_instance
+    return impl, layer, builder_instance, indexer
 
 
 # ============================================================================
@@ -594,6 +621,7 @@ def _run_single_benchmark(
     backend_cfg: dict,
     mla_dims: dict,
     device: torch.device,
+    indexer=None,
 ) -> BenchmarkResult:
     """
     Run a single benchmark iteration.
@@ -606,6 +634,7 @@ def _run_single_benchmark(
         backend_cfg: Backend configuration dict
         mla_dims: MLA dimension configuration
         device: Target device
+        indexer: Optional MockIndexer for sparse backends
 
     Returns:
         BenchmarkResult with timing statistics
@@ -613,7 +642,9 @@ def _run_single_benchmark(
     # Parse batch spec
     requests = parse_batch_spec(config.batch_spec)
    q_lens = [r.q_len for r in requests]
+    kv_lens = [r.kv_len for r in requests]
     total_q = sum(q_lens)
+    max_kv_len = max(kv_lens)
 
     # Determine block size
     block_size = backend_cfg["block_size"] or config.block_size
@@ -641,8 +672,16 @@ def _run_single_benchmark(
         torch.bfloat16,
     )
 
-    # Determine which forward method to use based on metadata
-    if metadata.decode is not None:
+    # Fill indexer with random indices for sparse backends
+    is_sparse = backend_cfg.get("is_sparse", False)
+    if is_sparse and indexer is not None:
+        indexer.fill_random_indices(total_q, max_kv_len)
+
+    # Determine which forward method to use
+    if is_sparse:
+        # Sparse backends use forward_mqa
+        forward_fn = lambda: impl.forward_mqa(decode_inputs, kv_cache, metadata, layer)
+    elif metadata.decode is not None:
         forward_fn = lambda: impl._forward_decode(
             decode_inputs, kv_cache, metadata, layer
         )
@@ -693,11 +732,13 @@ def _run_mla_benchmark_batched(
 def _run_mla_benchmark_batched(
     backend: str,
     configs_with_params: list[tuple],  # [(config, threshold, num_splits), ...]
+    index_topk: int = 2048,
 ) -> list[BenchmarkResult]:
     """
     Unified batched MLA benchmark runner for all backends.
 
-    Works for: flashattn_mla, flashmla, flashinfer_mla, cutlass_mla
+    Works for: flashattn_mla, flashmla, flashinfer_mla, cutlass_mla,
+    flashinfer_mla_sparse, flashmla_sparse
 
     This function reuses backend initialization across multiple benchmarks
     to avoid setup/teardown overhead.
@@ -707,6 +748,7 @@ def _run_mla_benchmark_batched(
         configs_with_params: List of (config, threshold, num_splits) tuples
             - threshold: reorder_batch_threshold (FlashAttn/FlashMLA only)
             - num_splits: num_kv_splits (CUTLASS only)
+        index_topk: Topk value for sparse MLA backends (default 2048)
 
     Returns:
         List of BenchmarkResult objects
@@ -730,19 +772,27 @@ def _run_mla_benchmark_batched(
     if mla_dims is None:
         mla_dims = setup_mla_dims("deepseek-v3")
 
+    # Determine if this is a sparse backend
+    is_sparse = backend_cfg.get("is_sparse", False)
+
     # Create and set vLLM config for MLA (reused across all benchmarks)
     vllm_config = create_minimal_vllm_config(
         model_name="deepseek-v3",  # Used only for model path
         block_size=block_size,
         mla_dims=mla_dims,  # Use custom dims from config or default
+        index_topk=index_topk if is_sparse else None,
     )
 
     results = []
 
     with set_current_vllm_config(vllm_config):
-        # Create backend impl, layer, and builder (reused across benchmarks)
-        impl, layer, builder_instance = _create_backend_impl(
-            backend_cfg, mla_dims, vllm_config, device
+        # Create backend impl, layer, builder, and indexer (reused across benchmarks)
+        impl, layer, builder_instance, indexer = _create_backend_impl(
+            backend_cfg,
+            mla_dims,
+            vllm_config,
+            device,
+            index_topk=index_topk if is_sparse else None,
         )
 
         # Run each benchmark with the shared impl
@@ -768,6 +818,7 @@ def _run_mla_benchmark_batched(
                 backend_cfg,
                 mla_dims,
                 device,
+                indexer=indexer,
             )
             results.append(result)
 
@@ -793,20 +844,24 @@ def run_mla_benchmark(
     config,
     reorder_batch_threshold: int | None = None,
     num_kv_splits: int | None = None,
+    index_topk: int = 2048,
 ) -> BenchmarkResult | list[BenchmarkResult]:
     """
     Unified MLA benchmark runner for all backends.
 
-    Works for: flashattn_mla, flashmla, flashinfer_mla, cutlass_mla
+    Works for: flashattn_mla, flashmla, flashinfer_mla, cutlass_mla,
+    flashinfer_mla_sparse, flashmla_sparse
 
     Always uses batched execution internally for optimal performance.
 
     Args:
-        backend: Backend name (flashattn_mla, flashmla, flashinfer_mla, cutlass_mla)
+        backend: Backend name (flashattn_mla, flashmla, flashinfer_mla, cutlass_mla,
+            flashinfer_mla_sparse, flashmla_sparse)
         config: BenchmarkConfig or list of (BenchmarkConfig, param) tuples
         reorder_batch_threshold: Threshold override for FlashAttn/FlashMLA
             (single config mode only)
         num_kv_splits: Number of KV splits for CUTLASS (single config mode only)
+        index_topk: Topk value for sparse MLA backends (default 2048)
 
     Returns:
         BenchmarkResult (single mode) or list of BenchmarkResult (batched mode)
@@ -816,9 +871,9 @@ def run_mla_benchmark(
     # Already in batched format
     if len(config) > 0 and isinstance(config[0], tuple):
         # Format: [(cfg, param), ...] where param is threshold or num_splits
-        if backend in ("flashattn_mla", "flashmla"):
+        if backend in ("flashattn_mla", "flashmla", "flashmla_sparse"):
             configs_with_params = [(cfg, param, None) for cfg, param in config]
-        else:  # cutlass_mla or flashinfer_mla
+        else:  # cutlass_mla, flashinfer_mla, or sparse backends
             configs_with_params = [(cfg, None, param) for cfg, param in config]
     else:
         # Format: [cfg, ...] - just configs
@@ -830,7 +885,7 @@ def run_mla_benchmark(
         return_single = True
 
     # Use unified batched execution
-    results = _run_mla_benchmark_batched(backend, configs_with_params)
+    results = _run_mla_benchmark_batched(backend, configs_with_params, index_topk)
 
     # Return single result or list based on input
     return results[0] if return_single else results
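Putting the signature changes together, a hedged usage sketch of the runner; the cfg_a and cfg_b objects are assumed BenchmarkConfig instances built elsewhere in the harness:

# Batched mode: (config, param) tuples, where param is a reorder threshold here.
results = run_mla_benchmark("flashattn_mla", [(cfg_a, 512), (cfg_b, 1)])

# Single-config mode against a sparse backend with a custom topk.
result = run_mla_benchmark("flashmla_sparse", cfg_a, index_topk=1024)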
@@ -8,7 +8,9 @@ This module provides helpers for running standard attention backends
 (FlashAttention, Triton, FlashInfer) with real vLLM integration.
 """
 
+import logging
 import types
+from contextlib import contextmanager
 
 import numpy as np
 import torch
@@ -24,8 +26,13 @@ from vllm.config import (
     ParallelConfig,
     SchedulerConfig,
     VllmConfig,
+    set_current_vllm_config,
+)
+from vllm.v1.attention.backends.utils import (
+    CommonAttentionMetadata,
+    get_kv_cache_layout,
+    set_kv_cache_layout,
 )
-from vllm.v1.attention.backends.utils import CommonAttentionMetadata
 from vllm.v1.kv_cache_interface import FullAttentionSpec
 
 # ============================================================================
@@ -33,37 +40,41 @@ from vllm.v1.kv_cache_interface import FullAttentionSpec
 # ============================================================================
 
 
-_BACKEND_CONFIG = {
-    "flash": {
-        "module": "vllm.v1.attention.backends.flash_attn",
-        "backend_class": "FlashAttentionBackend",
-        "dtype": torch.float16,
-        "cache_layout": "standard",
-        # ^ [2, num_blocks, block_size, num_kv_heads, head_dim]
-    },
-    "triton": {
-        "module": "vllm.v1.attention.backends.triton_attn",
-        "backend_class": "TritonAttentionBackend",
-        "dtype": torch.float32,
-        "cache_layout": "standard",
-    },
-    "flashinfer": {
-        "module": "vllm.v1.attention.backends.flashinfer",
-        "backend_class": "FlashInferBackend",
-        "dtype": torch.float16,
-        "cache_layout": "flashinfer",
-        # ^ [num_blocks, 2, block_size, num_kv_heads, head_dim]
-    },
-}
-
-
 def _get_backend_config(backend: str) -> dict:
-    if backend not in _BACKEND_CONFIG:
-        raise ValueError(
-            f"Unknown backend: {backend}. "
-            f"Available: {', '.join(_BACKEND_CONFIG.keys())}"
-        )
-    return _BACKEND_CONFIG[backend]
+    """
+    Get backend configuration from AttentionBackendEnum.
+
+    Args:
+        backend: Backend name matching AttentionBackendEnum exactly
+            (e.g., "FLASH_ATTN", "TRITON_ATTN", "FLASHINFER")
+
+    Returns:
+        Dict with backend_class
+    """
+    from vllm.v1.attention.backends.registry import AttentionBackendEnum
+
+    try:
+        backend_enum = AttentionBackendEnum[backend]
+        backend_class = backend_enum.get_class()
+    except (KeyError, ValueError) as e:
+        valid_backends = [b.name for b in AttentionBackendEnum if b.name != "CUSTOM"]
+        raise ValueError(
+            f"Unknown backend: {backend}. Valid backends: {valid_backends}"
+        ) from e
+    return {"backend_class": backend_class}
+
+
+@contextmanager
+def log_warnings_and_errors_only():
+    """Temporarily set vLLM logger to WARNING level."""
+    logger = logging.getLogger("vllm")
+    old_level = logger.level
+    logger.setLevel(logging.WARNING)
+    try:
+        yield
+    finally:
+        logger.setLevel(old_level)
 
 
 # ============================================================================
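The new context manager is used later to quiet vLLM's logger during setup; usage is simply:

with log_warnings_and_errors_only():
    cfg = _get_backend_config("FLASH_ATTN")  # INFO/DEBUG logs suppressed inside the block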
@@ -88,11 +99,7 @@ def _build_common_attn_metadata(
     query_start_loc_cpu = query_start_loc.cpu()
 
     seq_lens = torch.tensor(kv_lens, dtype=torch.int32, device=device)
-    seq_lens_cpu = seq_lens.cpu()
-    max_seq_len = int(seq_lens_cpu.max())
-
-    context_lens = [kv - q for kv, q in zip(kv_lens, q_lens)]
-    num_computed_tokens_cpu = torch.tensor(context_lens, dtype=torch.int32)
+    max_seq_len = int(seq_lens.max().item())
 
     max_blocks = (max(kv_lens) + block_size - 1) // block_size
     num_blocks = batch_size * max_blocks
@@ -107,8 +114,6 @@ def _build_common_attn_metadata(
         query_start_loc=query_start_loc,
         query_start_loc_cpu=query_start_loc_cpu,
         seq_lens=seq_lens,
-        seq_lens_cpu=seq_lens_cpu,
-        num_computed_tokens_cpu=num_computed_tokens_cpu,
         num_reqs=batch_size,
         num_actual_tokens=total_tokens,
         max_query_len=max_query_len,
@@ -121,7 +126,6 @@ def _build_common_attn_metadata(
 
 def _create_vllm_config(
     config: BenchmarkConfig,
-    dtype: torch.dtype,
     max_num_blocks: int,
 ) -> VllmConfig:
     """Create a VllmConfig for benchmarking with mock model methods."""
@@ -129,7 +133,7 @@ def _create_vllm_config(
         model="meta-llama/Meta-Llama-3-8B",
         tokenizer="meta-llama/Meta-Llama-3-8B",
         trust_remote_code=False,
-        dtype=dtype,
+        dtype="auto",  # Use model's native dtype
         seed=0,
         max_model_len=1024,
     )
@@ -198,15 +202,12 @@ def _create_backend_impl(
     backend_cfg: dict,
     config: BenchmarkConfig,
     device: torch.device,
+    dtype: torch.dtype,
 ):
     """Create backend implementation instance."""
-    import importlib
-
-    backend_module = importlib.import_module(backend_cfg["module"])
-    backend_class = getattr(backend_module, backend_cfg["backend_class"])
+    backend_class = backend_cfg["backend_class"]
 
     scale = get_attention_scale(config.head_dim)
-    dtype = backend_cfg["dtype"]
 
     impl = backend_class.get_impl_cls()(
         num_heads=config.num_q_heads,
@@ -227,7 +228,7 @@ def _create_backend_impl(
 
     layer = MockLayer(device, kv_cache_spec=kv_cache_spec)
 
-    return backend_class, impl, layer, dtype
+    return backend_class, impl, layer
 
 
 def _create_metadata_builder(
@@ -235,11 +236,44 @@ def _create_metadata_builder(
     kv_cache_spec: FullAttentionSpec,
     vllm_config: VllmConfig,
     device: torch.device,
+    backend_name: str = "",
 ):
     """Create metadata builder instance."""
-    return backend_class.get_builder_cls()(
+    layer_names = ["layer_0"]
+    builder_cls = backend_class.get_builder_cls()
+
+    # Flashinfer needs get_per_layer_parameters mocked since we don't have
+    # real model layers registered
+    if backend_name == "FLASHINFER":
+        import unittest.mock
+
+        from vllm.v1.attention.backends.utils import PerLayerParameters
+
+        def mock_get_per_layer_parameters(vllm_config, layer_names, impl_cls):
+            head_size = vllm_config.model_config.get_head_size()
+            return {
+                layer_name: PerLayerParameters(
+                    window_left=-1,  # No sliding window
+                    logits_soft_cap=0.0,  # No soft cap
+                    sm_scale=1.0 / (head_size**0.5),  # Standard scale
+                )
+                for layer_name in layer_names
+            }
+
+        with unittest.mock.patch(
+            "vllm.v1.attention.backends.flashinfer.get_per_layer_parameters",
+            mock_get_per_layer_parameters,
+        ):
+            return builder_cls(
+                kv_cache_spec=kv_cache_spec,
+                layer_names=layer_names,
+                vllm_config=vllm_config,
+                device=device,
+            )
+
+    return builder_cls(
         kv_cache_spec=kv_cache_spec,
-        layer_names=["layer_0"],
+        layer_names=layer_names,
         vllm_config=vllm_config,
         device=device,
     )
@@ -281,39 +315,44 @@ def _create_input_tensors(
 def _create_kv_cache(
     config: BenchmarkConfig,
     max_num_blocks: int,
-    cache_layout: str,
+    backend_class,
     device: torch.device,
     dtype: torch.dtype,
 ) -> list:
-    """Create KV cache tensors for all layers."""
-    if cache_layout == "flashinfer":
-        # FlashInfer layout: [num_blocks, 2, block_size, num_kv_heads, head_dim]
-        cache_list = [
-            torch.zeros(
-                max_num_blocks,
-                2,
-                config.block_size,
-                config.num_kv_heads,
-                config.head_dim,
-                device=device,
-                dtype=dtype,
-            )
-            for _ in range(config.num_layers)
-        ]
-    else:
-        # Standard layout: [2, num_blocks, block_size, num_kv_heads, head_dim]
-        cache_list = [
-            torch.zeros(
-                2,
-                max_num_blocks,
-                config.block_size,
-                config.num_kv_heads,
-                config.head_dim,
-                device=device,
-                dtype=dtype,
-            )
-            for _ in range(config.num_layers)
-        ]
+    """Create KV cache tensors for all layers using the backend's methods.
+
+    Uses the backend's get_kv_cache_shape() and get_kv_cache_stride_order()
+    to create the cache with the correct shape and memory layout.
+    """
+    # Get the logical shape from the backend
+    cache_shape = backend_class.get_kv_cache_shape(
+        num_blocks=max_num_blocks,
+        block_size=config.block_size,
+        num_kv_heads=config.num_kv_heads,
+        head_size=config.head_dim,
+    )
+
+    # Get the stride order for custom memory layout
+    try:
+        stride_order = backend_class.get_kv_cache_stride_order()
+        assert len(stride_order) == len(cache_shape)
+    except (AttributeError, NotImplementedError):
+        stride_order = tuple(range(len(cache_shape)))
+
+    # Permute shape to physical layout order
+    physical_shape = tuple(cache_shape[i] for i in stride_order)
+
+    # Compute inverse permutation to get back to logical view
+    inv_order = [stride_order.index(i) for i in range(len(stride_order))]
+
+    cache_list = []
+    for _ in range(config.num_layers):
+        # Allocate in physical layout order (contiguous in memory)
+        cache = torch.zeros(*physical_shape, device=device, dtype=dtype)
+        # Permute to logical view
+        cache = cache.permute(*inv_order)
+        cache_list.append(cache)
 
     return cache_list
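The allocate-then-permute trick in _create_kv_cache is worth seeing in isolation: memory is laid out contiguously in the backend's physical order, then permuted so callers index by the logical shape. A self-contained illustration with made-up dimensions:

import torch

cache_shape = (2, 128, 16, 8, 64)   # logical (k/v, num_blocks, block_size, heads, head_dim)
stride_order = (1, 0, 2, 3, 4)      # physical layout puts num_blocks outermost

physical_shape = tuple(cache_shape[i] for i in stride_order)
cache = torch.zeros(*physical_shape)            # contiguous in physical order
inv_order = [stride_order.index(i) for i in range(5)]
logical = cache.permute(*inv_order)             # view indexed by the logical shape

assert logical.shape == cache_shape
assert logical.stride(1) > logical.stride(0)    # num_blocks has the largest stride in memory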
@@ -396,7 +435,7 @@ def run_attention_benchmark(config: BenchmarkConfig) -> BenchmarkResult:
     """
     Run standard attention benchmark with real kernels.
 
-    Supports: flash, triton, flashinfer
+    Supports: FLASH_ATTN, TRITON_ATTN, FLASHINFER
 
     Args:
         config: Benchmark configuration
@@ -411,60 +450,79 @@ def run_attention_benchmark(config: BenchmarkConfig) -> BenchmarkResult:
 
     requests = parse_batch_spec(config.batch_spec)
 
-    if config.backend == "flashinfer":
+    if config.backend == "FLASHINFER":
         requests = reorder_for_flashinfer(requests)
 
     q_lens = [r.q_len for r in requests]
     kv_lens = [r.kv_len for r in requests]
     total_q = sum(q_lens)
     max_kv = max(kv_lens)
+    batch_size = len(q_lens)
 
-    max_num_blocks = (max_kv + config.block_size - 1) // config.block_size
+    # Calculate total blocks needed: batch_size * max_blocks_per_request
+    max_blocks_per_request = (max_kv + config.block_size - 1) // config.block_size
+    max_num_blocks = batch_size * max_blocks_per_request
 
-    backend_class, impl, layer, dtype = _create_backend_impl(
-        backend_cfg, config, device
-    )
-
-    common_metadata = _build_common_attn_metadata(
-        q_lens, kv_lens, config.block_size, device
-    )
-
-    kv_cache_spec = FullAttentionSpec(
-        block_size=config.block_size,
-        num_kv_heads=config.num_kv_heads,
-        head_size=config.head_dim,
-        dtype=dtype,
-    )
-
-    vllm_config = _create_vllm_config(config, dtype, max_num_blocks)
-
-    builder = _create_metadata_builder(
-        backend_class, kv_cache_spec, vllm_config, device
-    )
-
-    attn_metadata = builder.build(
-        common_prefix_len=0,
-        common_attn_metadata=common_metadata,
-    )
-
-    q_list, k_list, v_list = _create_input_tensors(config, total_q, device, dtype)
-
-    cache_list = _create_kv_cache(
-        config, max_num_blocks, backend_cfg["cache_layout"], device, dtype
-    )
-
-    times, mem_stats = _run_single_benchmark(
-        config,
-        impl,
-        layer,
-        q_list,
-        k_list,
-        v_list,
-        cache_list,
-        attn_metadata,
-        device,
-        dtype,
-    )
+    # Suppress vLLM logs during setup to reduce spam
+    with log_warnings_and_errors_only():
+        # Create vllm_config first - uses model's native dtype via "auto"
+        vllm_config = _create_vllm_config(config, max_num_blocks)
+        dtype = vllm_config.model_config.dtype
+
+        # Wrap everything in set_current_vllm_config context
+        # This is required for backends like flashinfer that need global config
+        with set_current_vllm_config(vllm_config):
+            backend_class, impl, layer = _create_backend_impl(
+                backend_cfg, config, device, dtype
+            )
+
+            # Set KV cache layout if the backend requires a specific one
+            # (e.g., FlashInfer requires HND on SM100/Blackwell for TRTLLM attention)
+            required_layout = backend_class.get_required_kv_cache_layout()
+            if required_layout is not None:
+                set_kv_cache_layout(required_layout)
+                get_kv_cache_layout.cache_clear()
+
+            common_metadata = _build_common_attn_metadata(
+                q_lens, kv_lens, config.block_size, device
+            )
+
+            kv_cache_spec = FullAttentionSpec(
+                block_size=config.block_size,
+                num_kv_heads=config.num_kv_heads,
+                head_size=config.head_dim,
+                dtype=dtype,
+            )
+
+            builder = _create_metadata_builder(
+                backend_class, kv_cache_spec, vllm_config, device, config.backend
+            )
+
+            attn_metadata = builder.build(
+                common_prefix_len=0,
+                common_attn_metadata=common_metadata,
+            )
+
+            q_list, k_list, v_list = _create_input_tensors(
+                config, total_q, device, dtype
+            )
+
+            cache_list = _create_kv_cache(
+                config, max_num_blocks, backend_class, device, dtype
+            )
+
+            times, mem_stats = _run_single_benchmark(
+                config,
+                impl,
+                layer,
+                q_list,
+                k_list,
+                v_list,
+                cache_list,
+                attn_metadata,
+                device,
+                dtype,
+            )
 
     mean_time = np.mean(times)
     throughput = total_q / mean_time if mean_time > 0 else 0
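One detail above: calling get_kv_cache_layout.cache_clear() implies the getter is memoized with the functools.lru_cache interface, which exposes cache_clear. The general invalidate-on-write pattern, sketched generically rather than with vLLM's actual function:

from functools import lru_cache

_layout = "NHD"

@lru_cache(maxsize=1)
def get_layout() -> str:
    return _layout            # first call caches this value

def set_layout(value: str) -> None:
    global _layout
    _layout = value
    get_layout.cache_clear()  # drop the memoized value so the next call sees the update

set_layout("HND")
assert get_layout() == "HND"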
@@ -46,10 +46,10 @@ echo "VLLM_LOGGING_LEVEL=$VLLM_LOGGING_LEVEL"
 echo "RESULT_FILE=$RESULT"
 echo "====================== AUTO TUNEPARAMETERS ===================="
 
-rm -rf $LOG_FOLDER
-rm -rf $PROFILE_PATH
-mkdir -p $LOG_FOLDER
-mkdir -p $PROFILE_PATH
+rm -rf "$LOG_FOLDER"
+rm -rf "$PROFILE_PATH"
+mkdir -p "$LOG_FOLDER"
+mkdir -p "$PROFILE_PATH"
 
 cd "$BASE/vllm"
 
@@ -85,7 +85,6 @@ start_server() {
     # Each argument and its value are separate elements.
     local common_args_array=(
         "$MODEL"
-        "--disable-log-requests"
         "--port" "8004"
         "--host" "$HOSTNAME"
         "--gpu-memory-utilization" "$gpu_memory_utilization"
@@ -114,7 +113,7 @@ start_server() {
 
     # wait for 10 minutes...
     server_started=0
-    for i in {1..60}; do
+    for _ in {1..60}; do
         # This line checks whether the server is still alive or not,
         # since that we should always have permission to send signal to the server process.
         kill -0 $server_pid 2> /dev/null || break
@@ -145,12 +144,12 @@ run_benchmark() {
     local vllm_log="$LOG_FOLDER/vllm_log_${max_num_seqs}_${max_num_batched_tokens}.txt"
     echo "vllm_log: $vllm_log"
     echo
-    rm -f $vllm_log
+    rm -f "$vllm_log"
     pkill -if "vllm serve" || true
 
     echo "starting server..."
     # Call start_server without a profile_dir to avoid profiling overhead
-    start_server $gpu_memory_utilization $max_num_seqs $max_num_batched_tokens $vllm_log ""
+    start_server "$gpu_memory_utilization" "$max_num_seqs" "$max_num_batched_tokens" "$vllm_log" ""
     result=$?
     if [[ "$result" -eq 1 ]]; then
         echo "server failed to start. gpu_memory_utilization:$gpu_memory_utilization, max_num_seqs:$max_num_seqs, max_num_batched_tokens: $max_num_batched_tokens"
@@ -168,15 +167,15 @@ run_benchmark() {
     # --profile flag is removed from this call
     vllm bench serve \
         --backend vllm \
-        --model $MODEL \
+        --model "$MODEL" \
         --dataset-name random \
         --random-input-len $adjusted_input_len \
-        --random-output-len $OUTPUT_LEN \
+        --random-output-len "$OUTPUT_LEN" \
         --ignore-eos \
         --disable-tqdm \
         --request-rate inf \
         --percentile-metrics ttft,tpot,itl,e2el \
-        --goodput e2el:$MAX_LATENCY_ALLOWED_MS \
+        --goodput e2el:"$MAX_LATENCY_ALLOWED_MS" \
         --num-prompts 1000 \
         --random-prefix-len $prefix_len \
         --host "$HOSTNAME" \
@@ -195,20 +194,20 @@ run_benchmark() {
     request_rate=$((${throughput%.*} + 1))
     while ((request_rate > 0)); do
         # clear prefix cache
-        curl -X POST http://${HOSTNAME}:8004/reset_prefix_cache
+        curl -X POST http://"${HOSTNAME}":8004/reset_prefix_cache
         sleep 5
         bm_log="$LOG_FOLDER/bm_log_${max_num_seqs}_${max_num_batched_tokens}_requestrate_${request_rate}.txt"
         vllm bench serve \
             --backend vllm \
-            --model $MODEL \
+            --model "$MODEL" \
             --dataset-name random \
             --random-input-len $adjusted_input_len \
-            --random-output-len $OUTPUT_LEN \
+            --random-output-len "$OUTPUT_LEN" \
             --ignore-eos \
             --disable-tqdm \
             --request-rate $request_rate \
             --percentile-metrics ttft,tpot,itl,e2el \
-            --goodput e2el:$MAX_LATENCY_ALLOWED_MS \
+            --goodput e2el:"$MAX_LATENCY_ALLOWED_MS" \
             --num-prompts 100 \
             --random-prefix-len $prefix_len \
             --host "$HOSTNAME" \
@@ -255,7 +254,7 @@ gpu_memory_utilization=0.98
 find_gpu_memory_utilization=0
 while (( $(echo "$gpu_memory_utilization >= 0.9" | bc -l) )); do
     # Pass empty string for profile_dir argument
-    start_server $gpu_memory_utilization "${num_seqs_list[-1]}" "${num_batched_tokens_list[-1]}" "$LOG_FOLDER/vllm_log_gpu_memory_utilization_$gpu_memory_utilization.log" ""
+    start_server "$gpu_memory_utilization" "${num_seqs_list[-1]}" "${num_batched_tokens_list[-1]}" "$LOG_FOLDER/vllm_log_gpu_memory_utilization_$gpu_memory_utilization.log" ""
     result=$?
     if [[ "$result" -eq 0 ]]; then
         find_gpu_memory_utilization=1
@@ -274,7 +273,7 @@ fi
 
 for num_seqs in "${num_seqs_list[@]}"; do
     for num_batched_tokens in "${num_batched_tokens_list[@]}"; do
-        run_benchmark $num_seqs $num_batched_tokens $gpu_memory_utilization
+        run_benchmark "$num_seqs" "$num_batched_tokens" "$gpu_memory_utilization"
     done
 done
 echo "finish permutations"
@@ -285,7 +284,7 @@ echo "finish permutations"
 if (( $(echo "$best_throughput > 0" | bc -l) )); then
     echo
     echo "Benchmark tuning finished. Now running profiling on the best configuration found..."
-    echo "Best config: max_num_seqs: $best_max_num_seqs, max_num_batched_tokens: $best_num_batched_tokens, throughput: $best_throughput"
+    echo "Best config: max_num_seqs: $best_max_num_seqs, max_num_batched_tokens: $best_num_batched_tokens, throughput: $best_throughput, goodput: $best_goodput"
     echo
 
     vllm_log="$LOG_FOLDER/vllm_log_BEST_PROFILE.txt"
@@ -293,7 +292,7 @@ if (( $(echo "$best_throughput > 0" | bc -l) )); then
 
     # Start server with the best params and profiling ENABLED
     echo "Starting server for profiling..."
-    start_server $gpu_memory_utilization $best_max_num_seqs $best_num_batched_tokens "$vllm_log" "$PROFILE_PATH"
+    start_server "$gpu_memory_utilization" "$best_max_num_seqs" "$best_num_batched_tokens" "$vllm_log" "$PROFILE_PATH"
 
     # Run benchmark with the best params and the --profile flag
     echo "Running benchmark with profiling..."
@@ -301,15 +300,15 @@ if (( $(echo "$best_throughput > 0" | bc -l) )); then
     adjusted_input_len=$(( INPUT_LEN - prefix_len ))
     vllm bench serve \
         --backend vllm \
-        --model $MODEL \
+        --model "$MODEL" \
         --dataset-name random \
         --random-input-len $adjusted_input_len \
-        --random-output-len $OUTPUT_LEN \
+        --random-output-len "$OUTPUT_LEN" \
         --ignore-eos \
         --disable-tqdm \
-        --request-rate $best_request_rate \
+        --request-rate "$best_request_rate" \
         --percentile-metrics ttft,tpot,itl,e2el \
-        --goodput e2el:$MAX_LATENCY_ALLOWED_MS \
+        --goodput e2el:"$MAX_LATENCY_ALLOWED_MS" \
         --num-prompts 100 \
         --random-prefix-len $prefix_len \
         --host "$HOSTNAME" \
@@ -64,7 +64,7 @@ for i in $(seq 0 $(($num_runs - 1))); do
     else
         STATUS="FAILURE"
         ((FAILURE_COUNT++))
-        FAILED_RUNS+=("Run #$((i+1)): $(echo $run_object | jq -c .)")
+        FAILED_RUNS+=("Run #$((i+1)): $(echo "$run_object" | jq -c .)")
     fi
 
     RUN_OUTPUT=$(<"$RUN_OUTPUT_FILE")
@@ -649,9 +649,3 @@ ASYNC_REQUEST_FUNCS = {
     "sglang": async_request_openai_completions,
     "llama.cpp": async_request_openai_completions,
 }
-
-OPENAI_COMPATIBLE_BACKENDS = [
-    k
-    for k, v in ASYNC_REQUEST_FUNCS.items()
-    if v in (async_request_openai_completions, async_request_openai_chat_completions)
-]
471
benchmarks/benchmark_topk_topp.py
Normal file
471
benchmarks/benchmark_topk_topp.py
Normal file
@@ -0,0 +1,471 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
# SPDX-License-Identifier: Apache-2.0
|
||||||
|
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
||||||
|
"""
|
||||||
|
Benchmark comparing Triton vs PyTorch sort-based top-k/top-p implementations.
|
||||||
|
|
||||||
|
Compares:
|
||||||
|
- apply_top_k_top_p_triton (Triton binary search)
|
||||||
|
- apply_top_k_top_p (PyTorch sort-based)
|
||||||
|
|
||||||
|
Scenarios:
|
||||||
|
- top_k only (whole batch, partial batch)
|
||||||
|
- top_p only (whole batch, partial batch)
|
||||||
|
- mix of top_k and top_p
|
||||||
|
"""
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
import gc
|
||||||
|
from dataclasses import dataclass
|
||||||
|
|
||||||
|
import torch
|
||||||
|
|
||||||
|
from vllm.v1.sample.ops.topk_topp_sampler import apply_top_k_top_p_pytorch
|
||||||
|
from vllm.v1.sample.ops.topk_topp_triton import (
|
||||||
|
apply_top_k_top_p_triton,
|
||||||
|
reset_buffer_cache,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class BenchmarkConfig:
|
||||||
|
"""Configuration for a benchmark run."""
|
||||||
|
|
||||||
|
name: str
|
||||||
|
batch_size: int
|
||||||
|
vocab_size: int
|
||||||
|
# k and p can be tensors or None
|
||||||
|
k_values: torch.Tensor | None # [batch_size] or None
|
||||||
|
p_values: torch.Tensor | None # [batch_size] or None
|
||||||
|
description: str
|
||||||
|
ops_pct: float = 0.0 # Percentage of ops relative to batch size
|
||||||
|
|
||||||
|
|
||||||
|
def calculate_ops_pct(
|
||||||
|
k_values: torch.Tensor | None,
|
||||||
|
p_values: torch.Tensor | None,
|
||||||
|
vocab_size: int,
|
||||||
|
batch_size: int,
|
||||||
|
) -> float:
|
||||||
|
"""
|
||||||
|
Calculate the percentage of active top-k and top-p operations.
|
||||||
|
|
||||||
|
Returns percentage where 100% = batch_size ops.
|
||||||
|
E.g., if all rows have both top-k and top-p active, returns 200%.
|
||||||
|
"""
|
||||||
|
active_ops = 0
|
||||||
|
|
||||||
|
if k_values is not None:
|
||||||
|
# Count rows where k < vocab_size (active top-k filtering)
|
||||||
|
active_ops += (k_values < vocab_size).sum().item()
|
||||||
|
|
||||||
|
if p_values is not None:
|
||||||
|
# Count rows where p < 1.0 (active top-p filtering)
|
||||||
|
active_ops += (p_values < 1.0).sum().item()
|
||||||
|
|
||||||
|
return (active_ops / batch_size) * 100 if batch_size > 0 else 0.0
|
||||||
|
|
||||||
|
|
||||||
|
def create_logits(
|
||||||
|
batch_size: int, vocab_size: int, device: str = "cuda"
|
||||||
|
) -> torch.Tensor:
|
||||||
|
"""Create random logits mimicking a realistic LLM distribution.
|
||||||
|
|
||||||
|
Uses a Zipf-like probability distribution (rank^-1.1) converted to logits
|
||||||
|
via log, then randomly permuted per row. This produces a peaked distribution
|
||||||
|
where a small number of tokens capture most probability mass, similar to
|
||||||
|
real model outputs.
|
||||||
|
"""
|
||||||
|
# Create Zipf-like probabilities: p(rank) ~ rank^(-alpha)
|
||||||
|
ranks = torch.arange(1, vocab_size + 1, dtype=torch.float32, device=device)
|
||||||
|
probs = ranks.pow(-1.1)
|
||||||
|
probs = probs / probs.sum()
|
||||||
|
|
||||||
|
# Convert to logits (log-probabilities, unnormalized is fine)
|
||||||
|
base_logits = probs.log()
|
||||||
|
|
||||||
|
# Broadcast to batch and randomly permute each row
|
||||||
|
logits = base_logits.unsqueeze(0).expand(batch_size, -1).clone()
|
||||||
|
for i in range(batch_size):
|
||||||
|
logits[i] = logits[i, torch.randperm(vocab_size, device=device)]
|
||||||
|
|
||||||
|
return logits


def measure_memory() -> tuple[int, int]:
    """Return (current allocated, peak allocated) memory in bytes."""
    torch.cuda.synchronize()
    return torch.cuda.memory_allocated(), torch.cuda.max_memory_allocated()


def reset_memory_stats():
    """Reset the Triton buffer cache and CUDA peak memory statistics."""
    reset_buffer_cache()
    torch.cuda.reset_peak_memory_stats()
    torch.cuda.empty_cache()
    gc.collect()


def benchmark_function(
    func,
    logits: torch.Tensor,
    k: torch.Tensor | None,
    p: torch.Tensor | None,
    warmup_iters: int = 5,
    benchmark_iters: int = 20,
) -> tuple[float, int]:
    """Benchmark a function and return (avg_time_ms, peak_memory_bytes)."""
    # Warmup
    for _ in range(warmup_iters):
        logits_copy = logits.clone()
        func(logits_copy, k, p)
    torch.cuda.synchronize()

    # Reset memory stats before benchmark
    reset_memory_stats()

    # Benchmark with CUDA events so only GPU time is measured; the clone()
    # falls outside the timed region
    start_events = [
        torch.cuda.Event(enable_timing=True) for _ in range(benchmark_iters)
    ]
    end_events = [torch.cuda.Event(enable_timing=True) for _ in range(benchmark_iters)]

    for i in range(benchmark_iters):
        logits_copy = logits.clone()
        start_events[i].record()
        func(logits_copy, k, p)
        end_events[i].record()

    torch.cuda.synchronize()

    # Calculate timing
    times = [
        start_events[i].elapsed_time(end_events[i]) for i in range(benchmark_iters)
    ]
    avg_time = sum(times) / len(times)

    # Get peak memory
    _, peak_memory = measure_memory()

    return avg_time, peak_memory
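
# Example (illustrative): time only the Triton path on an 8 x 32k batch.
#
#   logits = create_logits(8, 32768)
#   k = torch.full((8,), 50, dtype=torch.int32, device="cuda")
#   avg_ms, peak_bytes = benchmark_function(apply_top_k_top_p_triton, logits, k, None)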


def create_benchmark_configs(
    batch_sizes: list[int],
    vocab_sizes: list[int],
    device: str = "cuda",
) -> list[BenchmarkConfig]:
    """Create all benchmark configurations."""
    configs = []

    for vocab_size in vocab_sizes:
        for batch_size in batch_sizes:
            # 1. Top-k only - whole batch (all rows have k < vocab_size)
            k_all = torch.full((batch_size,), 50, dtype=torch.int32, device=device)
            configs.append(
                BenchmarkConfig(
                    name=f"topk_whole_b{batch_size}_v{vocab_size // 1000}k",
                    batch_size=batch_size,
                    vocab_size=vocab_size,
                    k_values=k_all,
                    p_values=None,
                    description=f"Top-k only (whole batch, k=50), "
                    f"batch={batch_size}, vocab={vocab_size}",
                    ops_pct=calculate_ops_pct(k_all, None, vocab_size, batch_size),
                )
            )

            # 2. Top-k only - partial batch (half have k=50, half have k=vocab_size)
            k_partial = torch.full((batch_size,), 50, dtype=torch.int32, device=device)
            k_partial[batch_size // 2 :] = vocab_size  # No filtering for second half
            configs.append(
                BenchmarkConfig(
                    name=f"topk_partial_b{batch_size}_v{vocab_size // 1000}k",
                    batch_size=batch_size,
                    vocab_size=vocab_size,
                    k_values=k_partial,
                    p_values=None,
                    description=f"Top-k only (partial batch, 50% k=50, 50% k=vocab), "
                    f"batch={batch_size}, vocab={vocab_size}",
                    ops_pct=calculate_ops_pct(k_partial, None, vocab_size, batch_size),
                )
            )

            # 3. Top-p only - whole batch (all rows have p < 1.0)
            p_all = torch.full((batch_size,), 0.9, dtype=torch.float32, device=device)
            configs.append(
                BenchmarkConfig(
                    name=f"topp_whole_b{batch_size}_v{vocab_size // 1000}k",
                    batch_size=batch_size,
                    vocab_size=vocab_size,
                    k_values=None,
                    p_values=p_all,
                    description=f"Top-p only (whole batch, p=0.9), "
                    f"batch={batch_size}, vocab={vocab_size}",
                    ops_pct=calculate_ops_pct(None, p_all, vocab_size, batch_size),
                )
            )

            # 4. Top-p only - partial batch (half have p=0.9, half have p=1.0)
            p_partial = torch.full(
                (batch_size,), 0.9, dtype=torch.float32, device=device
            )
            p_partial[batch_size // 2 :] = 1.0  # No filtering for second half
            configs.append(
                BenchmarkConfig(
                    name=f"topp_partial_b{batch_size}_v{vocab_size // 1000}k",
                    batch_size=batch_size,
                    vocab_size=vocab_size,
                    k_values=None,
                    p_values=p_partial,
                    description=f"Top-p only (partial batch, 50% p=0.9, 50% p=1.0), "
                    f"batch={batch_size}, vocab={vocab_size}",
                    ops_pct=calculate_ops_pct(None, p_partial, vocab_size, batch_size),
                )
            )

            # 5. Mix of top-k and top-p (both applied to whole batch)
            k_mix = torch.full((batch_size,), 100, dtype=torch.int32, device=device)
            p_mix = torch.full((batch_size,), 0.9, dtype=torch.float32, device=device)
            configs.append(
                BenchmarkConfig(
                    name=f"topk_topp_whole_b{batch_size}_v{vocab_size // 1000}k",
                    batch_size=batch_size,
                    vocab_size=vocab_size,
                    k_values=k_mix,
                    p_values=p_mix,
                    description=f"Top-k + Top-p (whole batch, k=100, p=0.9), "
                    f"batch={batch_size}, vocab={vocab_size}",
                    ops_pct=calculate_ops_pct(k_mix, p_mix, vocab_size, batch_size),
                )
            )

            # 6. Mix with partial application (some rows k only, some p only, some both)
            k_mixed = torch.full(
                (batch_size,), vocab_size, dtype=torch.int32, device=device
            )
            p_mixed = torch.full((batch_size,), 1.0, dtype=torch.float32, device=device)
            # First third: k only
            third = batch_size // 3
            k_mixed[:third] = 50
            # Second third: p only
            p_mixed[third : 2 * third] = 0.5
            # Last third: both k and p
            k_mixed[2 * third :] = 100
            p_mixed[2 * third :] = 0.9
            configs.append(
                BenchmarkConfig(
                    name=f"mixed_partial_b{batch_size}_v{vocab_size // 1000}k",
                    batch_size=batch_size,
                    vocab_size=vocab_size,
                    k_values=k_mixed,
                    p_values=p_mixed,
                    description=f"Mixed partial (1/3 k=50, 1/3 p=0.5, 1/3 both), "
                    f"batch={batch_size}, vocab={vocab_size}",
                    ops_pct=calculate_ops_pct(k_mixed, p_mixed, vocab_size, batch_size),
                )
            )

    return configs
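
# Example (illustrative): create_benchmark_configs([1, 64], [32768]) builds
# 6 scenarios per (batch_size, vocab_size) pair, i.e. 2 * 1 * 6 = 12 configs.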


def format_memory(bytes_val: int) -> str:
    """Format memory in human-readable form."""
    if bytes_val >= 1024**3:
        return f"{bytes_val / (1024**3):.2f} GB"
    elif bytes_val >= 1024**2:
        return f"{bytes_val / (1024**2):.2f} MB"
    elif bytes_val >= 1024:
        return f"{bytes_val / 1024:.2f} KB"
    return f"{bytes_val} B"


def run_benchmark(
    configs: list[BenchmarkConfig],
    warmup_iters: int = 5,
    benchmark_iters: int = 20,
    verbose: bool = True,
) -> list[dict]:
    """Run all benchmarks and print results."""
    results = []

    print("=" * 100)
    print("Top-k/Top-p Benchmark: Triton vs PyTorch Sort-based")
    print("=" * 100)
    print()

    for config in configs:
        if verbose:
            print(f"Running: {config.description}")

        # Create fresh logits for this config
        logits = create_logits(config.batch_size, config.vocab_size)

        # Benchmark Triton
        reset_memory_stats()
        triton_time, triton_mem = benchmark_function(
            apply_top_k_top_p_triton,
            logits,
            config.k_values,
            config.p_values,
            warmup_iters,
            benchmark_iters,
        )

        # Benchmark PyTorch
        reset_memory_stats()
        pytorch_time, pytorch_mem = benchmark_function(
            apply_top_k_top_p_pytorch,
            logits,
            config.k_values,
            config.p_values,
            warmup_iters,
            benchmark_iters,
        )

        speedup = pytorch_time / triton_time if triton_time > 0 else float("inf")
        mem_ratio = pytorch_mem / triton_mem if triton_mem > 0 else float("inf")

        result = {
            "config": config,
            "triton_time_ms": triton_time,
            "pytorch_time_ms": pytorch_time,
            "triton_mem": triton_mem,
            "pytorch_mem": pytorch_mem,
            "speedup": speedup,
            "mem_ratio": mem_ratio,
        }
        results.append(result)

        if verbose:
            print(f"  Triton:  {triton_time:.3f} ms, {format_memory(triton_mem)}")
            print(f"  PyTorch: {pytorch_time:.3f} ms, {format_memory(pytorch_mem)}")
            print(f"  Speedup: {speedup:.2f}x, Memory ratio: {mem_ratio:.2f}x")
            print()

        # Clean up
        del logits
        reset_memory_stats()

    return results


def print_summary_table(results: list[dict]):
    """Print a summary table of results."""
    print()
    print("=" * 130)
    print("SUMMARY TABLE")
    print("=" * 130)
    print()

    # Header
    header = (
        f"{'Scenario':<40} {'Batch':>6} {'Vocab':>7} {'Ops%':>6} "
        f"{'Triton (ms)':>12} {'PyTorch (ms)':>13} {'Speedup':>8} "
        f"{'Tri Mem':>10} {'Pyt Mem':>10}"
    )
    print(header)
    print("-" * 130)

    # Print rows, with a separator line whenever the vocab size changes
    current_vocab = None
    for result in results:
        config = result["config"]

        # Add separator between vocab sizes
        if current_vocab != config.vocab_size:
            if current_vocab is not None:
                print("-" * 130)
            current_vocab = config.vocab_size

        scenario = config.name.split("_b")[0]  # Extract scenario name
        print(
            f"{scenario:<40} {config.batch_size:>6} {config.vocab_size:>7} "
            f"{config.ops_pct:>5.0f}% "
            f"{result['triton_time_ms']:>12.3f} {result['pytorch_time_ms']:>13.3f} "
            f"{result['speedup']:>7.2f}x "
            f"{format_memory(result['triton_mem']):>10} "
            f"{format_memory(result['pytorch_mem']):>10}"
        )

    print("=" * 130)


def main():
    parser = argparse.ArgumentParser(
        description="Benchmark Triton vs PyTorch sort-based top-k/top-p implementations"
    )
    parser.add_argument(
        "--batch-sizes",
        type=int,
        nargs="+",
        default=[1, 4, 16, 64, 128, 512, 1024, 2048],
        help="Batch sizes to test (default: 1 4 16 64 128 512 1024 2048)",
    )
    parser.add_argument(
        "--vocab-sizes",
        type=int,
        nargs="+",
        default=[32768, 131072],  # 32k, 128k
        help="Vocabulary sizes to test (default: 32768 131072)",
    )
    parser.add_argument(
        "--warmup-iters",
        type=int,
        default=5,
        help="Number of warmup iterations (default: 5)",
    )
    parser.add_argument(
        "--benchmark-iters",
        type=int,
        default=20,
        help="Number of benchmark iterations (default: 20)",
    )
    parser.add_argument(
        "--quiet",
        action="store_true",
        help="Only print summary table",
    )

    args = parser.parse_args()

    # Print configuration
    print(f"Batch sizes: {args.batch_sizes}")
    print(f"Vocab sizes: {args.vocab_sizes}")
    print(f"Warmup iterations: {args.warmup_iters}")
    print(f"Benchmark iterations: {args.benchmark_iters}")
    print()

    # Check CUDA
    if not torch.cuda.is_available():
        print("ERROR: CUDA is not available. This benchmark requires a GPU.")
        return

    device_name = torch.cuda.get_device_name(0)
    print(f"GPU: {device_name}")
    print()

    # Create configs
    configs = create_benchmark_configs(
        args.batch_sizes,
        args.vocab_sizes,
    )

    # Run benchmarks
    results = run_benchmark(
        configs,
        warmup_iters=args.warmup_iters,
        benchmark_iters=args.benchmark_iters,
        verbose=not args.quiet,
    )

    # Print summary
    print_summary_table(results)


if __name__ == "__main__":
    main()
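
# Example invocation (the script filename is hypothetical; the flags match the
# argparse definitions above):
#
#   python benchmark_topk_topp.py --batch-sizes 1 16 256 --vocab-sizes 32768 --quiet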
@@ -1,78 +1,7 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import argparse
import json
import math
import os
import time
from types import TracebackType
from typing import Any


def convert_to_pytorch_benchmark_format(
    args: argparse.Namespace, metrics: dict[str, list], extra_info: dict[str, Any]
) -> list:
    """
    Save the benchmark results in the format used by the PyTorch OSS benchmark
    database, with one metric per record:
    https://github.com/pytorch/pytorch/wiki/How-to-integrate-with-PyTorch-OSS-benchmark-database
    """
    records = []
    if not os.environ.get("SAVE_TO_PYTORCH_BENCHMARK_FORMAT", False):
        return records

    for name, benchmark_values in metrics.items():
        record = {
            "benchmark": {
                "name": "vLLM benchmark",
                "extra_info": {
                    "args": vars(args),
                },
            },
            "model": {
                "name": args.model,
            },
            "metric": {
                "name": name,
                "benchmark_values": benchmark_values,
                "extra_info": extra_info,
            },
        }

        tp = record["benchmark"]["extra_info"]["args"].get("tensor_parallel_size")
        # Save tensor_parallel_size parameter if it's part of the metadata
        if not tp and "tensor_parallel_size" in extra_info:
            record["benchmark"]["extra_info"]["args"]["tensor_parallel_size"] = (
                extra_info["tensor_parallel_size"]
            )

        records.append(record)

    return records
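
# Note (illustrative): records are only produced when the
# SAVE_TO_PYTORCH_BENCHMARK_FORMAT environment variable is set, e.g.
#   SAVE_TO_PYTORCH_BENCHMARK_FORMAT=1 python <benchmark>.py ...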


class InfEncoder(json.JSONEncoder):
    def clear_inf(self, o: Any):
        if isinstance(o, dict):
            return {k: self.clear_inf(v) for k, v in o.items()}
        elif isinstance(o, list):
            return [self.clear_inf(v) for v in o]
        elif isinstance(o, float) and math.isinf(o):
            return "inf"
        return o

    def iterencode(self, o: Any, *args, **kwargs) -> Any:
        return super().iterencode(self.clear_inf(o), *args, **kwargs)
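
# Example (illustrative): json.dumps({"latency": float("inf")}, cls=InfEncoder)
# yields '{"latency": "inf"}' instead of the invalid-JSON token Infinity.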


def write_to_json(filename: str, records: list) -> None:
    with open(filename, "w") as f:
        json.dump(
            records,
            f,
            cls=InfEncoder,
            default=lambda o: f"<{type(o).__name__} object is not JSON serializable>",
        )


# Collect time and generate time metrics
@@ -2,7 +2,6 @@
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

# Cutlass bench utils
from collections.abc import Iterable

import torch
@@ -86,15 +85,3 @@ def make_rand_sparse_tensors(
    # Compressed B, Metadata, Original A, B
    return b_compressed, e, a, b


def make_n_rand_sparse_tensors(
    num_tensors: int, dtype: torch.dtype, m: int, n: int, k: int
) -> tuple[list[torch.Tensor], list[torch.Tensor], list[torch.Tensor], list[torch.Tensor]]:
    ABs = []
    for _ in range(num_tensors):
        b_comp, e, a, b = make_rand_sparse_tensors(dtype, m, n, k)
        if b_comp is not None:
            # Reuse the tensors generated above instead of sampling a second set
            ABs.append((b_comp, e, a, b))
    BComps, Es, As, Bs = zip(*ABs)
    return list(BComps), list(Es), list(As), list(Bs)
@@ -1,45 +0,0 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

import asyncio
import time


class RateLimiter:
    """Token bucket rate limiter implementation"""

    def __init__(self, rate_limit):
        self.rate_limit = rate_limit  # Requests per second
        self.num_available_tokens = rate_limit  # Available tokens
        self.last_refill = time.monotonic()  # Last token refill time
        self.lock = asyncio.Lock()  # Synchronization lock

    async def acquire(self):
        """Acquire a token from the rate limiter"""
        while True:
            async with self.lock:
                current_time = time.monotonic()
                elapsed = current_time - self.last_refill

                # Refill the token bucket if more than 1 second has passed
                if elapsed > 1.0:
                    self.num_available_tokens = self.rate_limit
                    self.last_refill = current_time

                # Check if a token is available
                if self.num_available_tokens > 0:
                    self.num_available_tokens -= 1
                    return True

                # Otherwise wait until the next refill
                wait_time = 1.0 - elapsed
                await asyncio.sleep(wait_time)

    async def __aenter__(self):
        """Enter async context manager - acquire token"""
        await self.acquire()
        return self

    async def __aexit__(self, exc_type, exc_value, traceback):
        """Exit async context manager - no cleanup needed"""
        pass
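
# Usage sketch (illustrative; send_request is a placeholder coroutine):
#
#   limiter = RateLimiter(rate_limit=10)  # at most ~10 requests per second
#   async def worker(payload):
#       async with limiter:
#           await send_request(payload)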
@@ -1,39 +0,0 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

import asyncio
from collections import deque


class RequestQueue:
    """Request queue manager with concurrency control"""

    def __init__(self, max_concurrent, max_queue_size):
        # Maximum concurrent requests
        self.max_concurrent = max_concurrent
        self.max_queue_size = max_queue_size  # Maximum queue size
        # Concurrency control
        self.semaphore = asyncio.Semaphore(max_concurrent)
        self.queue = deque()  # Request queue
        self.queue_size = 0  # Current queue size
        self.lock = asyncio.Lock()  # Lock guarding the queue

    async def enqueue(self, task):
        """Add a request task to the queue"""
        async with self.lock:
            if self.queue_size >= self.max_queue_size:
                return False

            self.queue.append(task)
            self.queue_size += 1
            return True

    async def process(self):
        """Process queued requests using semaphore for concurrency control"""
        while True:
            if self.queue:
                async with self.semaphore, self.lock:
                    task = self.queue.popleft()
                    self.queue_size -= 1
                    await task
            await asyncio.sleep(0.01)  # Yield control to event loop
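
# Usage sketch (illustrative; handle_request is a placeholder coroutine):
#
#   queue = RequestQueue(max_concurrent=8, max_queue_size=128)
#   accepted = await queue.enqueue(handle_request(payload))
#   asyncio.create_task(queue.process())  # drain the queue in the background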
@@ -13,6 +13,7 @@ from torch.utils.benchmark import Measurement as TMeasurement
from tqdm import tqdm

import vllm._custom_ops as ops
from vllm.benchmarks.lib.utils import default_vllm_config
from vllm.model_executor.layers.layernorm import RMSNorm
from vllm.model_executor.layers.quantization.utils.fp8_utils import (
    per_token_group_quant_fp8,
@@ -291,6 +292,7 @@ def print_timers(timers: Iterable[TMeasurement]):
    compare.print()


@default_vllm_config()
def main():
    torch.set_default_device("cuda")
    bench_params = get_bench_params()
@@ -7,6 +7,7 @@ import itertools
import torch

import vllm.model_executor.layers.activation  # noqa F401
from vllm.benchmarks.lib.utils import default_vllm_config
from vllm.model_executor.custom_op import op_registry
from vllm.triton_utils import triton
from vllm.utils.argparse_utils import FlexibleArgumentParser
@@ -18,6 +19,7 @@ intermediate_size = [3072, 9728, 12288]
configs = list(itertools.product(batch_size_range, seq_len_range, intermediate_size))


@default_vllm_config()
def benchmark_activation(
    batch_size: int,
    seq_len: int,
Some files were not shown because too many files have changed in this diff.