MI325 configs, fused_moe_kernel bugfix (#14987)

Signed-off-by: Eugene Kuznetsov <eugene.kuznetsov@amd.com>
This commit is contained in:
ekuznetsov139
2025-03-18 08:05:18 -07:00
committed by GitHub
parent af35d3a3cc
commit 8b793f7ec6
54 changed files with 8878 additions and 1 deletion

View File

@@ -783,8 +783,12 @@ def invoke_fused_moe_kernel(A: torch.Tensor,
use_int8_w8a16=use_int8_w8a16,
**config,
)
else:
config = config.copy()
BLOCK_SIZE_K = config.pop("BLOCK_SIZE_K")
if block_shape is not None:
BLOCK_SIZE_K = min(BLOCK_SIZE_K, min(block_shape[0],
block_shape[1]))
fused_moe_kernel[grid](
A,
B,
@@ -823,6 +827,7 @@ def invoke_fused_moe_kernel(A: torch.Tensor,
compute_type=compute_type,
use_fp8_w8a8=use_fp8_w8a8,
use_int8_w8a16=use_int8_w8a16,
BLOCK_SIZE_K=BLOCK_SIZE_K,
**config,
)