[Kernel] optimize moe_align_block_size for cuda graph and large num_experts (e.g. DeepSeek-V3) (#12222)
Signed-off-by: Jinzhen Lin <linjinzhen@hotmail.com>
Co-authored-by: Michael Goin <mgoin@redhat.com>
Co-authored-by: Tyler Michael Smith <tyler@neuralmagic.com>
@@ -607,7 +607,7 @@ class ModelConfig:
         self.max_seq_len_to_capture = min(self.max_seq_len_to_capture,
                                           self.max_model_len)

-        MODEL_NOT_SUPPORT_CUDA_GRAPH = ['deepseek_v3', 'mllama']
+        MODEL_NOT_SUPPORT_CUDA_GRAPH = ['mllama']
         if (self.hf_config.model_type in MODEL_NOT_SUPPORT_CUDA_GRAPH
                 and not self.enforce_eager):
             logger.warning(
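Per the commit title, the optimized moe_align_block_size kernel now handles large expert counts under CUDA graph capture, which is why deepseek_v3 (256 routed experts) can be dropped from MODEL_NOT_SUPPORT_CUDA_GRAPH above. As a rough illustration of the kernel's contract, the CPU sketch below is a minimal reference of the alignment step, not vLLM's actual implementation; the output names follow the fused MoE code, while the sentinel-padding detail and helper name are assumptions of this sketch.

    import torch


    def moe_align_block_size_ref(topk_ids: torch.Tensor, block_size: int,
                                 num_experts: int):
        """CPU reference for the alignment the CUDA kernel performs.

        Groups the flattened token->expert assignments by expert and pads
        each expert's token count up to a multiple of block_size, so the
        fused MoE GEMM can launch fixed-shape tiles. Fixed shapes are what
        keep this step compatible with CUDA graph capture.
        """
        flat = topk_ids.flatten()
        counts = torch.bincount(flat, minlength=num_experts)
        # Round every expert's token count up to the next block boundary.
        padded = (counts + block_size - 1) // block_size * block_size
        num_tokens_post_padded = int(padded.sum())

        # Padding slots hold flat.numel(), an out-of-range index the GEMM
        # skips (the sentinel choice is an assumption of this sketch).
        sorted_token_ids = torch.full((num_tokens_post_padded,),
                                      flat.numel(), dtype=torch.int64)
        # One expert id per block of block_size rows.
        expert_ids = torch.repeat_interleave(
            torch.arange(num_experts), padded // block_size)

        # Exclusive prefix sum: each expert's write offset into the output.
        write_pos = torch.cumsum(padded, dim=0) - padded
        for token_idx, expert in enumerate(flat.tolist()):
            sorted_token_ids[write_pos[expert]] = token_idx
            write_pos[expert] += 1
        return sorted_token_ids, expert_ids, num_tokens_post_padded

Example use: for topk_ids = torch.tensor([[0, 2], [1, 2], [0, 0]]) (3 tokens, top-2 routing) with block_size=4 and num_experts=4, every expert's token group is padded to a multiple of 4 slots, so the resulting buffer sizes depend only on the padding bound, not on the per-step routing, which is the property CUDA graph replay requires.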