[Bugfix][CI] fix typos (#34934)

Signed-off-by: 1195343015 <1195343015@qq.com>
Signed-off-by: Jiayi Yan <66017932+1195343015@users.noreply.github.com>
Co-authored-by: Harry Mellor <19981378+hmellor@users.noreply.github.com>
This commit is contained in:
Jiayi Yan
2026-03-06 01:05:46 +08:00
committed by GitHub
parent 8c760b6ab6
commit 6a895197fa
98 changed files with 227 additions and 366 deletions

View File

@@ -370,7 +370,7 @@ class AiterFlashAttentionMetadata:
slot_mapping: torch.Tensor
block_table: torch.Tensor
-    # prefill and deocde split
+    # prefill and decode split
num_decodes: int
num_decode_tokens: int
num_prefills: int
@@ -1099,7 +1099,7 @@ class AiterFlashAttentionImpl(AttentionImpl):
extend_tokens_slice = slice(
num_decode_tokens, num_decode_tokens + num_extend_tokens
)
-            extend_querys = query[extend_tokens_slice]
+            extend_queries = query[extend_tokens_slice]
extend_keys = key[extend_tokens_slice]
extend_values = value[extend_tokens_slice]
extend_outputs = output[extend_tokens_slice]
@@ -1110,7 +1110,7 @@ class AiterFlashAttentionImpl(AttentionImpl):
v_scale = attn_metadata.v_scale
self.extend_forward(
attn_metadata=attn_metadata,
-                query=extend_querys,
+                query=extend_queries,
key=extend_keys,
value=extend_values,
key_cache=key_cache,