[Bugfix] Enable FP8 KV cache for FlashInfer and Triton backend on non-sm100 GPUs (#24577)
Signed-off-by: Thien Tran <gau.nernst@yahoo.com.sg>
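Note (not part of the original commit): a hedged usage sketch of the path this fix enables, via the standard vLLM Python API. The model name is illustrative, and running it requires a GPU plus the chosen backend installed; treat it as a sketch, not a regression test.

    import os

    # Select one of the backends this commit fixes; VLLM_ATTENTION_BACKEND is
    # vLLM's backend-override environment variable.
    os.environ["VLLM_ATTENTION_BACKEND"] = "FLASHINFER"  # or "TRITON_ATTN_VLLM_V1"

    from vllm import LLM

    # Before this fix, requesting an fp8 KV cache with these backends on a
    # non-sm100 (pre-Blackwell) GPU failed the platform capability check.
    llm = LLM(model="meta-llama/Llama-3.1-8B-Instruct", kv_cache_dtype="fp8")
    print(llm.generate("Hello, my name is")[0].outputs[0].text)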
@@ -530,6 +530,10 @@ class CudaPlatformBase(Platform):
                 supported = flash_attn_supports_fp8()
             else:
                 supported = True
+        elif attention_backend == "FLASHINFER":
+            supported = True
+        elif attention_backend == "TRITON_ATTN_VLLM_V1":
+            supported = cls.supports_fp8()
         return supported
 
     @classmethod
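Note on the hunk above (presumably vllm/platforms/cuda.py, inferred from the CudaPlatformBase context): the FP8 KV-cache capability check previously special-cased only FlashAttention, so FLASHINFER and TRITON_ATTN_VLLM_V1 fell through to the `supported = False` default on non-sm100 GPUs. The standalone sketch below reconstructs the post-patch dispatch; the function frame and boolean parameters are stand-ins for the real helpers (flash_attn_supports_fp8(), cls.supports_fp8()), and the FlashAttention branch is simplified.

    def is_kv_cache_dtype_supported_sketch(
        attention_backend: str,
        fa_supports_fp8: bool,        # stand-in for flash_attn_supports_fp8()
        platform_supports_fp8: bool,  # stand-in for cls.supports_fp8()
    ) -> bool:
        supported = False
        if attention_backend == "FLASH_ATTN_VLLM_V1":
            # Pre-existing FlashAttention path (the hunk's context lines).
            supported = fa_supports_fp8
        elif attention_backend == "FLASHINFER":
            # Added: FlashInfer can consume an fp8 KV cache on any supported GPU.
            supported = True
        elif attention_backend == "TRITON_ATTN_VLLM_V1":
            # Added: the Triton backend defers to the platform's fp8 capability.
            supported = platform_supports_fp8
        return supported

    # On a non-sm100 GPU, FlashInfer now reports fp8 KV-cache support instead
    # of falling through to the `supported = False` default.
    assert is_kv_cache_dtype_supported_sketch("FLASHINFER", False, True)
    assert not is_kv_cache_dtype_supported_sketch("TRITON_ATTN_VLLM_V1", False, False)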
@@ -202,7 +202,11 @@ class FlashInferMetadataBuilder(AttentionMetadataBuilder[FlashInferMetadata]):
         else:
             assert self.kv_cache_spec.dtype == self.model_config.dtype
             self.kv_cache_dtype = self.kv_cache_spec.dtype
-            self.q_data_type = self.kv_cache_dtype
+
+        if supports_trtllm_attention()[0]:
+            self.q_data_type = self.kv_cache_dtype
+        else:
+            self.q_data_type = self.model_config.dtype
 
         self._cascade_wrapper = None  # Wrapper for cascade attention
 
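Note on the hunk above (presumably vllm/v1/attention/backends/flashinfer.py): query-dtype selection moves out of the non-fp8 branch. TRTLLM attention kernels can take an fp8 query matching the KV cache, while plain FlashInfer kernels need the query in the model dtype even when the KV cache is stored as fp8. A minimal sketch of that decision, with supports_trtllm_attention()[0] replaced by a boolean parameter:

    import torch

    def select_q_data_type(
        kv_cache_dtype: torch.dtype,
        model_dtype: torch.dtype,
        trtllm_attention: bool,  # stand-in for supports_trtllm_attention()[0]
    ) -> torch.dtype:
        if trtllm_attention:
            # TRTLLM kernels accept an fp8 query, so query and KV cache can
            # share a dtype.
            return kv_cache_dtype
        # Plain FlashInfer kernels expect the query in the model dtype even
        # when the KV cache is stored as fp8 -- the crux of this bugfix.
        return model_dtype

    # Non-sm100 GPU without TRTLLM attention: bf16 query over an fp8 KV cache.
    assert select_q_data_type(torch.float8_e4m3fn, torch.bfloat16, False) is torch.bfloat16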