[bugfix] fix type[AttentionBackend] bug in kv_connector_base_v1 (#30051)
Signed-off-by: 01267596 <xiongkai123@cmbchina.com> Co-authored-by: 01267596 <xiongkai123@cmbchina.com>
This commit is contained in:
@@ -239,7 +239,7 @@ class KVConnectorBase_V1(ABC):
         return

     def register_cross_layers_kv_cache(
-        self, kv_cache: torch.Tensor, attn_backend: type[AttentionBackend]
+        self, kv_cache: torch.Tensor, attn_backend: type["AttentionBackend"]
     ):
         """
         Initialize with a single KV cache tensor used by all layers.
Reference in New Issue
Block a user