Add contributing guideline and mypy config (#122)

This commit is contained in:
Woosuk Kwon
2023-05-23 17:58:51 -07:00
committed by GitHub
parent 3f942acfe1
commit a283ec2eec
16 changed files with 128 additions and 44 deletions

View File

@@ -109,7 +109,7 @@ class LlamaAttention(nn.Module):
     def forward(
         self,
-        positions: torch.LongTensor,
+        positions: torch.Tensor,
         hidden_states: torch.Tensor,
         kv_cache: KVCache,
         input_metadata: InputMetadata,
@@ -143,7 +143,7 @@ class LlamaDecoderLayer(nn.Module):
     def forward(
         self,
-        positions: torch.LongTensor,
+        positions: torch.Tensor,
         hidden_states: torch.Tensor,
         kv_cache: KVCache,
         input_metadata: InputMetadata,
@@ -184,8 +184,8 @@ class LlamaModel(nn.Module):
     def forward(
         self,
-        input_ids: torch.LongTensor,
-        positions: torch.LongTensor,
+        input_ids: torch.Tensor,
+        positions: torch.Tensor,
         kv_caches: List[KVCache],
         input_metadata: InputMetadata,
         cache_events: Optional[List[torch.cuda.Event]],
@@ -222,8 +222,8 @@ class LlamaForCausalLM(nn.Module):
     def forward(
         self,
-        input_ids: torch.LongTensor,
-        positions: torch.LongTensor,
+        input_ids: torch.Tensor,
+        positions: torch.Tensor,
         kv_caches: List[KVCache],
         input_metadata: InputMetadata,
         cache_events: Optional[List[torch.cuda.Event]],