Add contributing guideline and mypy config (#122)

This commit is contained in:
Woosuk Kwon
2023-05-23 17:58:51 -07:00
committed by GitHub
parent 3f942acfe1
commit a283ec2eec
16 changed files with 128 additions and 44 deletions

View File

@@ -168,8 +168,8 @@ class GPT2Model(nn.Module):
def forward(
self,
- input_ids: torch.LongTensor,
- position_ids: torch.LongTensor,
+ input_ids: torch.Tensor,
+ position_ids: torch.Tensor,
kv_caches: List[KVCache],
input_metadata: InputMetadata,
cache_events: Optional[List[torch.cuda.Event]],
@@ -204,8 +204,8 @@ class GPT2LMHeadModel(nn.Module):
def forward(
self,
- input_ids: torch.LongTensor,
- positions: torch.LongTensor,
+ input_ids: torch.Tensor,
+ positions: torch.Tensor,
kv_caches: List[KVCache],
input_metadata: InputMetadata,
cache_events: Optional[List[torch.cuda.Event]],

View File

@@ -67,7 +67,7 @@ class GPTNeoXAttention(nn.Module):
def forward(
self,
- position_ids: torch.LongTensor,
+ position_ids: torch.Tensor,
hidden_states: torch.Tensor,
kv_cache: KVCache,
input_metadata: InputMetadata,
@@ -118,7 +118,7 @@ class GPTNeoXLayer(nn.Module):
def forward(
self,
- position_ids: torch.LongTensor,
+ position_ids: torch.Tensor,
hidden_states: torch.Tensor,
kv_cache: KVCache,
input_metadata: InputMetadata,
@@ -162,8 +162,8 @@ class GPTNeoXModel(nn.Module):
def forward(
self,
- input_ids: torch.LongTensor,
- position_ids: torch.LongTensor,
+ input_ids: torch.Tensor,
+ position_ids: torch.Tensor,
kv_caches: List[KVCache],
input_metadata: InputMetadata,
cache_events: Optional[List[torch.cuda.Event]],
@@ -199,8 +199,8 @@ class GPTNeoXForCausalLM(nn.Module):
def forward(
self,
- input_ids: torch.LongTensor,
- positions: torch.LongTensor,
+ input_ids: torch.Tensor,
+ positions: torch.Tensor,
kv_caches: List[KVCache],
input_metadata: InputMetadata,
cache_events: Optional[List[torch.cuda.Event]],

View File

@@ -109,7 +109,7 @@ class LlamaAttention(nn.Module):
def forward(
self,
- positions: torch.LongTensor,
+ positions: torch.Tensor,
hidden_states: torch.Tensor,
kv_cache: KVCache,
input_metadata: InputMetadata,
@@ -143,7 +143,7 @@ class LlamaDecoderLayer(nn.Module):
def forward(
self,
- positions: torch.LongTensor,
+ positions: torch.Tensor,
hidden_states: torch.Tensor,
kv_cache: KVCache,
input_metadata: InputMetadata,
@@ -184,8 +184,8 @@ class LlamaModel(nn.Module):
def forward(
self,
- input_ids: torch.LongTensor,
- positions: torch.LongTensor,
+ input_ids: torch.Tensor,
+ positions: torch.Tensor,
kv_caches: List[KVCache],
input_metadata: InputMetadata,
cache_events: Optional[List[torch.cuda.Event]],
@@ -222,8 +222,8 @@ class LlamaForCausalLM(nn.Module):
def forward(
self,
- input_ids: torch.LongTensor,
- positions: torch.LongTensor,
+ input_ids: torch.Tensor,
+ positions: torch.Tensor,
kv_caches: List[KVCache],
input_metadata: InputMetadata,
cache_events: Optional[List[torch.cuda.Event]],

View File

@@ -47,7 +47,7 @@ class OPTLearnedPositionalEmbedding(nn.Embedding):
self.offset = 2
super().__init__(num_embeddings + self.offset, embedding_dim)
- def forward(self, positions: torch.LongTensor):
+ def forward(self, positions: torch.Tensor):
return super().forward(positions + self.offset)
@@ -199,8 +199,8 @@ class OPTDecoder(nn.Module):
def forward(
self,
- input_ids: torch.LongTensor,
- positions: torch.LongTensor,
+ input_ids: torch.Tensor,
+ positions: torch.Tensor,
kv_caches: List[KVCache],
input_metadata: InputMetadata,
cache_events: Optional[List[torch.cuda.Event]],
@@ -235,8 +235,8 @@ class OPTModel(nn.Module):
def forward(
self,
- input_ids: torch.LongTensor,
- positions: torch.LongTensor,
+ input_ids: torch.Tensor,
+ positions: torch.Tensor,
kv_caches: List[KVCache],
input_metadata: InputMetadata,
cache_events: Optional[List[torch.cuda.Event]],
@@ -258,8 +258,8 @@ class OPTForCausalLM(nn.Module):
def forward(
self,
- input_ids: torch.LongTensor,
- positions: torch.LongTensor,
+ input_ids: torch.Tensor,
+ positions: torch.Tensor,
kv_caches: List[KVCache],
input_metadata: InputMetadata,
cache_events: Optional[List[torch.cuda.Event]],