[Kernel][LoRA] Punica prefill kernels fusion (#11234)
Signed-off-by: Jee Jee Li <pandaleefree@gmail.com>
Signed-off-by: Abatom <abzhonghua@gmail.com>
Co-authored-by: Zhonghua Deng <abatom@163.com>
@@ -18,11 +18,13 @@ class DummyLoRAManager:
    def get_module_lora(self, module_name: str) -> LoRALayerWeights:
        return self._loras[module_name]

    def init_random_lora(self,
                         module_name: str,
                         weight: torch.Tensor,
                         rank: int = 8,
                         generate_embeddings_tensor: int = 0):
    def init_random_lora(
        self,
        module_name: str,
        weight: torch.Tensor,
        rank: int = 8,
        generate_embeddings_tensor: int = 0,
    ):
        lora = LoRALayerWeights(
            module_name,
            rank=rank,
@@ -35,21 +37,25 @@ class DummyLoRAManager:
                              device=self._device),
        )
        if generate_embeddings_tensor:
            lora.embeddings_tensor = torch.rand(5,
                                                generate_embeddings_tensor,
                                                dtype=weight.dtype,
                                                device=self._device)
            lora.embeddings_tensor = torch.rand(
                5,
                generate_embeddings_tensor,
                dtype=weight.dtype,
                device=self._device,
            )
        self.set_module_lora(module_name, lora)

        return lora

    def init_lora(self,
                  module_name: str,
                  input_dim: int,
                  output_dim: int,
                  rank=8,
                  noop=False,
                  embeddings_tensor=None):
    def init_lora(
        self,
        module_name: str,
        input_dim: int,
        output_dim: int,
        rank=8,
        noop=False,
        embeddings_tensor=None,
    ):
        lora = LoRALayerWeights(
            module_name,
            rank=rank,
@@ -125,8 +131,16 @@ def ref_torch_groupgemm(
    return


def generate_data(batches, hidden_size, lora_nums, max_rank, seq_length, dtype,
                  op_type, device):
def generate_data(
    batches,
    hidden_size,
    lora_nums,
    max_rank,
    seq_length,
    dtype,
    op_type,
    device,
):
    seq_len_tensor = torch.randint(seq_length, seq_length + 1,
                                   (batches, )).to(device)
    b_seq_start_loc = torch.cumsum(
@@ -187,8 +201,16 @@ def generate_data(batches, hidden_size, lora_nums, max_rank, seq_length, dtype,
    )


def generate_data_for_expand_nslices(batches, hidden_size, lora_nums, max_rank,
                                     seq_length, dtype, nslices, device):
def generate_data_for_expand_nslices(
    batches,
    hidden_size,
    lora_nums,
    max_rank,
    seq_length,
    dtype,
    nslices,
    device,
):
    seq_len_tensor = torch.randint(seq_length, seq_length + 1,
                                   (batches, )).to(device)
    b_seq_start_loc = torch.cumsum(
@@ -221,7 +243,87 @@ def generate_data_for_expand_nslices(batches, hidden_size, lora_nums, max_rank,
    for b_id in range(batches):
        lora_index = lora_indices_tensor[b_id]
        indices[current_offset:current_offset +
                seq_len_tensor[b_id]] = lora_index.item()
                seq_len_tensor[b_id]] = (lora_index.item())
        current_offset += seq_len_tensor[b_id].item()

    lora_indices_tensor = lora_indices_tensor.to(device)
    return (
        inputs_tensor,
        lora_weights_lst,
        our_out_tensor,
        ref_out_tensor,
        b_seq_start_loc,
        lora_indices_tensor,
        seq_len_tensor,
        indices,
    )


def generate_data_for_nslices(
    batches,
    hidden_size,
    lora_nums,
    max_rank,
    seq_length,
    nslices,
    dtype,
    op_type,
    device,
):
    seq_len_tensor = torch.randint(seq_length, seq_length + 1,
                                   (batches, )).to(device)
    b_seq_start_loc = torch.cumsum(
        torch.tensor([0] + seq_len_tensor[:-1].tolist(), dtype=torch.long),
        dim=0,
    ).to(device)
    total_tokens = seq_len_tensor.sum()

    lora_weights_lst = []
    if op_type == "shrink":

        inputs_tensor = torch.rand((total_tokens, hidden_size),
                                   dtype=dtype).to(device)

        for _ in range(nslices):
            if op_type == "shrink":
                lora_weights_lst.append(
                    torch.rand(
                        (lora_nums, max_rank, hidden_size),  # col-major
                        dtype=dtype,
                    ).to(device))
        # NOTE: shrink kernel uses torch.float32 as its output type
        # shrink op needs atomic_add, so output is initialized to 0
        our_out_tensor = torch.zeros(
            (nslices, total_tokens, max_rank),
            dtype=torch.float32,
        ).to(device)
    else:
        inputs_tensor = torch.rand(
            (nslices, total_tokens, max_rank),
            dtype=dtype,
        ).to(device)
        for _ in range(nslices):
            lora_weights_lst.append(
                torch.rand(
                    (lora_nums, hidden_size, max_rank),  # col-major
                    dtype=dtype,
                ).to(device))
        # expand op needs to complete y += a @ lora_b, so output is
        # initialized randomly
        our_out_tensor = torch.rand((total_tokens, hidden_size * nslices),
                                    dtype=dtype).to(device)

    # Ensure the same input.
    ref_out_tensor = our_out_tensor.clone()
    lora_indices_tensor = torch.randint(0,
                                        lora_nums - 1 if lora_nums > 1 else 1,
                                        (batches, ))
    indices = torch.zeros((total_tokens), dtype=torch.long).to(device)
    current_offset = 0
    for b_id in range(batches):
        lora_index = lora_indices_tensor[b_id]
        indices[current_offset:current_offset +
                seq_len_tensor[b_id]] = (lora_index.item())
        current_offset += seq_len_tensor[b_id].item()

    lora_indices_tensor = lora_indices_tensor.to(device)
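
For orientation, the sketch below shows how a test might consume the new generate_data_for_nslices helper on the shrink path and check only the shapes and dtypes stated in this diff. It is a minimal sketch, not part of the commit: the import path and the assumption that the helper returns the same tuple layout as generate_data_for_expand_nslices above are guesses, and the fused Punica kernel call itself is intentionally left out.

# Minimal usage sketch (assumptions: the helper lives in the modified test
# utils module, and it returns the same tuple layout as
# generate_data_for_expand_nslices; the fused kernel call is omitted).
import torch

from tests.lora.utils import generate_data_for_nslices  # import path assumed

batches, hidden_size, lora_nums = 4, 2048, 4
max_rank, seq_length, nslices = 16, 128, 3
dtype, device = torch.float16, "cpu"  # data generation itself needs no GPU

(inputs_tensor, lora_weights_lst, our_out_tensor, ref_out_tensor,
 b_seq_start_loc, lora_indices_tensor, seq_len_tensor,
 indices) = generate_data_for_nslices(batches, hidden_size, lora_nums,
                                      max_rank, seq_length, nslices, dtype,
                                      "shrink", device)

total_tokens = int(seq_len_tensor.sum())
# Shrink path: inputs are (total_tokens, hidden_size); the output buffer is
# float32 and zero-initialized because the kernel accumulates via atomic_add.
assert inputs_tensor.shape == (total_tokens, hidden_size)
assert our_out_tensor.shape == (nslices, total_tokens, max_rank)
assert our_out_tensor.dtype == torch.float32
assert torch.all(our_out_tensor == 0)
# One LoRA-A weight per slice, each shaped (lora_nums, max_rank, hidden_size).
assert len(lora_weights_lst) == nslices
assert lora_weights_lst[0].shape == (lora_nums, max_rank, hidden_size)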