[Bugfix] Fix precision loss in LoRA-wrapped RowParallelLinear by fusing bias into GEMM (#28972)
Signed-off-by: prashanth058 <prashanth.dannamaneni@uipath.com>
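The precision issue comes from adding the bias to a GEMM output that has already been rounded to the activation dtype, instead of folding it into the kernel's higher-precision accumulation. A minimal sketch of the effect (plain PyTorch, not the vLLM kernels; shapes and dtypes are illustrative only):

```python
import torch

torch.manual_seed(0)
x = torch.randn(4, 256)          # activations (fp32 here for clarity)
w = torch.randn(256, 128)        # weight
b = torch.randn(128)             # bias

# Old path (sketched): the GEMM result is rounded to fp16 first, then the
# bias is added and the sum is rounded again -- two rounding steps.
unfused = ((x @ w).to(torch.float16).float() + b).to(torch.float16)

# New path (sketched): the bias is folded into the fp32 accumulation and the
# result is rounded once, as when the bias is passed into the GEMM.
fused = (x @ w + b).to(torch.float16)

# The two paths typically disagree in the last bit for some elements.
print((unfused != fused).sum().item())
```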
@@ -63,23 +63,18 @@ class RowParallelLinearWithLoRA(BaseLinearLayerWithLoRA):
             input_parallel = splitted_input[self.tp_rank].contiguous()
 
         # Matrix multiply.
-        output_parallel = self.apply(input_parallel)
+        bias_ = (
+            None
+            if (self.tp_rank > 0 or self.base_layer.skip_bias_add)
+            else self.base_layer.bias
+        )
+        output_parallel = self.apply(input_parallel, bias_)
         if self.base_layer.reduce_results and self.tp_size > 1:
-            output_ = tensor_model_parallel_all_reduce(output_parallel)
+            output = tensor_model_parallel_all_reduce(output_parallel)
         else:
-            output_ = output_parallel
+            output = output_parallel
 
-        if not self.base_layer.skip_bias_add:
-            output = (
-                output_ + self.base_layer.bias
-                if self.base_layer.bias is not None
-                else output_
-            )
-            output_bias = None
-        else:
-            output = output_
-            output_bias = self.base_layer.bias
-
+        output_bias = self.base_layer.bias if self.base_layer.skip_bias_add else None
         if not self.base_layer.return_bias:
             return output
         return output, output_bias
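The `bias_` gating above also keeps the bias from being counted once per rank when `reduce_results` sums the partial outputs. A minimal sketch of why only `tp_rank == 0` supplies the bias (plain PyTorch; `tp_size` and the tensors are made up for illustration):

```python
import torch

tp_size = 4                      # illustrative tensor-parallel world size
bias = torch.randn(8)
# Per-rank partial GEMM outputs that the all-reduce would sum.
partials = [torch.randn(8) for _ in range(tp_size)]

reference = sum(partials) + bias            # bias applied exactly once

# If every rank folded the bias into its GEMM, the all-reduce would sum
# tp_size copies of it.
every_rank = sum(p + bias for p in partials)

# Gating on tp_rank == 0 (the `bias_` expression above) keeps one copy.
rank0_only = sum(p + (bias if rank == 0 else 0) for rank, p in enumerate(partials))

print(torch.allclose(rank0_only, reference))   # True
print(torch.allclose(every_rank, reference))   # False: off by (tp_size - 1) * bias
```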
@@ -120,7 +115,7 @@ class RowParallelLinearWithShardedLoRA(RowParallelLinearWithLoRA):
         return lora_b
 
     def apply(self, x: torch.Tensor, bias: torch.Tensor | None = None) -> torch.Tensor:
-        output = self.base_layer.quant_method.apply(self.base_layer, x)
+        output = self.base_layer.quant_method.apply(self.base_layer, x, bias)
 
         x = x.view(-1, x.shape[-1])
         output, out_orig_shape = output.view(-1, output.shape[-1]), output.shape