[torchao] Support quantization configs using module swap (#21982)
Signed-off-by: Jerry Zhang <jerryzh168@gmail.com>
@@ -152,18 +152,20 @@ def torchao_quantize_param_data(param: torch.Tensor,
     from torchao.quantization import quantize_

     assert isinstance(torchao_config, AOBaseConfig), f"{torchao_config}"
     """
     Avoid real weight allocation for faster load, since we will
     end up setting it to param.
     """
     with torch.device("meta"):
-        dummy_linear = torch.nn.Linear(param.shape[1],
-                                       param.shape[0],
-                                       bias=False)
+        # linear can't be top level module since quantize_ is inplace
+        # while some of our configs need to do module swap, and only non-top
+        # level modules support module swap
+        dummy_linear = torch.nn.Sequential(
+            torch.nn.Linear(param.shape[1], param.shape[0], bias=False))

-    dummy_linear.weight = param
+    dummy_linear[0].weight = param
     quantize_(dummy_linear, torchao_config)
-    return dummy_linear.weight
+    return dummy_linear[0].weight


 class TorchAOLinearMethod(LinearMethodBase):
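A note on the `torch.device("meta")` context in the unchanged lines above: tensors created under the meta device carry shape and dtype metadata but no backing storage, which is what makes the dummy module free to build; the real weight is attached afterwards via the param assignment. A minimal sketch of that behavior, assuming only plain PyTorch (the 4096 sizes are arbitrary, for illustration):

import torch

# Construction under the meta device allocates no storage, so the cost
# is independent of the weight size.
with torch.device("meta"):
    dummy = torch.nn.Linear(4096, 4096, bias=False)

print(dummy.weight.device)  # meta
# Shape/dtype metadata is still tracked, e.g. the bytes this weight
# would occupy if materialized in fp32:
print(dummy.weight.nelement() * dummy.weight.element_size())  # 67108864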
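The new comment is the heart of this change: `quantize_` works in place, and configs that quantize via module swap do so by reassigning the child module on its parent, so a bare top-level `nn.Linear` has no parent to reassign it on and can never be swapped. Wrapping the linear in an `nn.Sequential` gives `quantize_` a parent to operate on, and the (possibly swapped) module's weight is then read back as `dummy_linear[0].weight`. A minimal sketch of the constraint, in plain PyTorch; `QuantizedLinear` and `swap_linears_` below are hypothetical stand-ins for what a module-swap config does inside `quantize_`, not torchao code:

import torch

class QuantizedLinear(torch.nn.Module):
    """Hypothetical replacement that a module-swap config would install."""
    def __init__(self, linear: torch.nn.Linear):
        super().__init__()
        # A real config would quantize here; we just keep the weight.
        self.weight = linear.weight

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return torch.nn.functional.linear(x, self.weight)

def swap_linears_(module: torch.nn.Module) -> None:
    """Swap every child nn.Linear in place, mirroring the in-place
    contract of quantize_. The swap is a setattr on the *parent*, so a
    top-level nn.Linear passed directly has no parent and is untouched."""
    for name, child in module.named_children():
        if isinstance(child, torch.nn.Linear):
            setattr(module, name, QuantizedLinear(child))
        else:
            swap_linears_(child)

top_level = torch.nn.Linear(4, 4, bias=False)
swap_linears_(top_level)                 # no parent to reassign on
print(type(top_level).__name__)          # Linear -- swap never happened

wrapped = torch.nn.Sequential(torch.nn.Linear(4, 4, bias=False))
swap_linears_(wrapped)                   # child reassigned on the Sequential
print(type(wrapped[0]).__name__)         # QuantizedLinear

Running the swap on a bare `nn.Linear` is a no-op, while the wrapped child is replaced, which is exactly the situation the diff guards against.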