The implementation of the Linear module in PyTorch is shown below. The weight and bias parameters are created in the __init__ function and then initialized by the reset_parameters() call.
class Linear(Module):
    __constants__ = ['bias', 'in_features', 'out_features']

    def __init__(self, in_features, out_features, bias=True):
        super(Linear, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.Tensor(out_features, in_features))
        if bias:
            self.bias = Parameter(torch.Tensor(out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def forward(self, input):
        return F.linear(input, self.weight, self.bias)
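For reference, the default initialization performed by reset_parameters() looks roughly like the following sketch (based on recent PyTorch versions, so the exact code may differ between releases): the weight gets a Kaiming-uniform initialization and the bias a uniform bound derived from the weight's fan-in.
import math
from torch.nn import init

def reset_parameters(self):
    # Kaiming-uniform initialization for the weight matrix
    init.kaiming_uniform_(self.weight, a=math.sqrt(5))
    if self.bias is not None:
        # Bound the bias uniformly by 1/sqrt(fan_in) of the weight
        fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
        bound = 1 / math.sqrt(fan_in)
        init.uniform_(self.bias, -bound, bound)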
So if you want to initialize the parameters of this linear submodule yourself, you can use the following approach to set the parameters of an individual linear submodule.
import torch.nn as nn
from torch.nn import init
from collections import OrderedDict

num_inputs = 2  # number of input features

net = nn.Sequential(OrderedDict([
    ('linear', nn.Linear(num_inputs, 1))
]))
print(net)
print(net[0])
print(type(net[0]))

# Re-initialize the parameters of the linear submodule
init.normal_(net[0].weight, mean=0.0, std=0.01)
init.constant_(net[0].bias, val=0.0)  # the bias can also be modified directly: net[0].bias.data.fill_(0)
#----------------
Sequential(
  (linear): Linear(in_features=2, out_features=1, bias=True)
)
Linear(in_features=2, out_features=1, bias=True)
<class 'torch.nn.modules.linear.Linear'>
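Beyond indexing a single submodule, a common pattern (a sketch of standard PyTorch usage, not taken from the code above) is to apply the same initialization to every Linear submodule of a larger network with Module.apply; init_linear below is a hypothetical helper name.
import torch.nn as nn
from torch.nn import init

def init_linear(m):
    # Apply the same normal/constant initialization to every nn.Linear submodule
    if isinstance(m, nn.Linear):
        init.normal_(m.weight, mean=0.0, std=0.01)
        if m.bias is not None:
            init.constant_(m.bias, val=0.0)

net = nn.Sequential(nn.Linear(2, 4), nn.ReLU(), nn.Linear(4, 1))
net.apply(init_linear)  # Module.apply calls init_linear recursively on all submodules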