Parameter Management

We start by focusing on a multilayer perceptron with a single hidden layer.

In [1]:
import torch
from torch import nn

# An MLP with one hidden layer: 4 inputs -> 8 hidden units -> 1 output
net = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 1))
X = torch.rand(size=(2, 4))
net(X)
Out[1]:
tensor([[-0.5876],
        [-0.5556]], grad_fn=<AddmmBackward>)

Parameter Access

In [2]:
print(net[2].state_dict())  # net[2] is the output Linear layer
OrderedDict([('weight', tensor([[ 0.0181,  0.0557,  0.0219, -0.3431,  0.1738, -0.0249,  0.1345, -0.2593]])), ('bias', tensor([-0.3221]))])

Targeted Parameters

In [3]:
print(type(net[2].bias))
print(net[2].bias)
print(net[2].bias.data)
<class 'torch.nn.parameter.Parameter'>
Parameter containing:
tensor([-0.3221], requires_grad=True)
tensor([-0.3221])
In [4]:
net[2].weight.grad is None
Out[4]:
True
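
The gradient is None because no backward pass has been run yet. As a minimal sketch (not one of the original cells), computing an arbitrary scalar loss and calling backward populates it:

loss = net(X).sum()  # arbitrary scalar, just to drive backpropagation
loss.backward()
print(net[2].weight.grad is None)  # now False: a gradient tensor exists
net.zero_grad()  # reset gradients (to zero tensors) before moving on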

Accessing All Parameters at Once

In [5]:
print(*[(name, param.shape) for name, param in net[0].named_parameters()])
print(*[(name, param.shape) for name, param in net.named_parameters()])
('weight', torch.Size([8, 4])) ('bias', torch.Size([8]))
('0.weight', torch.Size([8, 4])) ('0.bias', torch.Size([8])) ('2.weight', torch.Size([1, 8])) ('2.bias', torch.Size([1]))
In [6]:
net.state_dict()['2.bias'].data
Out[6]:
tensor([-0.3221])
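
The keys of state_dict follow the same 'layer.parameter' naming, so all available keys can be listed; a small sketch:

print(net.state_dict().keys())
# expected: odict_keys(['0.weight', '0.bias', '2.weight', '2.bias'])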

Collecting Parameters from Nested Blocks

In [7]:
def block1():
    return nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 4),
                         nn.ReLU())

def block2():
    net = nn.Sequential()
    for i in range(4):
        # register each sub-block under an explicit name
        net.add_module(f'block {i}', block1())
    return net

rgnet = nn.Sequential(block2(), nn.Linear(4, 1))
rgnet(X)
Out[7]:
tensor([[0.2755],
        [0.2755]], grad_fn=<AddmmBackward>)

Now that we have designed the network, let us see how it is organized.

In [8]:
print(rgnet)
Sequential(
  (0): Sequential(
    (block 0): Sequential(
      (0): Linear(in_features=4, out_features=8, bias=True)
      (1): ReLU()
      (2): Linear(in_features=8, out_features=4, bias=True)
      (3): ReLU()
    )
    (block 1): Sequential(
      (0): Linear(in_features=4, out_features=8, bias=True)
      (1): ReLU()
      (2): Linear(in_features=8, out_features=4, bias=True)
      (3): ReLU()
    )
    (block 2): Sequential(
      (0): Linear(in_features=4, out_features=8, bias=True)
      (1): ReLU()
      (2): Linear(in_features=8, out_features=4, bias=True)
      (3): ReLU()
    )
    (block 3): Sequential(
      (0): Linear(in_features=4, out_features=8, bias=True)
      (1): ReLU()
      (2): Linear(in_features=8, out_features=4, bias=True)
      (3): ReLU()
    )
  )
  (1): Linear(in_features=4, out_features=1, bias=True)
)
In [9]:
rgnet[0][1][0].bias.data  # outer block -> second sub-block -> its first Linear layer
Out[9]:
tensor([-0.0178,  0.4432,  0.4543, -0.3561, -0.0851, -0.4227,  0.3945, -0.4169])
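
Since parameter names mirror the printed hierarchy, the same nested parameter can also be reached through its qualified name; a small sketch:

# '0' is the outer Sequential, 'block 1' the second sub-block, '0' its first Linear
rgnet.state_dict()['0.block 1.0.bias'].data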

Built-in Initialization

In [10]:
def init_normal(m):
    if type(m) == nn.Linear:
        # weights ~ N(0, 0.01^2), biases set to zero
        nn.init.normal_(m.weight, mean=0, std=0.01)
        nn.init.zeros_(m.bias)

net.apply(init_normal)  # apply() runs init_normal on every submodule recursively
net[0].weight.data[0], net[0].bias.data[0]
Out[10]:
(tensor([-0.0013,  0.0037, -0.0172,  0.0156]), tensor(0.))
In [11]:
def init_constant(m):
    if type(m) == nn.Linear:
        # Constant weights are for demonstration only: with identical weights,
        # hidden units stay symmetric and cannot learn distinct features
        nn.init.constant_(m.weight, 1)
        nn.init.zeros_(m.bias)

net.apply(init_constant)
net[0].weight.data[0], net[0].bias.data[0]
Out[11]:
(tensor([1., 1., 1., 1.]), tensor(0.))

Applying Different Initialization Methods to Certain Blocks

In [12]:
def xavier(m):
    if type(m) == nn.Linear:
        nn.init.xavier_uniform_(m.weight)

def init_42(m):
    if type(m) == nn.Linear:
        nn.init.constant_(m.weight, 42)

net[0].apply(xavier)   # Xavier uniform for the first Linear layer
net[2].apply(init_42)  # constant 42 for the output layer
print(net[0].weight.data[0])
print(net[2].weight.data)
tensor([-0.0453, -0.3169,  0.3091, -0.2077])
tensor([[42., 42., 42., 42., 42., 42., 42., 42.]])

Custom Initialization

In [13]:
def my_init(m):
    if type(m) == nn.Linear:
        # report the first (name, shape) pair of the layer being initialized
        print(
            "Init",
            *[(name, param.shape) for name, param in m.named_parameters()][0])
        nn.init.uniform_(m.weight, -10, 10)
        # keep only weights with |w| >= 5, zeroing out the rest
        m.weight.data *= m.weight.data.abs() >= 5

net.apply(my_init)
net[0].weight[:2]
Init weight torch.Size([8, 4])
Init weight torch.Size([1, 8])
Out[13]:
tensor([[ 0.0000, -9.6254,  0.0000,  6.0600],
        [ 0.0000,  7.9085, -0.0000, -0.0000]], grad_fn=<SliceBackward>)
In [14]:
net[0].weight.data[:] += 1
net[0].weight.data[0, 0] = 42
net[0].weight.data[0]
Out[14]:
tensor([42.0000, -8.6254,  1.0000,  7.0600])

Tied Parameters

In [15]:
shared = nn.Linear(8, 8)  # one layer instance, used twice below
net = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), shared, nn.ReLU(), shared,
                    nn.ReLU(), nn.Linear(8, 1))
net(X)
print(net[2].weight.data[0] == net[4].weight.data[0])
# changing one tied layer changes the other as well
net[2].weight.data[0, 0] = 100
print(net[2].weight.data[0] == net[4].weight.data[0])
tensor([True, True, True, True, True, True, True, True])
tensor([True, True, True, True, True, True, True, True])
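
Both outputs are all True because the third and fifth layers are backed by the very same Parameter object, not merely equal values; a quick check:

print(net[2].weight is net[4].weight)  # True: one shared Parameter

Since the parameters are tied, the gradient contributions from both uses of shared are added together during backpropagation.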