1. Example of using a loss function in PyTorch
```python
import torch
import torch.nn as nn
import torch.nn.functional as F

# When defining a network, subclass nn.Module and implement its forward method.
# Put layers with learnable parameters in the constructor __init__;
# layers without learnable parameters (e.g. ReLU) may go in the constructor or not.
# torch.nn.MaxPool2d and torch.nn.functional.max_pool2d can both introduce a max
# pooling layer when building a model, but the former is a module class and the
# latter is a function, so their usage differs:
# torch.nn.functional.max_pool2d is a function and can be called directly, while
# torch.nn.MaxPool2d is a module class that must be instantiated first, then called.
# Other modules in torch.nn relate to their torch.nn.functional counterparts in the same way.
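# For example (illustrative only), these two ways of max pooling a tensor x are equivalent:
#   pool = torch.nn.MaxPool2d(2); y = pool(x)     # module: instantiate, then call
#   y = torch.nn.functional.max_pool2d(x, 2)      # function: call directly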
class myNet(torch.nn.Module):
    def __init__(self):
        super(myNet, self).__init__()
        self.conv1 = torch.nn.Conv2d(1, 6, 5)
        self.conv2 = torch.nn.Conv2d(6, 16, 5)
        self.fc1 = torch.nn.Linear(16 * 5 * 5, 120)
        self.fc2 = torch.nn.Linear(120, 84)
        self.fc3 = torch.nn.Linear(84, 10)
        self.pooling = torch.nn.MaxPool2d(2)
        self.activate = torch.nn.ReLU()

    def forward(self, x):
        x = self.pooling(self.activate(self.conv1(x)))
        x = self.pooling(self.activate(self.conv2(x)))
        x = x.view(x.size()[0], -1)  # flatten to (batch_size, 16*5*5)
        x = self.activate(self.fc1(x))
        x = self.activate(self.fc2(x))
        x = self.fc3(x)
        return x
input = torch.randn(1, 1, 32, 32)  # Variable is deprecated; plain tensors carry autograd info
net = myNet()                      # create a myNet object
output = net(input)                # calls the object's forward(), similar to operator()() in C++
target = torch.arange(0, 10, dtype=torch.float).view(1, -1)  # match output's shape (1, 10)
criterion = torch.nn.MSELoss()     # create an MSELoss object
loss = criterion(output, target)   # call the loss function
print(loss)
print('*' * 30)
net.zero_grad()                    # zero the gradients of all learnable parameters in net
print(net.conv1.bias.grad)
loss.backward()
print(net.conv1.bias.grad)
```
Output:
```bash
tensor(28.6363, grad_fn=<MseLossBackward0>)
******************************
None
tensor([ 0.1782, -0.0815, -0.0902, -0.0140,  0.0267,  0.0015])
```
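
As a sanity check on what the loss call actually computes, the minimal sketch below (using hypothetical tensors of the same shapes as above) verifies that MSELoss with its default reduction='mean' averages the squared element-wise differences over all elements:

```python
import torch

output = torch.randn(1, 10)
target = torch.arange(0, 10, dtype=torch.float).view(1, -1)

criterion = torch.nn.MSELoss()            # default reduction='mean'
loss = criterion(output, target)
manual = ((output - target) ** 2).mean()  # same quantity computed by hand

print(torch.allclose(loss, manual))       # expected: True
```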
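
The net.zero_grad() call matters because PyTorch accumulates gradients across backward() calls rather than overwriting them. A minimal sketch with a hypothetical scalar loss:

```python
import torch

w = torch.randn(3, requires_grad=True)
x = torch.randn(3)

loss = (w * x).sum()
loss.backward()
print(w.grad)    # equals x after the first backward pass

loss = (w * x).sum()
loss.backward()
print(w.grad)    # now 2*x: the second backward added onto the first

w.grad.zero_()   # conceptually what net.zero_grad() does for each parameter
print(w.grad)    # back to all zeros
```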