Building a Simple Neural Network Model
- The structure of the neural network model for the CIFAR-10 dataset is shown in the figure below:
- Since the figure above does not give concrete values for padding and stride, we have to work them out by hand from the following formula:
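The formula in question is the standard output-size formula from the PyTorch `nn.Conv2d` documentation:

$$H_{out} = \left\lfloor \frac{H_{in} + 2 \times \text{padding} - \text{dilation} \times (\text{kernel\_size} - 1) - 1}{\text{stride}} + 1 \right\rfloor$$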
- Note: if stride is too large, the required padding also becomes very large, which is unreasonable, so start trying stride from 1; unless a dilated (atrous) convolution is explicitly specified, dilation takes its default value of 1.
- For the first convolutional layer, padding and stride work out as follows:
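Plugging the known values into the formula above (input $32 \times 32$, kernel $5 \times 5$, dilation $1$, and we want the output to stay $32 \times 32$):

$$32 = \left\lfloor \frac{32 + 2 \times \text{padding} - 1 \times (5 - 1) - 1}{\text{stride}} + 1 \right\rfloor$$

Starting from stride $= 1$ gives padding $= 2$; any larger stride would force an unreasonably large padding, so conv1 uses stride $= 1$, padding $= 2$.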
- The network model code is as follows:
```python
import torch
from torch import nn


class Tudui(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 32, 5, 1, 2)
        self.maxpool1 = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(32, 32, 5, 1, 2)
        self.maxpool2 = nn.MaxPool2d(2, 2)
        self.conv3 = nn.Conv2d(32, 64, 5, 1, 2)
        self.maxpool3 = nn.MaxPool2d(2, 2)
        # nn.Flatten is a layer; unlike torch.flatten(), it does not merge the batch
        # dimension -- it only flattens each sample within the batch (start_dim=1),
        # whereas torch.flatten() by default flattens the whole input, batch dimension
        # included. See the short demo after this code block.
        self.flatten = nn.Flatten()
        self.linear1 = nn.Linear(1024, 64)  # 1024 = 64 channels * 4 * 4 after the last pooling
        # The final output size is 10 because CIFAR-10 has 10 classes; the network outputs
        # one score (logit) per class, and the class with the largest score is the prediction.
        self.linear2 = nn.Linear(64, 10)

    def forward(self, input):
        x = self.conv1(input)
        x = self.maxpool1(x)
        x = self.conv2(x)
        x = self.maxpool2(x)
        x = self.conv3(x)
        x = self.maxpool3(x)
        x = self.flatten(x)
        x = self.linear1(x)
        output = self.linear2(x)
        return output


tudui = Tudui()
# ones() creates an all-ones tensor to use as dummy input; we only need to specify its shape.
# Feeding this simple input through the network is an easy way to check that the architecture is wired correctly.
input = torch.ones([64, 3, 32, 32])
print(tudui)
output = tudui(input)
print(output.shape)  # [64, 10]: one row of 10 class scores per sample
# Output:
# Tudui(
# (conv1): Conv2d(3, 32, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
# (maxpool1): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
# (conv2): Conv2d(32, 32, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
# (maxpool2): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
# (conv3): Conv2d(32, 64, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
# (maxpool3): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
# (flatten): Flatten(start_dim=1, end_dim=-1)
# (linear1): Linear(in_features=1024, out_features=64, bias=True)
# (linear2): Linear(in_features=64, out_features=10, bias=True)
# )
# torch.Size([64, 10])
```
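As a quick illustration of the `nn.Flatten` vs `torch.flatten()` difference mentioned in the comment above, here is a minimal sketch (the input shape matches the conv3/maxpool3 output of this network):

```python
import torch
from torch import nn

x = torch.ones([64, 64, 4, 4])  # batch of 64 samples, each 64 x 4 x 4

print(nn.Flatten()(x).shape)                 # torch.Size([64, 1024]) -- batch dim preserved
print(torch.flatten(x).shape)                # torch.Size([65536])    -- everything flattened
print(torch.flatten(x, start_dim=1).shape)   # torch.Size([64, 1024]) -- same as nn.Flatten()
```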
- We can use nn.Sequential to merge the layers and simplify the code, as follows:
```python
import torch
from torch import nn


class Tudui(nn.Module):
    def __init__(self):
        super().__init__()
        self.module1 = nn.Sequential(
            nn.Conv2d(3, 32, 5, 1, 2),
            nn.MaxPool2d(2, 2),
            nn.Conv2d(32, 32, 5, 1, 2),
            nn.MaxPool2d(2, 2),
            nn.Conv2d(32, 64, 5, 1, 2),
            nn.MaxPool2d(2, 2),
            nn.Flatten(),
            nn.Linear(1024, 64),
            nn.Linear(64, 10)
        )

    def forward(self, input):
        output = self.module1(input)
        return output


tudui = Tudui()
# ones() creates an all-ones tensor to use as dummy input; we only need to specify its shape.
input = torch.ones([64, 3, 32, 32])
print(tudui)
output = tudui(input)
print(output.shape)  # [64, 10]: one row of 10 class scores per sample
# Output:
# Tudui(
# (module1): Sequential(
# (0): Conv2d(3, 32, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
# (1): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
# (2): Conv2d(32, 32, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
# (3): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
# (4): Conv2d(32, 64, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
# (5): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
# (6): Flatten(start_dim=1, end_dim=-1)
# (7): Linear(in_features=1024, out_features=64, bias=True)
# (8): Linear(in_features=64, out_features=10, bias=True)
# )
# )
# torch.Size([64, 10])
```
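One nicety of `nn.Sequential` (standard PyTorch behavior, shown here as a small usage sketch): the layers inside it can still be reached by index, which is handy for inspecting or debugging individual layers:

```python
print(tudui.module1[0])   # Conv2d(3, 32, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
print(tudui.module1[-1])  # Linear(in_features=64, out_features=10, bias=True)
```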
- We can also use TensorBoard to visualize the model, as follows:
```python
import torch
from torch import nn
from torch.utils.tensorboard import SummaryWriter


class Tudui(nn.Module):
    def __init__(self):
        super().__init__()
        self.module1 = nn.Sequential(
            nn.Conv2d(3, 32, 5, 1, 2),
            nn.MaxPool2d(2, 2),
            nn.Conv2d(32, 32, 5, 1, 2),
            nn.MaxPool2d(2, 2),
            nn.Conv2d(32, 64, 5, 1, 2),
            nn.MaxPool2d(2, 2),
            nn.Flatten(),
            nn.Linear(1024, 64),
            nn.Linear(64, 10)
        )

    def forward(self, input):
        output = self.module1(input)
        return output


writer = SummaryWriter('logs_seq')
tudui = Tudui()
# ones() creates an all-ones tensor to use as dummy input; we only need to specify its shape.
input = torch.ones([64, 3, 32, 32])
print(tudui)
output = tudui(input)
print(output.shape)  # [64, 10]: one row of 10 class scores per sample
writer.add_graph(tudui, input)  # write the model graph and the example input to TensorBoard
writer.close()
```
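After running the script, start TensorBoard with `tensorboard --logdir=logs_seq` and open the Graphs dashboard in the browser; expanding the `Tudui` node shows `module1` and the individual layers inside it.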
- The resulting model graph looks like this: