Disclaimer
- Related learning links
- The deep learning networks here are built on the PyTorch framework; the code has been tested and runs.
- These study notes exist purely to deepen my understanding of the material; if anything is wrong, I would be grateful for your patience and corrections.
References
- PyTorch Tutorials [https://pytorch.org/tutorials/]
- PyTorch Docs [https://pytorch.org/docs/stable/index.html]
- LeNet (1998) [LeCun et al., Gradient-Based Learning Applied to Document Recognition]
Brief Introduction
LeNet
| Dataset | MNIST |
| --- | --- |
| Input (feature maps) | 32×32 (28×28 images, padded to 32×32) |
| Conv layers | 2 |
| FC layers | 3 |
| Activation | Sigmoid |
| Output classes | 10 |
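The fully connected input size 16*5*5 in the code below follows from tracking the feature-map sizes through each layer. A minimal sketch (the conv_out helper is a hypothetical name, not from the original notes) using the standard output-size formula:
python
def conv_out(size, kernel, stride=1, padding=0):
    # floor((size + 2*padding - kernel) / stride) + 1
    return (size + 2 * padding - kernel) // stride + 1

s = conv_out(28, 5, padding=2)  # conv_1: 28 -> 28
s = conv_out(s, 2, stride=2)    # pool:   28 -> 14
s = conv_out(s, 5)              # conv_2: 14 -> 10
s = conv_out(s, 2, stride=2)    # pool:   10 -> 5
print(16 * s * s)               # flattened features: 400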
Code Analysis
Library Imports
python
import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision.transforms import ToTensor
Data Processing
Downloading the Data
python
# Download training data from an open dataset
train_data = datasets.MNIST(
    root="data",
    train=True,
    download=True,
    transform=ToTensor(),
)
# Download test data from an open dataset
test_data = datasets.MNIST(
    root="data",
    train=False,
    download=True,
    transform=ToTensor(),
)
print(f'Number of training examples: {len(train_data)}')
print(f'Number of testing examples: {len(test_data)}')
Number of training examples: 60000
Number of testing examples: 10000
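ToTensor converts each PIL image into a float tensor scaled to [0, 1] with shape [C, H, W]. A quick inspection of one sample (a sketch, not part of the original code) confirms this:
python
# Peek at one training sample: a [1, 28, 28] float tensor in [0, 1] plus an int label
img, label = train_data[0]
print(img.shape, img.dtype, img.min().item(), img.max().item(), label)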
Data Loaders
python
batch_size = 64
# Create data loaders
train_dataloader = DataLoader(train_data, batch_size=batch_size)
test_dataloader = DataLoader(test_data, batch_size=batch_size)

for X, y in test_dataloader:
    print(f"Shape of X [N, C, H, W]: {X.shape}")
    print(f"Shape of y: {y.shape} {y.dtype}")
    break
Shape of X [N, C, H, W]: torch.Size([64, 1, 28, 28])
Shape of y: torch.Size([64]) torch.int64
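These loaders iterate over the data in a fixed order. For training it is common practice to reshuffle every epoch; a possible variant (an addition, not in the original notes):
python
# Optional: reshuffle the training data at every epoch
train_dataloader = DataLoader(train_data, batch_size=batch_size, shuffle=True)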
Building the Model
python
# Select the training device
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(f"Using {device} device")
Using cuda device
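On newer PyTorch builds (1.12+) the same pattern can be extended to other accelerators, for example Apple's MPS backend; a hedged variant:
python
# Variant: also check the MPS backend on Apple Silicon (PyTorch 1.12+)
device = torch.device(
    'cuda' if torch.cuda.is_available()
    else 'mps' if torch.backends.mps.is_available()
    else 'cpu'
)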
python
class LeNet(nn.Module):
    def __init__(self, output_dim):
        super().__init__()
        # conv_1: 1x28x28 -> 6x28x28 (padding=2), pooled to 6x14x14
        self.conv_1 = nn.Sequential(
            nn.Conv2d(1, 6, kernel_size=5, stride=1, padding=2),
            nn.Sigmoid(),
            nn.MaxPool2d(kernel_size=2, stride=2)
        )
        # conv_2: 6x14x14 -> 16x10x10, pooled to 16x5x5
        self.conv_2 = nn.Sequential(
            nn.Conv2d(6, 16, kernel_size=5, stride=1, padding=0),
            nn.Sigmoid(),
            nn.MaxPool2d(kernel_size=2, stride=2)
        )
        self.fc_1 = nn.Sequential(
            nn.Linear(16 * 5 * 5, 120),
            nn.Sigmoid()
        )
        self.fc_2 = nn.Sequential(
            nn.Linear(120, 84),
            nn.Sigmoid()
        )
        self.fc_3 = nn.Linear(84, output_dim)

    def forward(self, x):
        x = self.conv_1(x)
        x = self.conv_2(x)
        x = x.view(x.size(0), -1)  # flatten to [N, 400]
        x = self.fc_1(x)
        x = self.fc_2(x)
        x = self.fc_3(x)
        return x
model = LeNet(10).to(device)
print(model)
LeNet(
  (conv_1): Sequential(
    (0): Conv2d(1, 6, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
    (1): Sigmoid()
    (2): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  )
  (conv_2): Sequential(
    (0): Conv2d(6, 16, kernel_size=(5, 5), stride=(1, 1))
    (1): Sigmoid()
    (2): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  )
  (fc_1): Sequential(
    (0): Linear(in_features=400, out_features=120, bias=True)
    (1): Sigmoid()
  )
  (fc_2): Sequential(
    (0): Linear(in_features=120, out_features=84, bias=True)
    (1): Sigmoid()
  )
  (fc_3): Linear(in_features=84, out_features=10, bias=True)
)
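Before training, a dummy forward pass (a quick sketch) verifies that the shapes line up and the network emits one logit per class:
python
# Sanity check: a random batch should yield [N, 10] logits
x = torch.randn(2, 1, 28, 28, device=device)
print(model(x).shape)  # torch.Size([2, 10])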
Training the Model
Choosing the Loss Function and Optimizer
python
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
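nn.CrossEntropyLoss combines LogSoftmax and NLLLoss, so the model's raw logits go in directly together with integer class indices. A minimal illustration with made-up values:
python
# CrossEntropyLoss takes raw logits [N, C] and integer targets [N]; no softmax needed
logits = torch.tensor([[2.0, 0.5, 0.1]])      # one sample, three classes
target = torch.tensor([0])                    # true class index
print(nn.CrossEntropyLoss()(logits, target))  # negative log-probability of class 0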
Training Loop
python
def train(dataloader, model, loss_fn, optimizer):
    size = len(dataloader.dataset)
    model.train()
    for batch, (X, y) in enumerate(dataloader):
        X, y = X.to(device), y.to(device)

        # Compute prediction error
        pred = model(X)
        loss = loss_fn(pred, y)

        # Backpropagation
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()

        if batch % 100 == 0:
            loss, current = loss.item(), (batch + 1) * len(X)
            print(f"loss: {loss:>7f} [{current:>5d}/{size:>5d}]")
Test Loop
python
def test(dataloader, model, loss_fn):
    size = len(dataloader.dataset)
    num_batches = len(dataloader)
    model.eval()
    test_loss, correct = 0, 0
    with torch.no_grad():
        for X, y in dataloader:
            X, y = X.to(device), y.to(device)
            pred = model(X)
            test_loss += loss_fn(pred, y).item()
            correct += (pred.argmax(1) == y).type(torch.float).sum().item()
    test_loss /= num_batches
    correct /= size
    print(f"Test Error: \n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \n")
Running the Training
python
epochs = 10
for t in range(epochs):
    print(f"Epoch {t+1}\n-------------------------------")
    train(train_dataloader, model, loss_fn, optimizer)
    test(test_dataloader, model, loss_fn)
print("Done!")
Epoch 10
-------------------------------
loss: 0.015569 [ 64/60000]
loss: 0.029817 [ 6464/60000]
loss: 0.043169 [12864/60000]
loss: 0.027709 [19264/60000]
loss: 0.021492 [25664/60000]
loss: 0.011533 [32064/60000]
loss: 0.045418 [38464/60000]
loss: 0.042875 [44864/60000]
loss: 0.152001 [51264/60000]
loss: 0.040214 [57664/60000]
Test Error:
Accuracy: 98.6%, Avg loss: 0.044844
Model Management
Saving the Model
python
model_name = 'LeNet'
model_file = model_name + ".pth"
torch.save(model.state_dict(), model_file)
print("Saved PyTorch Model State to " + model_file)
Saved PyTorch Model State to LeNet.pth
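To reuse the saved weights later, instantiate the same architecture and load the state dict. The snippet below is a sketch that also runs one test image through the restored model:
python
# Reload the saved weights and classify a single test image (a sketch)
model = LeNet(10).to(device)
model.load_state_dict(torch.load(model_file))
model.eval()

x, y = test_data[0]
with torch.no_grad():
    pred = model(x.unsqueeze(0).to(device)).argmax(1).item()
print(f"Predicted: {pred}, Actual: {y}")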
Summary
Installing torchsummary
bash
pip install torchsummary
Calling summary
python
from torchsummary import summary
model = LeNet(10).to(device)
summary(model, (1, 28, 28))
----------------------------------------------------------------
        Layer (type)               Output Shape         Param #
================================================================
            Conv2d-1            [-1, 6, 28, 28]             156
           Sigmoid-2            [-1, 6, 28, 28]               0
         MaxPool2d-3            [-1, 6, 14, 14]               0
            Conv2d-4           [-1, 16, 10, 10]           2,416
           Sigmoid-5           [-1, 16, 10, 10]               0
         MaxPool2d-6             [-1, 16, 5, 5]               0
            Linear-7                  [-1, 120]          48,120
           Sigmoid-8                  [-1, 120]               0
            Linear-9                   [-1, 84]          10,164
          Sigmoid-10                   [-1, 84]               0
           Linear-11                   [-1, 10]             850
================================================================
Total params: 61,706
Trainable params: 61,706
Non-trainable params: 0
----------------------------------------------------------------
Input size (MB): 0.00
Forward/backward pass size (MB): 0.11
Params size (MB): 0.24
Estimated Total Size (MB): 0.35
----------------------------------------------------------------
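The reported total can be cross-checked directly against the model's own parameters (156 + 2,416 + 48,120 + 10,164 + 850 = 61,706):
python
# Cross-check torchsummary's count from the parameters themselves
total = sum(p.numel() for p in model.parameters())
print(total)  # 61706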