Dive into Deep Learning (PyTorch Edition) Code Practice - Convolutional Neural Networks - 24 Deep Convolutional Neural Networks: AlexNet

24 Deep Convolutional Neural Networks: AlexNet

AlexNet stacks five convolutional layers and three fully connected layers, using ReLU activations and dropout in the classifier. The version below follows the book's adaptation for single-channel 224x224 Fashion-MNIST inputs.

python
import torch
from torch import nn
import liliPytorch as lp
import matplotlib.pyplot as plt

dropout1 = 0.5
# AlexNet architecture
net = nn.Sequential(
    nn.Conv2d(1, 96, kernel_size=11, stride=4, padding=1),
    nn.ReLU(),
    nn.MaxPool2d(kernel_size=3, stride=2),

    nn.Conv2d(96, 256, kernel_size=5, padding=2),
    nn.ReLU(),
    nn.MaxPool2d(kernel_size=3, stride=2),

    nn.Conv2d(256, 384, kernel_size=3, padding=1),
    nn.ReLU(),
    nn.Conv2d(384, 384, kernel_size=3, padding=1),
    nn.ReLU(),
    nn.Conv2d(384, 256, kernel_size=3, padding=1),
    nn.ReLU(),
    nn.MaxPool2d(kernel_size=3, stride=2),
    
    nn.Flatten(),
    nn.Linear(6400, 4096),
    nn.ReLU(),
    nn.Dropout(dropout1),
    nn.Linear(4096, 4096),
    nn.ReLU(),
    nn.Dropout(dropout1),
    nn.Linear(4096, 10)
)
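
# A quick sanity check on model size (a minimal sketch): count the learnable
# parameters. For this single-channel variant the total comes to about 46.8
# million, most of it in the first fully connected layer (6400 * 4096).
num_params = sum(p.numel() for p in net.parameters())
print(f'{num_params:,} parameters')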

# A modified variant: drop the three 3x3 conv layers and add a second pooling stage
lilinet = nn.Sequential(
    nn.Conv2d(1, 96, kernel_size=11, stride=4, padding=1),
    nn.ReLU(),
    nn.MaxPool2d(kernel_size=3, stride=2),

    nn.Conv2d(96, 256, kernel_size=5, padding=2),
    nn.ReLU(),
    nn.MaxPool2d(kernel_size=3, stride=2),
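    # A second pooling stage stands in for the removed 3x3 conv block:
    # 12x12 -> 5x5, so the flattened size stays 256 * 5 * 5 = 6400.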
    nn.MaxPool2d(kernel_size=3, stride=2),

    nn.Flatten(),
    nn.Linear(6400, 4096),
    nn.ReLU(),
    nn.Dropout(dropout1),
    nn.Linear(4096, 4096),
    nn.ReLU(),
    nn.Dropout(dropout1),
    nn.Linear(4096, 10)
)


# By printing the output shape after each layer, we can inspect the model
X = torch.rand(size=(1, 1, 224, 224), dtype=torch.float32) 
for layer in net:
    X = layer(X)  # pass the input through each layer in turn
    print(layer.__class__.__name__, 'output shape: \t', X.shape)  # print each layer's output shape
"""
Conv2d output shape:     torch.Size([1, 96, 54, 54])
ReLU output shape:       torch.Size([1, 96, 54, 54])
MaxPool2d output shape:          torch.Size([1, 96, 26, 26])
Conv2d output shape:     torch.Size([1, 256, 26, 26])
ReLU output shape:       torch.Size([1, 256, 26, 26])
MaxPool2d output shape:          torch.Size([1, 256, 12, 12])
Conv2d output shape:     torch.Size([1, 384, 12, 12])
ReLU output shape:       torch.Size([1, 384, 12, 12])
Conv2d output shape:     torch.Size([1, 384, 12, 12])
ReLU output shape:       torch.Size([1, 384, 12, 12])
Conv2d output shape:     torch.Size([1, 256, 12, 12])
ReLU output shape:       torch.Size([1, 256, 12, 12])
MaxPool2d output shape:          torch.Size([1, 256, 5, 5])
Flatten output shape:    torch.Size([1, 6400])
Linear output shape:     torch.Size([1, 4096])
ReLU output shape:       torch.Size([1, 4096])
Dropout output shape:    torch.Size([1, 4096])
Linear output shape:     torch.Size([1, 4096])
ReLU output shape:       torch.Size([1, 4096])
Dropout output shape:    torch.Size([1, 4096])
Linear output shape:     torch.Size([1, 10])
"""

# Load the dataset
batch_size = 64
train_iter, test_iter = lp.loda_data_fashion_mnist(batch_size, resize=224)  # load Fashion-MNIST, resized to 224x224
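
# lp.loda_data_fashion_mnist is the author's helper. A minimal torchvision
# equivalent (a sketch under standard-dataset assumptions, not the helper's
# actual implementation):
import torchvision
from torch.utils.data import DataLoader

def load_data_fashion_mnist(batch_size, resize=None):
    trans = [torchvision.transforms.ToTensor()]
    if resize:
        # Resize before converting to tensor, e.g. 28x28 -> 224x224.
        trans.insert(0, torchvision.transforms.Resize(resize))
    trans = torchvision.transforms.Compose(trans)
    train = torchvision.datasets.FashionMNIST(
        root='./data', train=True, transform=trans, download=True)
    test = torchvision.datasets.FashionMNIST(
        root='./data', train=False, transform=trans, download=True)
    return (DataLoader(train, batch_size, shuffle=True),
            DataLoader(test, batch_size, shuffle=False))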

# AlexNet run (commented out; results recorded from an earlier run)
# lr, num_epochs = 0.01, 10
# batch_size = 128
# lp.train_ch6(net, train_iter, test_iter, num_epochs, lr, lp.try_gpu())
# loss 0.329, train acc 0.879, test acc 0.883

# Modified variant
lr, num_epochs = 0.1, 10
lp.train_ch6(lilinet, train_iter, test_iter, num_epochs, lr, lp.try_gpu())
plt.show()  # show the training curves

# lr, num_epochs = 0.01, 10
# batch_size = 128
# loss 0.356, train acc 0.868, test acc 0.870

# lr, num_epochs = 0.1, 10
# batch_size = 64
# loss 0.212, train acc 0.920, test acc 0.903
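
# lp.train_ch6 follows the d2l-style training loop. A minimal sketch of its
# core (assumed structure: SGD + cross-entropy with per-epoch evaluation;
# the real helper also plots the curves shown by plt.show()):
def train_sketch(net, train_iter, test_iter, num_epochs, lr, device):
    net.to(device)
    loss = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(net.parameters(), lr=lr)
    for epoch in range(num_epochs):
        net.train()
        for X, y in train_iter:
            X, y = X.to(device), y.to(device)
            optimizer.zero_grad()
            l = loss(net(X), y)
            l.backward()
            optimizer.step()
        net.eval()
        correct = total = 0
        with torch.no_grad():
            for X, y in test_iter:
                X, y = X.to(device), y.to(device)
                correct += (net(X).argmax(dim=1) == y).sum().item()
                total += y.numel()
        print(f'epoch {epoch + 1}, test acc {correct / total:.3f}')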

Run results:

(training-curve figure)
