Dive into Deep Learning (PyTorch Edition) Code Practice - Convolutional Neural Networks - 24 Deep Convolutional Neural Networks: AlexNet

24 Deep Convolutional Neural Networks: AlexNet

python
import torch
from torch import nn
import liliPytorch as lp
import matplotlib.pyplot as plt

dropout1 = 0.5  # dropout probability for the fully connected layers
# AlexNet architecture
net = nn.Sequential(
    nn.Conv2d(1, 96, kernel_size=11, stride=4, padding=1),
    nn.ReLU(),
    nn.MaxPool2d(kernel_size=3, stride=2),

    nn.Conv2d(96, 256, kernel_size=5, padding=2),
    nn.ReLU(),
    nn.MaxPool2d(kernel_size=3, stride=2),

    nn.Conv2d(256, 384, kernel_size=3, padding=1),
    nn.ReLU(),
    nn.Conv2d(384, 384, kernel_size=3, padding=1),
    nn.ReLU(),
    nn.Conv2d(384, 256, kernel_size=3, padding=1),
    nn.ReLU(),
    nn.MaxPool2d(kernel_size=3, stride=2),
    
    nn.Flatten(),
    nn.Linear(6400, 4096),
    nn.ReLU(),
    nn.Dropout(dropout1),
    nn.Linear(4096, 4096),
    nn.ReLU(),
    nn.Dropout(dropout1),
    nn.Linear(4096,10)
)
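
# Added for reference (not in the original post): a one-line check of the
# trainable parameter count; most of AlexNet's parameters sit in the first
# two Linear layers of the classifier head.
num_params = sum(p.numel() for p in net.parameters() if p.requires_grad)
print(f'{num_params:,} trainable parameters')  # roughly 46 million here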

# A playful modification: drop the three 3x3 conv layers and pool twice instead
lilinet = nn.Sequential(
    nn.Conv2d(1, 96, kernel_size=11, stride=4, padding=1),
    nn.ReLU(),
    nn.MaxPool2d(kernel_size=3, stride=2),

    nn.Conv2d(96, 256, kernel_size=5, padding=2),
    nn.ReLU(),
    nn.MaxPool2d(kernel_size=3, stride=2),
    nn.MaxPool2d(kernel_size=3, stride=2),

    nn.Flatten(),
    nn.Linear(6400, 4096),
    nn.ReLU(),
    nn.Dropout(dropout1),
    nn.Linear(4096, 4096),
    nn.ReLU(),
    nn.Dropout(dropout1),
    nn.Linear(4096,10)
)
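
# The modification replaces the 3x3 conv stack with a second 3x3/stride-2
# max-pool: the 26x26 feature map shrinks to 12x12 and then 5x5, so the
# flattened size is still 256 * 5 * 5 = 6400 and the same classifier head
# fits unchanged. A quick check (a sketch added here; slices the Sequential
# up to and including the Flatten layer):
with torch.no_grad():
    feat = lilinet[:8](torch.rand(1, 1, 224, 224))  # layers 0-7, incl. Flatten
print(feat.shape)  # expected: torch.Size([1, 6400])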


# By printing the output shape after each layer, we can inspect the model
X = torch.rand(size=(1, 1, 224, 224), dtype=torch.float32)
for layer in net:
    X = layer(X)  # pass the input through each layer in turn
    print(layer.__class__.__name__, 'output shape: \t', X.shape)  # print each layer's output shape
"""
Conv2d output shape:     torch.Size([1, 96, 54, 54])
ReLU output shape:       torch.Size([1, 96, 54, 54])
MaxPool2d output shape:          torch.Size([1, 96, 26, 26])
Conv2d output shape:     torch.Size([1, 256, 26, 26])
ReLU output shape:       torch.Size([1, 256, 26, 26])
MaxPool2d output shape:          torch.Size([1, 256, 12, 12])
Conv2d output shape:     torch.Size([1, 384, 12, 12])
ReLU output shape:       torch.Size([1, 384, 12, 12])
Conv2d output shape:     torch.Size([1, 384, 12, 12])
ReLU output shape:       torch.Size([1, 384, 12, 12])
Conv2d output shape:     torch.Size([1, 256, 12, 12])
ReLU output shape:       torch.Size([1, 256, 12, 12])
MaxPool2d output shape:          torch.Size([1, 256, 5, 5])
Flatten output shape:    torch.Size([1, 6400])
Linear output shape:     torch.Size([1, 4096])
ReLU output shape:       torch.Size([1, 4096])
Dropout output shape:    torch.Size([1, 4096])
Linear output shape:     torch.Size([1, 4096])
ReLU output shape:       torch.Size([1, 4096])
Dropout output shape:    torch.Size([1, 4096])
Linear output shape:     torch.Size([1, 10])
"""

# Load the dataset
batch_size = 64
train_iter, test_iter = lp.loda_data_fashion_mnist(batch_size, resize=224)  # load Fashion-MNIST resized to 224x224
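
# For readers without the author's liliPytorch module: the loader above
# presumably mirrors d2l's load_data_fashion_mnist, i.e. torchvision's
# FashionMNIST wrapped in DataLoaders with an optional Resize transform.
# A minimal equivalent sketch, under that assumption:
import torchvision
from torchvision import transforms
from torch.utils import data

def load_data_fashion_mnist(batch_size, resize=None):
    trans = [transforms.ToTensor()]
    if resize:
        trans.insert(0, transforms.Resize(resize))  # e.g. 28x28 -> 224x224
    trans = transforms.Compose(trans)
    mnist_train = torchvision.datasets.FashionMNIST(
        root='./data', train=True, transform=trans, download=True)
    mnist_test = torchvision.datasets.FashionMNIST(
        root='./data', train=False, transform=trans, download=True)
    return (data.DataLoader(mnist_train, batch_size, shuffle=True),
            data.DataLoader(mnist_test, batch_size, shuffle=False))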

# AlexNet architecture (commented out; results recorded from an earlier run)
# lr, num_epochs = 0.01, 10
# batch_size = 128
# lp.train_ch6(net, train_iter, test_iter, num_epochs, lr, lp.try_gpu())
# loss 0.329, train acc 0.879, test acc 0.883

# Modified variant
lr, num_epochs = 0.1, 10
lp.train_ch6(lilinet, train_iter, test_iter, num_epochs, lr, lp.try_gpu())
plt.show()  # show the training curves

# lr, num_epochs = 0.01, 10
# batch_size = 128
# loss 0.356, train acc 0.868, test acc 0.870

# lr, num_epochs = 0.1, 10
# batch_size = 64
# loss 0.212, train acc 0.920, test acc 0.903
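
# For readers without the author's liliPytorch module: lp.try_gpu()
# presumably returns the first CUDA device when available, else the CPU,
# and lp.train_ch6 presumably mirrors d2l's train_ch6 (Xavier init, SGD,
# cross-entropy loss, per-epoch evaluation). A condensed sketch under that
# assumption, with the plotting/metric bookkeeping omitted:
def try_gpu_sketch():
    return torch.device('cuda' if torch.cuda.is_available() else 'cpu')

def train_sketch(net, train_iter, test_iter, num_epochs, lr, device):
    def init_weights(m):
        if type(m) in (nn.Linear, nn.Conv2d):
            nn.init.xavier_uniform_(m.weight)
    net.apply(init_weights)  # Xavier-initialize conv and linear weights
    net.to(device)
    optimizer = torch.optim.SGD(net.parameters(), lr=lr)
    loss_fn = nn.CrossEntropyLoss()
    for epoch in range(num_epochs):
        net.train()
        for X, y in train_iter:
            X, y = X.to(device), y.to(device)
            optimizer.zero_grad()
            l = loss_fn(net(X), y)  # cross-entropy on the batch
            l.backward()
            optimizer.step()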

Run results: (training-curve figure shown by plt.show(), not reproduced here)
