Dive into Deep Learning (PyTorch Edition) Code Practice - Convolutional Neural Networks - 24 Deep Convolutional Neural Networks: AlexNet

24 Deep Convolutional Neural Networks: AlexNet

python
import torch
from torch import nn
import liliPytorch as lp  # the author's own helper module with d2l-style utilities
import matplotlib.pyplot as plt

dropout1 = 0.5
# AlexNet architecture
net = nn.Sequential(
    nn.Conv2d(1, 96, kernel_size=11, stride=4, padding=1),
    nn.ReLU(),
    nn.MaxPool2d(kernel_size=3, stride=2),

    nn.Conv2d(96, 256, kernel_size=5, padding=2),
    nn.ReLU(),
    nn.MaxPool2d(kernel_size=3, stride=2),

    nn.Conv2d(256, 384, kernel_size=3, padding=1),
    nn.ReLU(),
    nn.Conv2d(384, 384, kernel_size=3, padding=1),
    nn.ReLU(),
    nn.Conv2d(384, 256, kernel_size=3, padding=1),
    nn.ReLU(),
    nn.MaxPool2d(kernel_size=3, stride=2),
    
    nn.Flatten(),
    nn.Linear(6400, 4096),
    nn.ReLU(),
    nn.Dropout(dropout1),
    nn.Linear(4096, 4096),
    nn.ReLU(),
    nn.Dropout(dropout1),
    nn.Linear(4096,10)
)
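
# Sanity check: the bulk of AlexNet's parameters sit in the fully connected
# layers. A quick way to confirm this with plain PyTorch:
total_params = sum(p.numel() for p in net.parameters())
fc_params = sum(p.numel() for layer in net if isinstance(layer, nn.Linear)
                for p in layer.parameters())
print(f'total parameters: {total_params:,}')  # the 6400 -> 4096 layer dominates
print(f'parameters in Linear layers: {fc_params:,}')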

# Modified variant: drop the three 3x3 conv layers and add an extra pooling layer
lilinet = nn.Sequential(
    nn.Conv2d(1, 96, kernel_size=11, stride=4, padding=1),
    nn.ReLU(),
    nn.MaxPool2d(kernel_size=3, stride=2),

    nn.Conv2d(96, 256, kernel_size=5, padding=2),
    nn.ReLU(),
    nn.MaxPool2d(kernel_size=3, stride=2),
    nn.MaxPool2d(kernel_size=3, stride=2), # extra pooling: 12x12 -> 5x5, so Flatten still yields 256*5*5 = 6400

    nn.Flatten(),
    nn.Linear(6400, 4096),
    nn.ReLU(),
    nn.Dropout(dropout1),
    nn.Linear(4096, 4096),
    nn.ReLU(),
    nn.Dropout(dropout1),
    nn.Linear(4096,10)
)


# We can inspect the model by printing the output shape after each layer
X = torch.rand(size=(1, 1, 224, 224), dtype=torch.float32) 
for layer in net:
    X = layer(X) # pass the input through each layer in turn
    print(layer.__class__.__name__, 'output shape: \t', X.shape) # print each layer's output shape
"""
Conv2d output shape:     torch.Size([1, 96, 54, 54])
ReLU output shape:       torch.Size([1, 96, 54, 54])
MaxPool2d output shape:          torch.Size([1, 96, 26, 26])
Conv2d output shape:     torch.Size([1, 256, 26, 26])
ReLU output shape:       torch.Size([1, 256, 26, 26])
MaxPool2d output shape:          torch.Size([1, 256, 12, 12])
Conv2d output shape:     torch.Size([1, 384, 12, 12])
ReLU output shape:       torch.Size([1, 384, 12, 12])
Conv2d output shape:     torch.Size([1, 384, 12, 12])
ReLU output shape:       torch.Size([1, 384, 12, 12])
Conv2d output shape:     torch.Size([1, 256, 12, 12])
ReLU output shape:       torch.Size([1, 256, 12, 12])
MaxPool2d output shape:          torch.Size([1, 256, 5, 5])
Flatten output shape:    torch.Size([1, 6400])
Linear output shape:     torch.Size([1, 4096])
ReLU output shape:       torch.Size([1, 4096])
Dropout output shape:    torch.Size([1, 4096])
Linear output shape:     torch.Size([1, 4096])
ReLU output shape:       torch.Size([1, 4096])
Dropout output shape:    torch.Size([1, 4096])
Linear output shape:     torch.Size([1, 10])
"""

# Load the dataset
batch_size = 64
train_iter, test_iter = lp.load_data_fashion_mnist(batch_size, resize=224) # load Fashion-MNIST, resized to 224x224
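
# liliPytorch is the author's own helper module. Assuming its loader mirrors
# the d2l function of the same name, a self-contained equivalent built on
# torchvision would look roughly like this (a reference sketch, not the
# author's actual implementation; defined here but not called):
import torchvision
from torch.utils import data
from torchvision import transforms

def load_data_fashion_mnist_ref(batch_size, resize=None):
    """Return Fashion-MNIST train/test DataLoaders, optionally resized."""
    trans = [transforms.ToTensor()]
    if resize:
        trans.insert(0, transforms.Resize(resize))  # e.g. 28x28 -> 224x224
    trans = transforms.Compose(trans)
    mnist_train = torchvision.datasets.FashionMNIST(
        root='./data', train=True, transform=trans, download=True)
    mnist_test = torchvision.datasets.FashionMNIST(
        root='./data', train=False, transform=trans, download=True)
    return (data.DataLoader(mnist_train, batch_size, shuffle=True),
            data.DataLoader(mnist_test, batch_size, shuffle=False))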

# AlexNet (original architecture):
# lr, num_epochs = 0.01, 10
# batch_size = 128
# lp.train_ch6(net, train_iter, test_iter, num_epochs, lr, lp.try_gpu())
# loss 0.329, train acc 0.879, test acc 0.883

# Modified variant:
lr, num_epochs = 0.1, 10
lp.train_ch6(lilinet, train_iter, test_iter, num_epochs, lr, lp.try_gpu())
plt.show() # display the training curves

# lr, num_epochs = 0.01, 10
# batch_size = 128
# loss 0.356, train acc 0.868, test acc 0.870

# lr, num_epochs = 0.1, 10
# batch_size = 64
# loss 0.212, train acc 0.920, test acc 0.903
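
# lp.train_ch6 and lp.try_gpu appear to follow the d2l conventions: try_gpu()
# returns torch.device('cuda:0') when a GPU is available (else CPU), and
# train_ch6 trains with cross-entropy loss and SGD. A minimal sketch of such
# a loop (train_sketch is a hypothetical stand-in, not lp's code; it omits
# the Xavier initialization and live plotting of the d2l version):
def train_sketch(net, train_iter, test_iter, num_epochs, lr, device):
    net.to(device)
    optimizer = torch.optim.SGD(net.parameters(), lr=lr)
    loss_fn = nn.CrossEntropyLoss()
    for epoch in range(num_epochs):
        net.train()
        for X, y in train_iter:
            X, y = X.to(device), y.to(device)
            optimizer.zero_grad()
            loss_fn(net(X), y).backward()
            optimizer.step()
        net.eval()  # evaluate test accuracy after each epoch
        correct = total = 0
        with torch.no_grad():
            for X, y in test_iter:
                X, y = X.to(device), y.to(device)
                correct += (net(X).argmax(dim=1) == y).sum().item()
                total += y.numel()
        print(f'epoch {epoch + 1}, test acc {correct / total:.3f}')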

Run results:
