Pytorch-MLP-Mnist

model.py

```py
import torch.nn as nn
import torch.nn.init as init

class MLP_cls(nn.Module):
    def __init__(self, in_dim=28*28):
        super(MLP_cls, self).__init__()
        self.lin1 = nn.Linear(in_dim, 128)
        self.lin2 = nn.Linear(128, 64)
        self.lin3 = nn.Linear(64, 10)
        self.relu = nn.ReLU()
        init.xavier_uniform_(self.lin1.weight)
        init.xavier_uniform_(self.lin2.weight)
        init.xavier_uniform_(self.lin3.weight)

    def forward(self, x):
        x = x.view(-1, 28*28)  # flatten (N, 1, 28, 28) images to (N, 784)
        x = self.lin1(x)
        x = self.relu(x)
        x = self.lin2(x)
        x = self.relu(x)
        x = self.lin3(x)       # return raw logits: no ReLU on the output layer,
        return x               # since nn.CrossEntropyLoss applies log-softmax itself
```
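A quick shape check (a sketch, not part of the original files): a fake MNIST mini-batch should come out as one row of 10 logits per image.

```py
import torch
from model import MLP_cls

net = MLP_cls()
dummy = torch.randn(4, 1, 28, 28)  # shaped like a batch of 4 MNIST images
print(net(dummy).shape)            # torch.Size([4, 10])
```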

main.py

```py
import torch
import torch.nn as nn
import torchvision
from torch.utils.data import DataLoader
import torch.optim as optim
from model import MLP_cls


seed = 42
torch.manual_seed(seed)
batch_size_train = 64
batch_size_test = 64
epochs = 10
learning_rate = 0.01
momentum = 0.5
mlp_net = MLP_cls()

train_loader = DataLoader(
    torchvision.datasets.MNIST('./data/', train=True, download=True,
                               transform=torchvision.transforms.Compose([
                                   torchvision.transforms.ToTensor(),
                                   torchvision.transforms.Normalize(
                                       (0.5,), (0.5,))
                               ])),
    batch_size=batch_size_train, shuffle=True)
test_loader = DataLoader(
    torchvision.datasets.MNIST('./data/', train=False, download=True,
                               transform=torchvision.transforms.Compose([
                                   torchvision.transforms.ToTensor(),
                                   torchvision.transforms.Normalize(
                                       (0.5,), (0.5,))
                               ])),
    batch_size=batch_size_test, shuffle=False)  # no need to shuffle the test set

optimizer = optim.SGD(mlp_net.parameters(), lr=learning_rate, momentum=momentum)
criterion = nn.CrossEntropyLoss()

print("****************Begin Training****************")
mlp_net.train()
for epoch in range(epochs):
    run_loss = 0.0
    correct_num = 0
    for batch_idx, (data, target) in enumerate(train_loader):
        optimizer.zero_grad()
        out = mlp_net(data)
        _, pred = torch.max(out, dim=1)
        loss = criterion(out, target)
        loss.backward()
        optimizer.step()
        # .item() detaches the scalar; accumulating the loss tensor itself
        # would keep every batch's autograd graph alive for the whole epoch
        run_loss += loss.item()
        correct_num += torch.sum(pred == target).item()
    print('epoch', epoch,
          'loss {:.2f}'.format(run_loss / len(train_loader)),
          # divide by the exact dataset size: the last batch may be smaller
          'accuracy {:.2f}'.format(correct_num / len(train_loader.dataset)))


print("****************Begin Testing****************")
mlp_net.eval()
test_loss = 0.0
test_correct_num = 0
with torch.no_grad():  # no gradients are needed for evaluation
    for batch_idx, (data, target) in enumerate(test_loader):
        out = mlp_net(data)
        _, pred = torch.max(out, dim=1)
        test_loss += criterion(out, target).item()
        test_correct_num += torch.sum(pred == target).item()
print('loss {:.2f}'.format(test_loss / len(test_loader)),
      'accuracy {:.2f}'.format(test_correct_num / len(test_loader.dataset)))
```
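Once training finishes, the script's objects can be reused to classify a single test image. A minimal sketch, assuming mlp_net and test_loader from the code above:

```py
mlp_net.eval()
with torch.no_grad():
    images, labels = next(iter(test_loader))
    logits = mlp_net(images[:1])        # take one image, keep the batch dimension
    pred = logits.argmax(dim=1).item()  # index of the largest logit = predicted digit
print('predicted:', pred, 'actual:', labels[0].item())
```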

Parameter Settings

```bash
'./data/'  # data directory
seed = 42  # random seed
batch_size_train = 64
batch_size_test  = 64
epochs = 10

optim --> SGD
learning_rate = 0.01
momentum = 0.5
```

Notes

Weight initialization

Each layer's weights are initialized explicitly in __init__:

```py
        init.xavier_uniform_(self.lin1.weight)
        init.xavier_uniform_(self.lin2.weight)
        init.xavier_uniform_(self.lin3.weight)
```
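An alternative pattern, shown only as a sketch (the post initializes each layer by hand), uses Module.apply to initialize every Linear layer, biases included, in one pass:

```py
import torch.nn as nn
import torch.nn.init as init
from model import MLP_cls

def init_weights(m):
    # Xavier-uniform for every Linear layer's weight, zeros for its bias.
    if isinstance(m, nn.Linear):
        init.xavier_uniform_(m.weight)
        init.zeros_(m.bias)

net = MLP_cls()
net.apply(init_weights)  # apply() visits every submodule recursively
```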

If the loss and accuracy do not change

Check whether you forgot to call optimizer.step().
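For reference, here is a minimal skeleton of the correct order inside one batch (a sketch reusing the names from main.py above); the weights only move when step() runs:

```py
for data, target in train_loader:
    optimizer.zero_grad()          # clear gradients left over from the previous batch
    out = mlp_net(data)            # forward pass
    loss = criterion(out, target)
    loss.backward()                # compute gradients
    optimizer.step()               # apply the update; without this line the
                                   # weights never change, so loss/acc stay flat
```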

About data download

With download=True, the dataset is downloaded into the ./data folder on the first run; later runs reuse the cached copy.
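To confirm where the files landed, a quick check (a sketch; the MNIST/raw layout is torchvision's current default and may differ between versions):

```py
# List the cached MNIST files (assumes torchvision's default ./data/MNIST/raw layout).
import os
print(os.listdir('./data/MNIST/raw'))
```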

About the output format

The logs use 'xxx {:.2f}'.format(xxx) to keep two decimal places. Mind the space inside the string, and don't confuse the format-spec :.2f with printf-style %.2f.
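A minimal illustration of the two spellings:

```py
acc = 0.98765
print('accuracy {:.2f}'.format(acc))  # str.format syntax  -> accuracy 0.99
print('accuracy %.2f' % acc)          # printf-style syntax -> accuracy 0.99
```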

Run output

(screenshot of the training and test logs)
