PyTorch from Scratch 05

PyTorch from Scratch: Sneaker Recognition

This series is based on the 365-Day Deep Learning Training Camp.

Original author: K同学

Environment Setup

This post is based on Jupyter Notebook, using Python 3.8, PyTorch 2.0.1+cu118, and torchvision 0.15.2. Readers are expected to have the environment configured and some grounding in deep learning theory. The goal of this experiment is to learn how to set up a dynamic learning rate.
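
A quick sanity check of the installed versions (a minimal sketch; the values in the comments are what this post assumes, not guaranteed output):

python
import torch
import torchvision
print(torch.__version__)          # expected here: 2.0.1+cu118
print(torchvision.__version__)    # expected here: 0.15.2
print(torch.cuda.is_available())  # True if the CUDA build is usable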

Step 1: import the common packages.

python
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
import torchvision
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torch.nn.functional as F
import random
import time
import numpy as np
import pandas as pd
import datetime
import gc
import os
os.environ['KMP_DUPLICATE_LIB_OK']='True'  # work around duplicate OpenMP runtimes that can crash the Jupyter kernel
torch.backends.cudnn.benchmark=True  # let cuDNN auto-tune convolution kernels to speed up GPU computation

Set the random seeds. 428 didn't work well, so this time we use 55.

python
torch.manual_seed(55)
torch.cuda.manual_seed(55)
torch.cuda.manual_seed_all(55)
random.seed(55)
np.random.seed(55)

Create the device object and check which device is available.

python
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device

Dataset

This experiment is a classification task on sneaker images: 579 images in total, forming a binary classification problem with labels adidas and nike. The images of the two classes are stored in separate folders.
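
To double-check these counts, the class folders can be listed and the images in each counted (a small sketch, assuming the ./data/snk layout and .jpg files used in this post):

python
import pathlib
for split in ['train', 'test']:
    for cls_dir in sorted(pathlib.Path(f'./data/snk/{split}').glob('*')):
        # count the images in each class folder
        print(split, cls_dir.name, len(list(cls_dir.glob('*.jpg'))))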

A function to display sample images.

python
def plotsample(data):
    fig, axs = plt.subplots(1, 5, figsize=(10, 10))  # create the subplots
    for i in range(5):
        num = random.randint(0, len(data) - 1)  # pick a random index, five times in total
        # make_grid promotes an image of any channel count to 3 channels
        # without altering the underlying data; imshow, used below for display,
        # most commonly expects 3-channel input
        npimg = torchvision.utils.make_grid(data[num][0]).numpy()
        nplabel = data[num][1]  # extract the label
        # convert the image from (3, height, width) to (height, width, 3) for imshow
        axs[i].imshow(np.transpose(npimg, (1, 2, 0)))
        axs[i].set_title(nplabel)  # title each subplot with its label
        axs[i].axis("off")  # hide the axes

Check classNames.

python
import pathlib
data_dir = './data/snk/train'
data_dir = pathlib.Path(data_dir)  # convert to a pathlib.Path object

data_paths = list(data_dir.glob('*'))  # [PosixPath('data/snk/train/adidas'), PosixPath('data/snk/train/nike')]
classNames = [path.name for path in data_paths]  # use the folder name as the class name (more robust than splitting the path string)
classNames  # binary classification: ['adidas', 'nike']

Use transforms to preprocess the raw data: resize to a uniform size, convert to tensors, and normalize.

python
train_transforms = transforms.Compose([
    transforms.Resize([224, 224]),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) # normalize with ImageNet mean/std
])

test_transforms = transforms.Compose([
    transforms.Resize([224, 224]),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) # normalize with ImageNet mean/std
])

# ImageFolder assigns labels from the subfolder names
train_dataset = datasets.ImageFolder("./data/snk/train/", transform=train_transforms)
test_dataset = datasets.ImageFolder("./data/snk/test/", transform=test_transforms)

Randomly view 5 images.

python
plotsample(train_dataset)
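
The images in train_dataset have already been normalized with ImageNet statistics, so they will look color-shifted when displayed this way. To view them faithfully, the normalization can be inverted first (a sketch; unnormalize is a hypothetical helper, not part of the original code):

python
def unnormalize(npimg):
    # npimg: a (3, H, W) numpy array produced by make_grid(...).numpy()
    # invert transforms.Normalize channel-wise: x * std + mean, then clip to [0, 1]
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    return np.clip(np.transpose(npimg, (1, 2, 0)) * std + mean, 0, 1)

# inside plotsample, the imshow line could then become:
# axs[i].imshow(unnormalize(npimg))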

Use DataLoader to load the datasets in batches, batch_size = 32.

python
batch_size = 32
train_dl = torch.utils.data.DataLoader(train_dataset,
                                       batch_size=batch_size,
                                       shuffle=True)
test_dl = torch.utils.data.DataLoader(test_dataset,
                                      batch_size=batch_size,
                                      shuffle=True)

len(train_dl.dataset), len(test_dl.dataset) # 503, 76
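
A quick look at one batch confirms the tensor shapes (a small sketch; the shapes in the comments are the expected values):

python
X, y = next(iter(train_dl))
print(X.shape)  # torch.Size([32, 3, 224, 224])
print(y.shape)  # torch.Size([32])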

Model Selection

We again choose a simple convolutional neural network, this time written with Sequential so that each block forms a self-contained module.

python
class Model(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(3, 12, kernel_size=5), # 224 -> 220
            nn.BatchNorm2d(12),
            nn.ReLU()
        )

        self.conv2 = nn.Sequential(
            nn.Conv2d(12, 12, kernel_size=5), # 220 -> 216
            nn.BatchNorm2d(12),
            nn.ReLU()
        )

        self.pool3 = nn.Sequential(
            nn.MaxPool2d(2)             # 216 -> 108
        )

        self.conv4 = nn.Sequential(
            nn.Conv2d(12, 24, kernel_size=5),  # 108 -> 104
            nn.BatchNorm2d(24),
            nn.ReLU()
        )

        self.conv5 = nn.Sequential(
            nn.Conv2d(24, 24, kernel_size=5),  # 104 -> 100
            nn.BatchNorm2d(24),
            nn.ReLU()
        )

        self.pool6 = nn.Sequential(
            nn.MaxPool2d(2)  # 100 -> 50
        )

        self.dropout = nn.Sequential(
            nn.Dropout(0.2)
        )

        self.fc = nn.Sequential(
            nn.Linear(50 * 50 * 24, len(classNames))  # flattened 24 x 50 x 50 feature map
        )

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.pool3(x)
        x = self.conv4(x)
        x = self.conv5(x)
        x = self.pool6(x)
        x = self.dropout(x)
        x = x.view(-1, 50 * 50 * 24)  # flatten to (batch, 60000)
        x = self.fc(x)
        return x

Model Initialization

python
from torchsummary import summary
# move the model to the GPU
model = Model().to(device)
summary(model, input_size=(3, 224, 224))

Define the training function.

python
def train(dataloader, model, loss_fn, opt):
    size = len(dataloader.dataset)
    num_batches = len(dataloader)
    train_acc, train_loss = 0, 0

    for X, y in dataloader:
        X, y = X.to(device), y.to(device)
        pred = model(X)
        loss = loss_fn(pred, y)

        opt.zero_grad()  # clear gradients from the previous step
        loss.backward()  # backpropagate
        opt.step()       # update the parameters

        train_acc += (pred.argmax(1) == y).type(torch.float).sum().item()
        train_loss += loss.item()

    train_acc /= size
    train_loss /= num_batches
    return train_acc, train_loss

Define the test function.

python
def test(dataloader, model, loss_fn):
    size = len(dataloader.dataset)
    num_batches = len(dataloader)
    test_acc, test_loss = 0, 0
    with torch.no_grad():  # no gradients needed during evaluation
        for X, y in dataloader:
            X, y = X.to(device), y.to(device)
            pred = model(X)
            loss = loss_fn(pred, y)
    
            test_acc += (pred.argmax(1) == y).type(torch.float).sum().item()
            test_loss += loss.item()

    test_acc /= size
    test_loss /= num_batches
    return test_acc, test_loss

Define some hyperparameters: the loss function, learning rate, and optimizer.

python
loss_fn = nn.CrossEntropyLoss()
learn_rate = 0.0001
opt = torch.optim.SGD(model.parameters(), lr=learn_rate)

Define the learning-rate decay function. The idea is that as the epochs progress, the learning rate keeps shrinking, which makes it easier for the model to converge.

python
def adjust_rate(opt, epoch, start_lr):
    lr = start_lr * (0.92 ** (epoch // 2))  # multiply the lr by 0.92 every 2 epochs
    for param_group in opt.param_groups:
        param_group['lr'] = lr
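
The same schedule can also be expressed with PyTorch's built-in scheduler: torch.optim.lr_scheduler.StepLR with step_size=2 and gamma=0.92 is equivalent (a sketch; this post itself uses the manual function above):

python
scheduler = torch.optim.lr_scheduler.StepLR(opt, step_size=2, gamma=0.92)
# in the training loop, call scheduler.step() once at the end of each epoch
# instead of calling adjust_rate(opt, epoch, learn_rate)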

Start training.

python
import time
epochs = 30
train_loss = []
train_acc = []
test_loss = []
test_acc = []

T1 = time.time()

best_acc = 0
PATH = './my_model.pth'

for epoch in range(epochs):

    adjust_rate(opt, epoch, learn_rate)
    model.train()
    epoch_train_acc, epoch_train_loss = train(train_dl, model, loss_fn, opt)
    
    model.eval() # evaluation mode: disables dropout and freezes batch-norm statistics
    epoch_test_acc, epoch_test_loss = test(test_dl, model, loss_fn)

    if epoch_test_acc > best_acc:
        best_acc = epoch_test_acc
        torch.save(model.state_dict(), PATH)
        print("model save")
        
        
    train_acc.append(epoch_train_acc)
    train_loss.append(epoch_train_loss)
    test_acc.append(epoch_test_acc)
    test_loss.append(epoch_test_loss)
    
    print("epoch:%d, train_acc:%.1f%%, train_loss:%.3f, test_acc:%.1f%%, test_loss:%.3f"
          % (epoch + 1, epoch_train_acc * 100, epoch_train_loss, epoch_test_acc * 100, epoch_test_loss))
print("Done")
T2 = time.time()
print('Elapsed time: %s ms' % ((T2 - T1)*1000))

However, the results were not great: training got stuck at a local minimum and stopped improving.

After some experimentation, changing the learning rate to 0.001 gave the best results.
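
Note that the listing below reuses the model and opt defined earlier; to genuinely retrain from scratch at the new rate, the model and optimizer presumably need to be re-created first (an assumption, since the original post does not show this step):

python
# Assumption: re-initialize so training restarts from scratch with lr = 0.001
model = Model().to(device)
learn_rate = 0.001
opt = torch.optim.SGD(model.parameters(), lr=learn_rate)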

python
import time
epochs = 30
train_loss = []
train_acc = []
test_loss = []
test_acc = []

T1 = time.time()

best_acc = 0
PATH = './my_model.pth'

for epoch in range(epochs):

    model.train()
    epoch_train_acc, epoch_train_loss = train(train_dl, model, loss_fn, opt)
    
    model.eval() # evaluation mode: disables dropout and freezes batch-norm statistics
    epoch_test_acc, epoch_test_loss = test(test_dl, model, loss_fn)

    if epoch_test_acc > best_acc:
        best_acc = epoch_test_acc
        torch.save(model.state_dict(), PATH)
        print("model save")
        
    train_acc.append(epoch_train_acc)
    train_loss.append(epoch_train_loss)
    test_acc.append(epoch_test_acc)
    test_loss.append(epoch_test_loss)
    
    print("epoch:%d, train_acc:%.1f%%, train_loss:%.3f, test_acc:%.1f%%, test_loss:%.3f"
          % (epoch + 1, epoch_train_acc * 100, epoch_train_loss, epoch_test_acc * 100, epoch_test_loss))
print("Done")
T2 = time.time()
print('Elapsed time: %s ms' % ((T2 - T1)*1000))

The model has reached 100% accuracy on the training set, and its performance on the test set is also good.

Data Visualization

python
import warnings
warnings.filterwarnings("ignore")               # suppress warnings
plt.rcParams['font.sans-serif']    = ['SimHei'] # display Chinese labels correctly
plt.rcParams['axes.unicode_minus'] = False      # display minus signs correctly
plt.rcParams['figure.dpi']         = 100        # figure resolution

epochs_range = range(epochs)

plt.figure(figsize=(12, 3))
plt.subplot(1, 2, 1)

plt.plot(epochs_range, train_acc, label='Training Accuracy')
plt.plot(epochs_range, test_acc, label='Test Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Test Accuracy')

plt.subplot(1, 2, 2)
plt.plot(epochs_range, train_loss, label='Training Loss')
plt.plot(epochs_range, test_loss, label='Test Loss')
plt.legend(loc='upper right')
plt.title('Training and Test Loss')
plt.show()

Model Prediction

python
from PIL import Image 

classes = list(train_dataset.class_to_idx)  # class names, ordered by index

def predict_one_image(image_path, model, transform, classes):

    test_img = Image.open(image_path).convert('RGB')
    plt.imshow(test_img)  # show the image being predicted

    test_img = transform(test_img)
    img = test_img.to(device).unsqueeze(0)  # add a batch dimension

    model.eval()
    with torch.no_grad():  # no gradients needed for inference
        output = model(img)

    _, pred = torch.max(output, 1)
    pred_class = classes[pred.item()]
    print(f'Prediction: {pred_class}')

Run the prediction on 2.jpg.

python
predict_one_image(image_path='./data/snk/test/adidas/2.jpg', 
                  model=model, 
                  transform=train_transforms, 
                  classes=classes)

Prediction: adidas
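
The prediction above uses whatever weights the model holds after the last epoch. To predict with the best checkpoint saved during training instead, the weights can be reloaded first (a sketch, using the PATH defined earlier):

python
# reload the best checkpoint saved during training
model.load_state_dict(torch.load(PATH, map_location=device))
model.to(device)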

Summary

Learning-rate decay is a very useful tool, but sometimes training with decay seems to do worse than without it; it feels prone to converging prematurely.
