应该算是完结啦~再次感谢土堆老师!
模型训练
模型训练基本可以分为以下几个步骤按序执行:
引入数据集-使用dataloader加载数据集-建立模型-设置损失函数-设置优化器-进行训练-训练中计算损失,并使用优化器更新参数-模型测试-模型存储
习惯上会将model和train代码分开写,当然一开始混合写也没啥问题,直接给出一个例程:
python
# train.py
import torch
from torchvision import transforms, datasets
from torch.utils.data import DataLoader
import torch.nn as nn
from model import *
import time
from torch.utils.tensorboard import SummaryWriter

# Pipeline: load dataset -> wrap in DataLoader -> build model -> loss ->
# optimizer -> train loop (backprop + step) -> evaluate -> save checkpoint.

data_transforms = transforms.Compose([
    transforms.ToTensor()
])

# Load the CIFAR-10 dataset (downloaded into ./dataset on first run).
train_data = datasets.CIFAR10("./dataset",
                              train=True,
                              transform=data_transforms,
                              download=True)
test_data = datasets.CIFAR10("./dataset",
                             train=False,
                             transform=data_transforms,
                             download=True)

# Wrap the datasets in DataLoaders for batched iteration.
train_dataloader = DataLoader(train_data, batch_size=64)
test_dataloader = DataLoader(test_data, batch_size=64)

start_time = time.time()

# Build the model (MyModule is defined in model.py).
my_module = MyModule()

# Loss function for multi-class classification.
cross_loss = nn.CrossEntropyLoss()

# Optimizer with a fixed learning rate.
learning_rate = 1e-2
optimizer = torch.optim.SGD(my_module.parameters(), lr=learning_rate)

# Training configuration.
epoch = 10
total_train_steps = 0
writer = SummaryWriter("train_logs")

for i in range(epoch):
    print("第{}轮训练".format(i + 1))
    # --- training phase ---
    my_module.train()  # only affects certain layers (e.g. Dropout/BatchNorm)
    for data in train_dataloader:
        imgs, targets = data
        outputs = my_module(imgs)
        # Compute the loss.
        loss = cross_loss(outputs, targets)
        # Backpropagate and update the parameters.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        total_train_steps += 1
        if total_train_steps % 100 == 0:
            print("训练次数:{},Loss:{}".format(total_train_steps, loss.item()))
            writer.add_scalar("train_loss", loss.item(), total_train_steps)

    # --- evaluation phase: no gradient updates ---
    my_module.eval()  # likewise only affects certain layers
    total_test_loss = 0
    total_accuracy = 0
    test_data_size = len(test_data)
    with torch.no_grad():
        for data in test_dataloader:
            imgs, targets = data
            outputs = my_module(imgs)
            loss = cross_loss(outputs, targets)
            total_test_loss += loss.item()
            # Optional for classification: count correct predictions.
            # argmax(1) takes the argmax along each row (per sample);
            # argmax(0) would take it along each column.
            accuracy = (outputs.argmax(1) == targets).sum()
            # .item() keeps the accumulator a plain Python number
            # instead of a 0-d tensor.
            total_accuracy += accuracy.item()
    print("第{}轮的测试集Loss:{}".format(i + 1, total_test_loss))
    print("测试集准确率:{}".format(total_accuracy / test_data_size))
    writer.add_scalar("test_loss", total_test_loss, i)
    end_time = time.time()
    print("time:{}".format(end_time - start_time))

    # Save a checkpoint every 5 epochs. Saving the state_dict (rather than
    # pickling the whole module) is the recommended, more portable form and
    # matches what test.py loads via load_state_dict().
    if i % 5 == 0:
        torch.save(my_module.state_dict(), "my_module_{}.pth".format(i))
        print("模型存储成功")

writer.close()
使用GPU加速训练(方式一)
上述写法默认是采用cpu进行训练的,会比较慢,为了加速训练过程,我们需要用GPU进行加速训练。对应有两种方式,推荐方式二 (大部分例程也都是采用方式二的)
需要用到GPU加速的主要有如图中的三个部分:
对应有.cuda()的参数,只要在原始位置后面加上.cuda()就可以,考虑到有些设备没有GPU,建议加上torch.cuda.is_available()的判断。同样给出完整例程:
python
# train_gpu1.py
import torch
from torchvision import transforms, datasets
from torch.utils.data import DataLoader
import torch.nn as nn
from model import *
import time
from torch.utils.tensorboard import SummaryWriter

# GPU variant 1: call .cuda() on the model, the loss module, and every
# input/target batch, guarded by torch.cuda.is_available() so the script
# still runs on CPU-only machines.

data_transforms = transforms.Compose([
    transforms.ToTensor()
])

# Load the CIFAR-10 dataset.
train_data = datasets.CIFAR10("./dataset",
                              train=True,
                              transform=data_transforms,
                              download=True)
test_data = datasets.CIFAR10("./dataset",
                             train=False,
                             transform=data_transforms,
                             download=True)

# Wrap the datasets in DataLoaders.
train_dataloader = DataLoader(train_data, batch_size=64)
test_dataloader = DataLoader(test_data, batch_size=64)

start_time = time.time()

# Build the model; move it to the GPU when one is available.
my_module = MyModule()
if torch.cuda.is_available():
    my_module.cuda()

# Loss function, also moved to the GPU when available.
cross_loss = nn.CrossEntropyLoss()
if torch.cuda.is_available():
    cross_loss.cuda()

# Optimizer with a fixed learning rate.
learning_rate = 1e-2
optimizer = torch.optim.SGD(my_module.parameters(), lr=learning_rate)

# Training configuration.
epoch = 10
total_train_steps = 0
writer = SummaryWriter("train_logs")

for i in range(epoch):
    print("第{}轮训练".format(i + 1))
    # --- training phase ---
    my_module.train()  # only affects certain layers
    for data in train_dataloader:
        imgs, targets = data
        # Inputs must live on the same device as the model.
        if torch.cuda.is_available():
            imgs = imgs.cuda()
            targets = targets.cuda()
        outputs = my_module(imgs)
        # Compute the loss.
        loss = cross_loss(outputs, targets)
        # Backpropagate and update the parameters.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        total_train_steps += 1
        if total_train_steps % 100 == 0:
            print("训练次数:{},Loss:{}".format(total_train_steps, loss.item()))
            writer.add_scalar("train_loss", loss.item(), total_train_steps)

    # --- evaluation phase: no gradient updates ---
    my_module.eval()  # likewise only affects certain layers
    total_test_loss = 0
    total_accuracy = 0
    test_data_size = len(test_data)
    with torch.no_grad():
        for data in test_dataloader:
            imgs, targets = data
            if torch.cuda.is_available():
                imgs = imgs.cuda()
                targets = targets.cuda()
            outputs = my_module(imgs)
            loss = cross_loss(outputs, targets)
            total_test_loss += loss.item()
            # Optional for classification: count correct predictions.
            # argmax(1) takes the per-row (per-sample) argmax.
            accuracy = (outputs.argmax(1) == targets).sum()
            # .item() keeps the accumulator a plain Python number.
            total_accuracy += accuracy.item()
    print("第{}轮的测试集Loss:{}".format(i + 1, total_test_loss))
    print("测试集准确率:{}".format(total_accuracy / test_data_size))
    writer.add_scalar("test_loss", total_test_loss, i)
    end_time = time.time()
    print("time:{}".format(end_time - start_time))

    # Save the state_dict checkpoint every 5 epochs.
    if i % 5 == 0:
        torch.save(my_module.state_dict(), "my_module_{}.pth".format(i))
        print("模型存储成功")

writer.close()
使用GPU加速训练(方式二)
现在更常见的写法是用device+.to(device)的搭配,需要引入的位置和方式一提到的没有任何差异,主要就是使用上的语法会有一点点不一样,所以直接给出一个例程,大家看完就知道怎么用了:
python
# train_gpu2.py
import torch
from torchvision import transforms, datasets
from torch.utils.data import DataLoader
import torch.nn as nn
from model import *
from torch.utils.tensorboard import SummaryWriter
data_transforms = transforms.Compose([
transforms.ToTensor()
])
#引入数据集
train_data = datasets.CIFAR10("./dataset",
train=True,
transform=data_transforms,
download=True)
test_data = datasets.CIFAR10("./dataset",
train=False,
transform=data_transforms,
download=True)
#加载数据
train_dataloader = DataLoader(train_data,batch_size=64)
test_dataloader = DataLoader(test_data,batch_size=64)
#确定设备
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)
#建立模型
my_module = MyModule()
my_module.to(device)
#设置损失函数
cross_loss = nn.CrossEntropyLoss()
cross_loss.to(device)
#设置优化器
#设置学习率
learning_rate = 1e-2
optimizer = torch.optim.SGD(my_module.parameters(),lr=learning_rate)
#进行训练
#设置迭代次数
epoch = 10
total_train_steps = 0
writer = SummaryWriter("train_logs")
for i in range(epoch):
print("第{}轮训练".format(i+1))
#训练
my_module.train() #只对某些层起作用
for data in train_dataloader:
imgs, targets = data
imgs, targets = imgs.to(device), targets.to(device)
outputs = my_module(imgs)
#计算损失
loss = cross_loss(outputs, targets)
#优化模型
optimizer.zero_grad()
loss.backward()
optimizer.step()
total_train_steps +=1
if total_train_steps % 100 ==0:
print("训练次数:{},Loss:{}".format(total_train_steps,loss.item()))
writer.add_scalar("train_loss",loss.item(),total_train_steps)
#测试,不再梯度下降
my_module.eval() #同样只对某些层起作用
total_test_loss = 0
# total_test_steps = 0
total_accuracy = 0
test_data_size = len(test_data)
with torch.no_grad():
for data in test_dataloader:
imgs, targets = data
imgs, targets = imgs.to(device), targets.to(device)
outputs = my_module(imgs)
loss = cross_loss(outputs,targets)
total_test_loss += loss.item()
##对于分类任务可以求一下准确的个数,非必须
#argmax(1)按行取最大的下标 argmax(0)按列取最大的下标
accuracy = (outputs.argmax(1)==targets).sum()
total_accuracy += accuracy
print("第{}轮的测试集Loss:{}".format(i+1,total_test_loss))
print("测试集准确率:{}".format(total_accuracy/test_data_size))
writer.add_scalar("test_loss",total_test_loss,i)
#存储模型
if i % 5 == 0:
torch.save(my_module.state_dict(),"my_module_{}.pth".format(i))
print("模型存储成功")
writer.close()
使用模型完成任务
这个标题我想了很久应该叫什么才能和内容对应上...土堆老师原来名字叫模型验证我没看之前一直以为是evaluate的过程啊啊啊结果是test的过程
实际上我们训练完模型得到一堆准确率啊或者什么的时候并不代表我们完成了整个事情,说人话就是没啥用,所以这部分其实就是教我们怎么用得到的模型在其他数据上使用,这一部分还蛮简单的,和训练中的evaluate部分使用差不多,注意点就是别忘了给图片reshape成含有batch_size的形状(特别是单张的情况下),当然,如果有报错也可以先考虑是不是形状不太对的原因...
python
# test.py
# Run a trained checkpoint on a single external image.
from PIL import Image
import torch  # needed for torch.load / torch.reshape (was missing before)
import torchvision
from torchvision import transforms
from model import *

image_path = "./test_imgs/cat.jpg"
image = Image.open(image_path)
# image = image.convert('RGB')  # uncomment for png images (drops alpha channel)

# Same preprocessing as training: resize to CIFAR-10's 32x32 and to tensor.
data_transforms = torchvision.transforms.Compose([transforms.Resize((32, 32)),
                                                  transforms.ToTensor()])
image = data_transforms(image)
print(image.shape)

# Rebuild the model and load the saved state_dict checkpoint.
model = MyModule()
model.load_state_dict(torch.load("my_module_5.pth"))

# Add the batch dimension: the model expects (batch, channels, H, W).
image = torch.reshape(image, (1, 3, 32, 32))

model.eval()
with torch.no_grad():
    output = model(image)
print(output)
# argmax(1) gives the predicted class index per sample.
print(output.argmax(1))
# torch.Size([3, 32, 32])
# tensor([[-1.5852, -1.3985, 1.0891, 2.5762, 0.1534, 2.0844, 0.6164, 1.7049,
# -4.7464, -1.4447]])
# tensor([3])
其实我的模型准确率没有很高,但是对于这张图片竟然惊人的分对了(第3类-cat)