# 1. Import dependencies
import torch
import torch.nn as nn
from torchvision.datasets import CIFAR10
from torchvision.transforms import ToTensor
from torchvision.transforms import Compose
import torch.optim as optim
from torch.utils.data import DataLoader
import time
import matplotlib.pyplot as plt
from torchsummary import summary
BATCH_SIZE = 8
# 2. Load the dataset
def create_dataset():
    # Load the training and validation splits of CIFAR10
    # (download=True fetches the data on first run if it is not already under 'data')
    train = CIFAR10(root='data', train=True, download=True, transform=Compose([ToTensor()]))
    valid = CIFAR10(root='data', train=False, download=True, transform=Compose([ToTensor()]))
    # Return both splits
    return train, valid
# if __name__ == '__main__':
#     # Load the dataset
#     train_dataset, valid_dataset = create_dataset()
#     # Dataset classes
#     print("Dataset classes:", train_dataset.class_to_idx)
#     # Image data in the dataset
#     print("Training set data shape:", train_dataset.data.shape)
#     print("Validation set data shape:", valid_dataset.data.shape)
#     # Show one image
#     plt.figure(figsize=(2, 2))
#     plt.imshow(train_dataset.data[1])
#     plt.title(train_dataset.targets[1])
#     plt.show()
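# For reference (standard CIFAR10 statistics): there are 10 classes,
# train_dataset.data has shape (50000, 32, 32, 3) and valid_dataset.data
# has shape (10000, 32, 32, 3).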
# 3. Build the model
class ImageClassification(nn.Module):
    # Define the network structure
    def __init__(self):
        super(ImageClassification, self).__init__()
        # Convolution + pooling layers
        self.conv1 = nn.Conv2d(3, 6, stride=1, kernel_size=3)
        self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv2 = nn.Conv2d(6, 16, stride=1, kernel_size=3)
        self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)
        # Fully connected layers
        self.linear1 = nn.Linear(576, 120)
        self.linear2 = nn.Linear(120, 84)
        self.out = nn.Linear(84, 10)

    # Define the forward pass
    def forward(self, x):
        # Convolution + ReLU + pooling
        x = torch.relu(self.conv1(x))
        x = self.pool1(x)
        # Convolution + ReLU + pooling
        x = torch.relu(self.conv2(x))
        x = self.pool2(x)
        # Flatten the feature maps into a 1-D feature vector per sample
        x = x.reshape(x.size(0), -1)
        # Fully connected layers
        x = torch.relu(self.linear1(x))
        x = torch.relu(self.linear2(x))
        # Return the output logits
        return self.out(x)
# if __name__ == '__main__':
#     # Instantiate the model
#     model = ImageClassification()
#     summary(model, input_size=(3, 32, 32), batch_size=1)
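# Shape check (a minimal sketch, kept commented out like the demo blocks above):
# for a 3x32x32 input, conv1 (3x3, stride 1, no padding) gives 6x30x30, pool1
# gives 6x15x15, conv2 gives 16x13x13, pool2 gives 16x6x6, so the flattened
# feature vector has 16 * 6 * 6 = 576 elements -- the in_features of self.linear1.
# if __name__ == '__main__':
#     model = ImageClassification()
#     dummy = torch.randn(1, 3, 32, 32)  # a fake CIFAR10-sized input (illustrative only)
#     feats = model.pool1(torch.relu(model.conv1(dummy)))
#     feats = model.pool2(torch.relu(model.conv2(feats)))
#     print(feats.shape)  # expected: torch.Size([1, 16, 6, 6]) -> 576 features after flattening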
# 4. Training function
def train(model, train_dataset):
    criterion = nn.CrossEntropyLoss()  # loss function
    optimizer = optim.Adam(model.parameters(), lr=1e-3)  # optimizer
    epoch = 20  # number of training epochs
    for epoch_idx in range(epoch):
        # Build the data loader
        dataloader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True)
        sam_num = 0  # number of batches
        total_loss = 0.0  # accumulated loss
        start = time.time()  # start time
        # Iterate over the data and train the network
        for x, y in dataloader:
            output = model(x)
            loss = criterion(output, y)  # compute the loss
            optimizer.zero_grad()  # clear gradients
            loss.backward()  # backpropagation
            optimizer.step()  # update parameters
            total_loss += loss.item()  # accumulate the loss
            sam_num += 1
        print('epoch:%2s loss:%.5f time:%.2fs' % (epoch_idx + 1, total_loss / sam_num, time.time() - start))
    # Save the model
    torch.save(model.state_dict(), 'data/image_classification.pth')
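# Note: total_loss / sam_num printed above is the average loss per batch for the
# epoch, since sam_num counts batches; with BATCH_SIZE = 8, CIFAR10's 50000
# training images give 6250 batches per epoch.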
def test(valid_dataset):
    # Build the data loader
    dataloader = DataLoader(valid_dataset, batch_size=BATCH_SIZE, shuffle=True)
    # Instantiate the model and load the trained weights
    model = ImageClassification()
    model.load_state_dict(torch.load('data/image_classification.pth'))
    model.eval()
    # Accuracy counters
    total_correct = 0
    total_samples = 0
    # Iterate over each batch, get predictions, and count correct ones
    with torch.no_grad():
        for x, y in dataloader:
            output = model(x)
            total_correct += (torch.argmax(output, dim=-1) == y).sum().item()
            total_samples += len(y)
    # Print the accuracy
    print('Acc: %.2f' % (total_correct / total_samples))
if __name__ == '__main__':
    # Load the dataset
    train_dataset, valid_dataset = create_dataset()
    # Instantiate the model
    model = ImageClassification()
    # Train the model
    # train(model, train_dataset)
    # Evaluate the model
    test(valid_dataset)
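    # Note: run train(model, train_dataset) at least once before calling test(),
    # otherwise 'data/image_classification.pth' does not exist and load_state_dict fails.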