PyTorch Cat vs. Dog Recognition Example

Cat and dog recognition dataset: https://download.csdn.net/download/Victor_Li_/88483483?spm=1001.2014.3001.5501

Training set image path: ./cats_and_dogs_train

Test set image path: ./cats_and_dogs_test
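
Both directories follow the layout that torchvision's ImageFolder expects: one subfolder per class, with the label index inferred from the sorted subfolder names. A minimal sketch of that assumption (the subfolder names here are illustrative, not taken from the download):

# assumed layout:
# cats_and_dogs_train/
#     cat/  cat.0.jpg, cat.1.jpg, ...
#     dog/  dog.0.jpg, dog.1.jpg, ...
import torchvision

ds = torchvision.datasets.ImageFolder('./cats_and_dogs_train')
print(ds.classes)        # e.g. ['cat', 'dog'] -- sorted subfolder names
print(ds.class_to_idx)   # e.g. {'cat': 0, 'dog': 1}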

The training code (Python) is as follows:

import torch
import torchvision
import matplotlib.pyplot as plt
import torchvision.models as models
import torch.nn as nn
import torch.optim as optim
import torch.multiprocessing as mp
import time
import os
from torch.optim.lr_scheduler import StepLR

if __name__ == '__main__':
    torch.autograd.set_detect_anomaly(True)  # helpful for debugging, but slows training noticeably
    mp.freeze_support()
    train_on_gpu = torch.cuda.is_available()
    if not train_on_gpu:
        print('CUDA is not available. Training on CPU...')
    else:
        print('CUDA is available! Training on GPU...')

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    batch_size = 32
    # Data preprocessing and augmentation transforms
    transform = torchvision.transforms.Compose([
        torchvision.transforms.Resize((224, 224)),  # resize images to 224x224
        torchvision.transforms.RandomHorizontalFlip(),
        torchvision.transforms.RandomRotation(45),
        torchvision.transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2),
        torchvision.transforms.ToTensor(),  # convert to a tensor
        torchvision.transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])  # normalize to roughly [-1, 1]
    ])
    dataset = torchvision.datasets.ImageFolder('./cats_and_dogs_train',
                                               transform=transform)

    val_ratio = 0.2
    val_size = int(len(dataset) * val_ratio)
    train_size = len(dataset) - val_size
    train_dataset, val_dataset = torch.utils.data.random_split(dataset, [train_size, val_size])

    train_dataset = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=4,
                                                pin_memory=True)
    val_dataset = torch.utils.data.DataLoader(val_dataset, batch_size=batch_size, num_workers=4, pin_memory=True)

    # x, y = next(iter(val_dataset))
    # x = x.permute(1, 2, 0)  # move the channel dimension to the end
    # x = (x - x.min()) / (x.max() - x.min())  # undo the normalization for display
    # plt.imshow(x)
    # plt.axis('off')  # hide the axes
    # plt.show()

    model = models.resnet34(weights=None)  # ResNet-34 trained from scratch (no pretrained weights)

    num_classes = 2
    # replace the classifier head: two sigmoid outputs trained with BCELoss on one-hot labels
    model.fc = nn.Sequential(
        nn.Dropout(p=0.2),
        # nn.BatchNorm1d(model.fc.in_features),
        nn.Linear(model.fc.in_features, num_classes),
        nn.Sigmoid(),
    )
    # L1/L2 regularization strengths. The penalty is recomputed on every training
    # step so that it contributes to the gradients; a norm computed once up front
    # and detached would only add a constant to the loss.
    lambda_L1 = 0.001
    lambda_L2 = 0.0001

    def l1_l2_penalty(model):
        reg_l1 = sum(torch.norm(w, p=1) for n, w in model.named_parameters() if 'bias' not in n)
        reg_l2 = sum(torch.norm(w, p=2) for n, w in model.named_parameters() if 'bias' not in n)
        return lambda_L1 * reg_l1 + lambda_L2 * reg_l2

    for name, param in model.named_parameters():
        param.requires_grad = True  # fine-tune every layer

    optimizer = optim.Adam(model.parameters(), lr=0.01)
    scheduler = StepLR(optimizer, step_size=5, gamma=0.9)
    criterion = nn.BCELoss().to(device)

    model.to(device)
    # print(model)
    loadfilename = "recognize_cats_and_dogs.pt"
    savefilename = "recognize_cats_and_dogs3.pt"

    # resume from a previous checkpoint if one exists; otherwise train from scratch
    if os.path.exists(loadfilename):
        checkpoint = torch.load(loadfilename)
        model.load_state_dict(checkpoint['model_state_dict'])


    def save_checkpoint(epoch, model, optimizer, filename, train_loss=0., val_loss=0.):
        checkpoint = {
            'epoch': epoch,
            'model_state_dict': model.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
            'train_loss': train_loss,
            'val_loss': val_loss,
        }
        torch.save(checkpoint, filename)


    num_epochs = 100
    train_loss = []
    for epoch in range(num_epochs):
        model.train()  # dropout active during training
        running_loss = 0
        correct = 0
        total = 0
        epoch_start_time = time.time()
        for i, (inputs, labels) in enumerate(train_dataset):
            # move the batch to the selected device
            inputs, labels = inputs.to(device), labels.to(device)
            # forward pass
            outputs = model(inputs)
            one_hot = nn.functional.one_hot(labels, num_classes).float()
            # compute the loss: BCE against one-hot labels plus the L1/L2 penalty
            loss = criterion(outputs, one_hot) + l1_l2_penalty(model)
            loss.backward()
            if ((i + 1) % 2 == 0) or (i + 1 == len(train_dataset)):
                # update the parameters (gradients accumulated over 2 mini-batches)
                optimizer.step()
                optimizer.zero_grad()

            # record loss and accuracy
            running_loss += loss.item()
            train_loss.append(loss.item())
            _, predicted = torch.max(outputs.data, 1)
            correct += (predicted == labels).sum().item()
            total += labels.size(0)
        accuracy_train = 100 * correct / total
        # evaluate on the held-out validation split
        model.eval()  # disable dropout while evaluating
        with torch.no_grad():
            running_loss_test = 0
            correct_test = 0
            total_test = 0
            for inputs, labels in val_dataset:
                inputs, labels = inputs.to(device), labels.to(device)
                outputs = model(inputs)
                one_hot = nn.functional.one_hot(labels, num_classes).float()
                loss = criterion(outputs, one_hot)
                running_loss_test += loss.item()

                _, predicted = torch.max(outputs.data, 1)
                correct_test += (predicted == labels).sum().item()
                total_test += labels.size(0)
            accuracy_test = 100 * correct_test / total_test
        # print per-epoch loss and accuracy
        epoch_end_time = time.time()
        epoch_time = epoch_end_time - epoch_start_time
        avg_train_loss = running_loss / len(train_dataset)
        val_loss = running_loss_test / len(val_dataset)
        print(
            "Epoch [{}/{}], Time: {:.4f}s, Train Loss: {:.4f}, Train Accuracy: {:.2f}%, Val Loss: {:.4f}, Val Accuracy: {:.2f}%"
            .format(epoch + 1, num_epochs, epoch_time, avg_train_loss,
                    accuracy_train, val_loss, accuracy_test))
        save_checkpoint(epoch, model, optimizer, savefilename, avg_train_loss, val_loss)
        scheduler.step()

    # plt.plot(train_loss, label='Train Loss')
    # # add legend and axis labels
    # plt.legend()
    # plt.xlabel('Steps')
    # plt.ylabel('Loss')
    # plt.title('Training Loss')
    #
    # # show the figure
    # plt.show()
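
When resuming, the training script above restores only the model weights. The checkpoints written by save_checkpoint also contain the optimizer state and the epoch index, so a fuller resume is possible; a minimal sketch, assuming the checkpoint layout shown above:

# assumes `model`, `optimizer` and `device` are built exactly as in the training script
checkpoint = torch.load("recognize_cats_and_dogs.pt", map_location=device)
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
start_epoch = checkpoint['epoch'] + 1  # continue counting from the saved epoch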

The test code (Python) is as follows:

import torch
import torchvision
import torch.nn as nn
import torchvision.models as models
import matplotlib.pyplot as plt
import torch.multiprocessing as mp

if __name__ == '__main__':
    mp.freeze_support()
    train_on_gpu = torch.cuda.is_available()
    if not train_on_gpu:
        print('CUDA is not available. Running on CPU...')
    else:
        print('CUDA is available! Running on GPU...')

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    batch_size = 32
    transform = torchvision.transforms.Compose([
        torchvision.transforms.Resize((224, 224)),  # resize images to 224x224
        torchvision.transforms.ToTensor(),  # convert to a tensor
        torchvision.transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])  # same normalization as in training
    ])
    dataset = torchvision.datasets.ImageFolder('./cats_and_dogs_test',
                                                     transform=transform)

    test_dataset = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True,num_workers=4, pin_memory=True)

    model = models.resnet34(weights=None)

    num_classes = 2
    for param in model.parameters():
        param.requires_grad = False

    # use the same classifier head as in training so the checkpoint's fc weights load cleanly
    model.fc = nn.Sequential(
        nn.Dropout(p=0.2),
        nn.Linear(model.fc.in_features, num_classes),
        nn.Sigmoid(),
    )
    model.to(device)
    model.eval()  # disable dropout for inference
    # print(model)

    filename = "recognize_cats_and_dogs.pt"
    checkpoint = torch.load(filename)
    model.load_state_dict(checkpoint['model_state_dict'])

    class_name = ['cat', 'dog']  # ImageFolder assigns label indices by sorted subfolder name
    # run the model over the test images and display each prediction
    with torch.no_grad():
        for inputs, labels in test_dataset:
            inputs, labels = inputs.to(device), labels.to(device)
            output = model(inputs)
            _, predicted = torch.max(output.data, 1)
            for x,y,z in zip(inputs,labels,predicted):
                x = (x - x.min()) / (x.max() - x.min())  # rescale to [0, 1] for display
                plt.imshow(x.cpu().permute(1,2,0))
                plt.axis('off')
                plt.title('predicted: {0}'.format(class_name[z]))
                plt.show()
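
The loop above only visualizes individual predictions. Overall accuracy over the test folder can be computed with a few extra lines; a minimal sketch, reusing the model and the test_dataset loader defined in the script above:

# assumes `model`, `test_dataset` and `device` as defined in the test script
correct, total = 0, 0
with torch.no_grad():
    for inputs, labels in test_dataset:
        inputs, labels = inputs.to(device), labels.to(device)
        _, predicted = torch.max(model(inputs), 1)
        correct += (predicted == labels).sum().item()
        total += labels.size(0)
print('Test accuracy: {:.2f}%'.format(100 * correct / total))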

Some of the test results (predicted images) are shown below.
