PyTorch Cat-vs-Dog Classification Example

Cat-and-dog dataset: https://download.csdn.net/download/Victor_Li_/88483483?spm=1001.2014.3001.5501

Training set image path

Test set image path
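ImageFolder derives the class labels from the sub-directory names, so both image folders used in the code ('./cats_and_dogs_train' and './cats_and_dogs_test') are assumed to be laid out roughly like this (the folder names 'cat' and 'dog' and the file names are only illustrative; whatever sub-directories are present become the classes, sorted alphabetically):

cats_and_dogs_train/
    cat/
        cat.0.jpg
        cat.1.jpg
        ...
    dog/
        dog.0.jpg
        dog.1.jpg
        ...

With that layout, dataset.classes evaluates to ['cat', 'dog'], which is what the class_name list in the test script assumes.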

The training code is as follows:

import torch
import torchvision
import matplotlib.pyplot as plt
import torchvision.models as models
import torch.nn as nn
import torch.optim as optim
import torch.multiprocessing as mp
import time
from torch.optim.lr_scheduler import StepLR

if __name__ == '__main__':
    torch.autograd.set_detect_anomaly(True)
    mp.freeze_support()
    train_on_gpu = torch.cuda.is_available()
    if not train_on_gpu:
        print('CUDA is not available. Training on CPU...')
    else:
        print('CUDA is available! Training on GPU...')

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    batch_size = 32
    # Data preprocessing and augmentation transforms
    transform = torchvision.transforms.Compose([
        torchvision.transforms.Resize((224, 224)),  # resize images to 224x224
        torchvision.transforms.RandomHorizontalFlip(),
        torchvision.transforms.RandomRotation(45),
        torchvision.transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2),
        torchvision.transforms.ToTensor(),  # convert to tensor
        torchvision.transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])  # normalize
    ])
    dataset = torchvision.datasets.ImageFolder('./cats_and_dogs_train',
                                               transform=transform)

    # Hold out 20% of the images as a validation split
    val_ratio = 0.2
    val_size = int(len(dataset) * val_ratio)
    train_size = len(dataset) - val_size
    train_dataset, val_dataset = torch.utils.data.random_split(dataset, [train_size, val_size])

    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=4,
                                               pin_memory=True)
    val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=batch_size, num_workers=4, pin_memory=True)

    # Optional: visualize one validation image
    # x, y = next(iter(val_loader))
    # x = x[0].permute(1, 2, 0)  # take the first image and move the channel dimension to the end
    # x = (x - x.min()) / (x.max() - x.min())  # undo the normalization for display
    # plt.imshow(x)
    # plt.axis('off')  # hide the axes
    # plt.show()

    # ResNet-34 trained from scratch (no pretrained weights)
    model = models.resnet34(weights=None)

    num_classes = 2
    # Replace the final fully connected layer with a 2-class head
    model.fc = nn.Sequential(
        nn.Dropout(p=0.2),
        # nn.BatchNorm1d(model.fc.in_features),
        nn.Linear(model.fc.in_features, num_classes),
        nn.Sigmoid(),  # sigmoid outputs, paired with BCELoss on one-hot targets below
    )
    # L1/L2 regularization coefficients; the penalty itself is computed inside the
    # training loop so that it contributes gradients (computing it once up front and
    # detaching it would only add a constant to the loss).
    lambda_L1 = 0.001
    lambda_L2 = 0.0001
    for param in model.parameters():
        param.requires_grad = True

    optimizer = optim.Adam(model.parameters(), lr=0.01)
    scheduler = StepLR(optimizer, step_size=5, gamma=0.9)
    criterion = nn.BCELoss().to(device)

    model.to(device)
    # print(model)
    # Resume training from a previously saved checkpoint; save progress to a new file
    loadfilename = "recognize_cats_and_dogs.pt"
    savefilename = "recognize_cats_and_dogs3.pt"

    checkpoint = torch.load(loadfilename, map_location=device)
    model.load_state_dict(checkpoint['model_state_dict'])


    def save_checkpoint(epoch, model, optimizer, filename, train_loss=0., val_loss=0.):
        checkpoint = {
            'epoch': epoch,
            'model_state_dict': model.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
            'train_loss': train_loss,
            'val_loss': val_loss,
        }
        torch.save(checkpoint, filename)


    num_epochs = 100
    train_loss = []
    for epoch in range(num_epochs):
        model.train()  # put dropout/batch-norm back in training mode after validation
        running_loss = 0
        correct = 0
        total = 0
        epoch_start_time = time.time()
        for i, (inputs, labels) in enumerate(train_loader):
            # Move the batch to the target device
            inputs, labels = inputs.to(device), labels.to(device)
            # Forward pass
            outputs = model(inputs)
            one_hot = nn.functional.one_hot(labels, num_classes).float()
            # L1/L2 penalty over the weights (biases excluded)
            regularization_loss_L1 = sum(torch.norm(param, p=1) for name, param in model.named_parameters() if 'bias' not in name)
            regularization_loss_L2 = sum(torch.norm(param, p=2) for name, param in model.named_parameters() if 'bias' not in name)
            # Data loss plus regularization
            loss = criterion(outputs, one_hot) + lambda_L1 * regularization_loss_L1 + lambda_L2 * regularization_loss_L2
            loss.backward()
            # Accumulate gradients over 2 batches (effective batch size 64) before stepping
            if ((i + 1) % 2 == 0) or (i + 1 == len(train_loader)):
                optimizer.step()
                optimizer.zero_grad()

            # Track loss and accuracy for this batch
            running_loss += loss.item()
            train_loss.append(loss.item())
            _, predicted = torch.max(outputs.data, 1)
            correct += (predicted == labels).sum().item()
            total += labels.size(0)
        accuracy_train = 100 * correct / total
        # Evaluate on the held-out validation split
        model.eval()
        with torch.no_grad():
            running_loss_test = 0
            correct_test = 0
            total_test = 0
            for inputs, labels in val_loader:
                inputs, labels = inputs.to(device), labels.to(device)
                outputs = model(inputs)
                one_hot = nn.functional.one_hot(labels, num_classes).float()
                loss = criterion(outputs, one_hot)
                running_loss_test += loss.item()

                _, predicted = torch.max(outputs.data, 1)
                correct_test += (predicted == labels).sum().item()
                total_test += labels.size(0)
            accuracy_test = 100 * correct_test / total_test
        # Report timing, loss, and accuracy for this epoch
        epoch_end_time = time.time()
        epoch_time = epoch_end_time - epoch_start_time
        epoch_train_loss = running_loss / len(train_loader)
        val_loss = running_loss_test / len(val_loader)
        print(
            "Epoch [{}/{}], Time: {:.4f}s, Train Loss: {:.4f}, Train Accuracy: {:.2f}%, Val Loss: {:.4f}, Val Accuracy: {:.2f}%"
            .format(epoch + 1, num_epochs, epoch_time, epoch_train_loss,
                    accuracy_train, val_loss, accuracy_test))
        save_checkpoint(epoch, model, optimizer, savefilename, epoch_train_loss, val_loss)
        scheduler.step()

    # plt.plot(train_loss, label='Train Loss')
    # # 添加图例和标签
    # plt.legend()
    # plt.xlabel('Iteration')
    # plt.ylabel('Loss')
    # plt.title('Training Loss')
    #
    # # 显示图形
    # plt.show()
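The checkpoint written by save_checkpoint also stores the optimizer state, the epoch index, and the two loss values, although the script above only restores model_state_dict. A minimal resume sketch, reusing the model, optimizer, and device objects already defined in the training script (whether to restore the optimizer state is a judgment call):

# Fuller resume sketch (assumes the checkpoint layout written by save_checkpoint above)
checkpoint = torch.load("recognize_cats_and_dogs3.pt", map_location=device)
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])  # optional: continue with the same Adam state
start_epoch = checkpoint['epoch'] + 1                          # continue counting from the saved epoch
print("resuming from epoch", start_epoch,
      "train_loss", checkpoint['train_loss'],
      "val_loss", checkpoint['val_loss'])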

The test code is as follows:

import torch
import torchvision
import torch.nn as nn
import torchvision.models as models
import matplotlib.pyplot as plt
import torch.multiprocessing as mp

if __name__ == '__main__':
    mp.freeze_support()
    train_on_gpu = torch.cuda.is_available()
    if not train_on_gpu:
        print('CUDA is not available. Training on CPU...')
    else:
        print('CUDA is available! Training on GPU...')

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    batch_size = 32
    transform = torchvision.transforms.Compose([
        torchvision.transforms.Resize((224, 224)),  # resize images to 224x224
        torchvision.transforms.ToTensor(),  # convert to tensor
        torchvision.transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])  # normalize
    ])
    dataset = torchvision.datasets.ImageFolder('./cats_and_dogs_test',
                                               transform=transform)

    test_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=4, pin_memory=True)

    model = models.resnet34(weights=None)

    num_classes = 2
    for param in model.parameters():
        param.requires_grad = False  # inference only, no gradients needed

    # The head must have the same layout as in the training script so the
    # checkpoint keys match; the final activation does not change the argmax.
    model.fc = nn.Sequential(
        nn.Dropout(p=0.2),
        nn.Linear(model.fc.in_features, num_classes),
        nn.Sigmoid(),
    )
    model.to(device)
    # print(model)

    filename = "recognize_cats_and_dogs.pt"
    checkpoint = torch.load(filename, map_location=device)
    model.load_state_dict(checkpoint['model_state_dict'])
    model.eval()  # disable dropout and use running batch-norm statistics

    class_name = ['cat', 'dog']
    # Visualize predictions on the test set
    with torch.no_grad():
        for inputs, labels in test_loader:
            inputs, labels = inputs.to(device), labels.to(device)
            output = model(inputs)
            _, predicted = torch.max(output.data, 1)
            for x, y, z in zip(inputs, labels, predicted):
                x = (x - x.min()) / (x.max() - x.min())  # rescale to [0, 1] for display
                plt.imshow(x.cpu().permute(1, 2, 0))
                plt.axis('off')
                plt.title('predicted: {0}'.format(class_name[z]))
                plt.show()
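The loop above only displays individual predictions. If an aggregate number is also wanted, a minimal sketch of computing overall test accuracy with the same model and test_loader defined in the test script (nothing here beyond what the script already provides):

correct = 0
total = 0
with torch.no_grad():
    for inputs, labels in test_loader:
        inputs, labels = inputs.to(device), labels.to(device)
        outputs = model(inputs)
        _, predicted = torch.max(outputs, 1)          # index of the highest-scoring class
        correct += (predicted == labels).sum().item()
        total += labels.size(0)
print('Test accuracy: {:.2f}%'.format(100 * correct / total))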

Some of the test results are shown below.
