J1 Learning Check-in

```python
# Data preprocessing and loading
import torch
from torch import nn, optim
from torch.utils.data import DataLoader
from torchvision import datasets, transforms, models
import matplotlib.pyplot as plt
import numpy as np
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
data_dir = r"C:\Users\11054\Desktop\kLearning\J1_learning\bird_photos"

transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
])

train_dataset = datasets.ImageFolder(data_dir, transform=transform)
train_size = int(0.8 * len(train_dataset))
val_size = len(train_dataset) - train_size
train_dataset, val_dataset = torch.utils.data.random_split(train_dataset, [train_size, val_size])

train_loader = DataLoader(train_dataset, batch_size=8, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=8, shuffle=False)

class_names = train_dataset.dataset.classes
```
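One caveat: the transform above only resizes and scales pixels to [0, 1]. If you switch to torchvision's pretrained weights (the commented-out `models.resnet50` line further below), the inputs should also be normalized with the ImageNet statistics. A minimal sketch of that variant:

```python
# Optional variant: add ImageNet normalization (required when using pretrained weights)
transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],  # ImageNet channel means
                         std=[0.229, 0.224, 0.225]),  # ImageNet channel stds
])
```

If you adopt this, the prediction plot at the end would need to de-normalize the images before `imshow`, otherwise the colors will look distorted.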
```python
# Define the ResNet-50 model
import torch
import torch.nn as nn
import torch.nn.functional as F

class IdentityBlock(nn.Module):
    def __init__(self, in_channels, filters, kernel_size):
        super(IdentityBlock, self).__init__()
        filters1, filters2, filters3 = filters
        self.conv1 = nn.Conv2d(in_channels, filters1, kernel_size=1)
        self.bn1 = nn.BatchNorm2d(filters1)

        self.conv2 = nn.Conv2d(filters1, filters2, kernel_size=kernel_size, padding=kernel_size // 2)  # 'same' padding for odd kernels
        self.bn2 = nn.BatchNorm2d(filters2)

        self.conv3 = nn.Conv2d(filters2, filters3, kernel_size=1)
        self.bn3 = nn.BatchNorm2d(filters3)

    def forward(self, x):
        shortcut = x

        x = self.conv1(x)
        x = self.bn1(x)
        x = F.relu(x)

        x = self.conv2(x)
        x = self.bn2(x)
        x = F.relu(x)

        x = self.conv3(x)
        x = self.bn3(x)

        x += shortcut
        x = F.relu(x)
        return x

class ConvBlock(nn.Module):
    def __init__(self, in_channels, filters, kernel_size, strides):
        super(ConvBlock, self).__init__()
        filters1, filters2, filters3 = filters

        self.conv1 = nn.Conv2d(in_channels, filters1, kernel_size=1, stride=strides)
        self.bn1 = nn.BatchNorm2d(filters1)

        self.conv2 = nn.Conv2d(filters1, filters2, kernel_size=kernel_size, padding=kernel_size // 2)  # 'same' padding for odd kernels
        self.bn2 = nn.BatchNorm2d(filters2)

        self.conv3 = nn.Conv2d(filters2, filters3, kernel_size=1)
        self.bn3 = nn.BatchNorm2d(filters3)

        self.shortcut_conv = nn.Conv2d(in_channels, filters3, kernel_size=1, stride=strides)
        self.shortcut_bn = nn.BatchNorm2d(filters3)

    def forward(self, x):
        shortcut = self.shortcut_conv(x)
        shortcut = self.shortcut_bn(shortcut)

        x = self.conv1(x)
        x = self.bn1(x)
        x = F.relu(x)

        x = self.conv2(x)
        x = self.bn2(x)
        x = F.relu(x)

        x = self.conv3(x)
        x = self.bn3(x)

        x += shortcut
        x = F.relu(x)
        return x

class ResNet50(nn.Module):
    def __init__(self, num_classes=1000):
        super(ResNet50, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3)
        self.bn1 = nn.BatchNorm2d(64)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        self.layer2 = self._make_layer(64, [64, 64, 256], 3, stride=1)
        self.layer3 = self._make_layer(256, [128, 128, 512], 4, stride=2)
        self.layer4 = self._make_layer(512, [256, 256, 1024], 6, stride=2)
        self.layer5 = self._make_layer(1024, [512, 512, 2048], 3, stride=2)

        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(2048, num_classes)

    def _make_layer(self, in_channels, filters, blocks, stride):
        layers = []
        layers.append(ConvBlock(in_channels, filters, 3, stride))
        for _ in range(1, blocks):
            layers.append(IdentityBlock(filters[2], filters, 3))
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = F.relu(x)
        x = self.maxpool(x)

        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.layer5(x)

        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)
        return x

# Example initialization: build the model with one output per bird class
model = ResNet50(num_classes=len(class_names))
# Alternative: start from torchvision's pretrained weights (torchvision >= 0.13)
# model = models.resnet50(weights=models.ResNet50_Weights.DEFAULT)
# model.fc = nn.Linear(model.fc.in_features, len(class_names))
model = model.to(device)
```
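As a quick sanity check (not part of the original post), a dummy forward pass confirms the network wiring: a random batch should produce one logit per class.

```python
# Sanity check: a dummy 224x224 RGB batch should yield shape (N, len(class_names))
with torch.no_grad():
    dummy = torch.randn(2, 3, 224, 224, device=device)
    print(model(dummy).shape)  # expected: torch.Size([2, 4])
```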
```python
# Define the loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)
```
```python
# Train the model
epochs = 100
train_losses, val_losses = [], []
train_acc, val_acc = [], []

import copy  # for deep-copying the best model weights

best_val_loss = float('inf')
best_model_wts = None  # used to store the best model weights
for epoch in range(epochs):
    # Training
    model.train()
    running_loss, running_corrects = 0.0, 0

    for inputs, labels in train_loader:
        inputs, labels = inputs.to(device), labels.to(device)

        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        running_loss += loss.item() * inputs.size(0)
        _, preds = torch.max(outputs, 1)
        running_corrects += torch.sum(preds == labels.data)

    epoch_loss = running_loss / train_size
    epoch_acc = running_corrects.double() / train_size

    train_losses.append(epoch_loss)
    train_acc.append(epoch_acc.item())

    # Validation
    model.eval()
    val_running_loss, val_running_corrects = 0.0, 0

    with torch.no_grad():
        for inputs, labels in val_loader:
            inputs, labels = inputs.to(device), labels.to(device)

            outputs = model(inputs)
            loss = criterion(outputs, labels)

            val_running_loss += loss.item() * inputs.size(0)
            _, preds = torch.max(outputs, 1)
            val_running_corrects += torch.sum(preds == labels.data)

    val_epoch_loss = val_running_loss / val_size
    val_epoch_acc = val_running_corrects.double() / val_size

    val_losses.append(val_epoch_loss)
    val_acc.append(val_epoch_acc.item())
    if val_epoch_loss < best_val_loss:
        best_val_loss = val_epoch_loss
        best_model_wts = copy.deepcopy(model.state_dict())  # deep copy; state_dict() returns live references that keep updating
    print(f'Epoch {epoch}/{epochs-1}, Train Loss: {epoch_loss:.4f}, Train Acc: {epoch_acc:.4f}, Val Loss: {val_epoch_loss:.4f}, Val Acc: {val_epoch_acc:.4f}')

# After training, restore the best model weights
model.load_state_dict(best_model_wts)
```
```
Epoch 0/99, Train Loss: 0.5125, Train Acc: 0.8119, Val Loss: 3.1043, Val Acc: 0.5221
Epoch 1/99, Train Loss: 0.6260, Train Acc: 0.7788, Val Loss: 0.6525, Val Acc: 0.7522
Epoch 2/99, Train Loss: 0.4564, Train Acc: 0.8429, Val Loss: 1.2441, Val Acc: 0.6814
Epoch 3/99, Train Loss: 0.4463, Train Acc: 0.8230, Val Loss: 0.8466, Val Acc: 0.7699
Epoch 4/99, Train Loss: 0.5827, Train Acc: 0.7898, Val Loss: 0.8394, Val Acc: 0.7788
Epoch 5/99, Train Loss: 0.4685, Train Acc: 0.8385, Val Loss: 0.6826, Val Acc: 0.8142
Epoch 6/99, Train Loss: 0.3892, Train Acc: 0.8606, Val Loss: 0.6440, Val Acc: 0.7699
Epoch 7/99, Train Loss: 0.4116, Train Acc: 0.8606, Val Loss: 0.7322, Val Acc: 0.7876
Epoch 8/99, Train Loss: 0.3453, Train Acc: 0.8872, Val Loss: 0.9246, Val Acc: 0.7522
Epoch 9/99, Train Loss: 0.2964, Train Acc: 0.8805, Val Loss: 0.5056, Val Acc: 0.8584
Epoch 10/99, Train Loss: 0.3832, Train Acc: 0.8739, Val Loss: 0.8432, Val Acc: 0.7345
Epoch 11/99, Train Loss: 0.4082, Train Acc: 0.8628, Val Loss: 1.2504, Val Acc: 0.6637
Epoch 12/99, Train Loss: 0.3812, Train Acc: 0.8562, Val Loss: 0.5781, Val Acc: 0.7699
Epoch 13/99, Train Loss: 0.2939, Train Acc: 0.8982, Val Loss: 0.6404, Val Acc: 0.8407
Epoch 14/99, Train Loss: 0.2671, Train Acc: 0.8960, Val Loss: 0.6122, Val Acc: 0.8230
Epoch 15/99, Train Loss: 0.3338, Train Acc: 0.8850, Val Loss: 0.9396, Val Acc: 0.7699
Epoch 16/99, Train Loss: 0.3182, Train Acc: 0.8872, Val Loss: 0.7527, Val Acc: 0.8584
Epoch 17/99, Train Loss: 0.2798, Train Acc: 0.9137, Val Loss: 0.7588, Val Acc: 0.7522
Epoch 18/99, Train Loss: 0.2432, Train Acc: 0.9159, Val Loss: 0.8711, Val Acc: 0.7699
Epoch 19/99, Train Loss: 0.2381, Train Acc: 0.9204, Val Loss: 0.6623, Val Acc: 0.7965
Epoch 20/99, Train Loss: 0.2503, Train Acc: 0.9159, Val Loss: 1.0319, Val Acc: 0.7168
Epoch 21/99, Train Loss: 0.3165, Train Acc: 0.9049, Val Loss: 0.5331, Val Acc: 0.8496
Epoch 22/99, Train Loss: 0.2036, Train Acc: 0.9292, Val Loss: 0.8623, Val Acc: 0.7611
Epoch 23/99, Train Loss: 0.2089, Train Acc: 0.9292, Val Loss: 0.8315, Val Acc: 0.8142
Epoch 24/99, Train Loss: 0.2094, Train Acc: 0.9336, Val Loss: 0.5755, Val Acc: 0.8053
Epoch 25/99, Train Loss: 0.0996, Train Acc: 0.9690, Val Loss: 0.6812, Val Acc: 0.7699
Epoch 26/99, Train Loss: 0.1375, Train Acc: 0.9558, Val Loss: 0.4544, Val Acc: 0.8850
Epoch 27/99, Train Loss: 0.1011, Train Acc: 0.9646, Val Loss: 0.5622, Val Acc: 0.8407
Epoch 28/99, Train Loss: 0.1597, Train Acc: 0.9447, Val Loss: 0.5689, Val Acc: 0.8407
Epoch 29/99, Train Loss: 0.1708, Train Acc: 0.9491, Val Loss: 0.6313, Val Acc: 0.8319
Epoch 30/99, Train Loss: 0.0951, Train Acc: 0.9668, Val Loss: 0.5573, Val Acc: 0.8496
Epoch 31/99, Train Loss: 0.1465, Train Acc: 0.9602, Val Loss: 0.5064, Val Acc: 0.8584
Epoch 32/99, Train Loss: 0.1095, Train Acc: 0.9624, Val Loss: 0.6120, Val Acc: 0.8319
Epoch 33/99, Train Loss: 0.1096, Train Acc: 0.9690, Val Loss: 0.6218, Val Acc: 0.8053
Epoch 34/99, Train Loss: 0.0894, Train Acc: 0.9646, Val Loss: 0.4840, Val Acc: 0.8673
Epoch 35/99, Train Loss: 0.1467, Train Acc: 0.9314, Val Loss: 0.5605, Val Acc: 0.8761
Epoch 36/99, Train Loss: 0.2331, Train Acc: 0.9447, Val Loss: 0.7342, Val Acc: 0.7876
Epoch 37/99, Train Loss: 0.1630, Train Acc: 0.9336, Val Loss: 0.5327, Val Acc: 0.8496
Epoch 38/99, Train Loss: 0.1293, Train Acc: 0.9624, Val Loss: 1.0636, Val Acc: 0.7434
Epoch 39/99, Train Loss: 0.0954, Train Acc: 0.9646, Val Loss: 0.4450, Val Acc: 0.8938
Epoch 40/99, Train Loss: 0.0402, Train Acc: 0.9912, Val Loss: 0.5242, Val Acc: 0.8407
Epoch 41/99, Train Loss: 0.1280, Train Acc: 0.9624, Val Loss: 0.5269, Val Acc: 0.8319
Epoch 42/99, Train Loss: 0.0788, Train Acc: 0.9779, Val Loss: 0.6632, Val Acc: 0.8319
Epoch 43/99, Train Loss: 0.1128, Train Acc: 0.9668, Val Loss: 0.3365, Val Acc: 0.8761
Epoch 44/99, Train Loss: 0.1162, Train Acc: 0.9646, Val Loss: 0.6866, Val Acc: 0.8142
Epoch 45/99, Train Loss: 0.0266, Train Acc: 0.9956, Val Loss: 0.3973, Val Acc: 0.8850
Epoch 46/99, Train Loss: 0.0931, Train Acc: 0.9690, Val Loss: 0.6352, Val Acc: 0.8319
Epoch 47/99, Train Loss: 0.0777, Train Acc: 0.9735, Val Loss: 0.5743, Val Acc: 0.8496
Epoch 48/99, Train Loss: 0.0473, Train Acc: 0.9889, Val Loss: 0.5463, Val Acc: 0.8319
Epoch 49/99, Train Loss: 0.1480, Train Acc: 0.9535, Val Loss: 1.0142, Val Acc: 0.8407
Epoch 50/99, Train Loss: 0.1329, Train Acc: 0.9513, Val Loss: 0.4691, Val Acc: 0.8673
Epoch 51/99, Train Loss: 0.0330, Train Acc: 0.9867, Val Loss: 0.4812, Val Acc: 0.8496
Epoch 52/99, Train Loss: 0.1050, Train Acc: 0.9535, Val Loss: 0.7743, Val Acc: 0.7788
Epoch 53/99, Train Loss: 0.0767, Train Acc: 0.9735, Val Loss: 0.6740, Val Acc: 0.8142
Epoch 54/99, Train Loss: 0.0483, Train Acc: 0.9779, Val Loss: 0.6069, Val Acc: 0.8230
Epoch 55/99, Train Loss: 0.0923, Train Acc: 0.9757, Val Loss: 0.5565, Val Acc: 0.8673
Epoch 56/99, Train Loss: 0.0940, Train Acc: 0.9690, Val Loss: 0.6511, Val Acc: 0.8230
Epoch 57/99, Train Loss: 0.0310, Train Acc: 0.9867, Val Loss: 0.4568, Val Acc: 0.8496
Epoch 58/99, Train Loss: 0.0073, Train Acc: 1.0000, Val Loss: 0.4516, Val Acc: 0.8496
Epoch 59/99, Train Loss: 0.0033, Train Acc: 1.0000, Val Loss: 0.4458, Val Acc: 0.8850
Epoch 60/99, Train Loss: 0.0055, Train Acc: 0.9978, Val Loss: 0.4935, Val Acc: 0.8584
Epoch 61/99, Train Loss: 0.0030, Train Acc: 1.0000, Val Loss: 0.5033, Val Acc: 0.8496
Epoch 62/99, Train Loss: 0.0098, Train Acc: 0.9956, Val Loss: 0.3741, Val Acc: 0.8673
Epoch 63/99, Train Loss: 0.0201, Train Acc: 0.9889, Val Loss: 0.4065, Val Acc: 0.8584
Epoch 64/99, Train Loss: 0.0158, Train Acc: 0.9956, Val Loss: 0.4000, Val Acc: 0.9027
Epoch 65/99, Train Loss: 0.0077, Train Acc: 0.9978, Val Loss: 0.4236, Val Acc: 0.8761
Epoch 66/99, Train Loss: 0.0034, Train Acc: 1.0000, Val Loss: 0.4047, Val Acc: 0.8938
Epoch 67/99, Train Loss: 0.0099, Train Acc: 0.9978, Val Loss: 0.4296, Val Acc: 0.8673
Epoch 68/99, Train Loss: 0.0170, Train Acc: 0.9956, Val Loss: 0.4366, Val Acc: 0.9115
Epoch 69/99, Train Loss: 0.0578, Train Acc: 0.9867, Val Loss: 0.9006, Val Acc: 0.7699
Epoch 70/99, Train Loss: 0.1552, Train Acc: 0.9624, Val Loss: 1.0190, Val Acc: 0.7522
Epoch 71/99, Train Loss: 0.3006, Train Acc: 0.9071, Val Loss: 1.7312, Val Acc: 0.6726
Epoch 72/99, Train Loss: 0.1259, Train Acc: 0.9535, Val Loss: 0.5290, Val Acc: 0.8496
Epoch 73/99, Train Loss: 0.0361, Train Acc: 0.9845, Val Loss: 0.5585, Val Acc: 0.8319
Epoch 74/99, Train Loss: 0.0485, Train Acc: 0.9779, Val Loss: 0.6037, Val Acc: 0.8407
Epoch 75/99, Train Loss: 0.2948, Train Acc: 0.9049, Val Loss: 1.4896, Val Acc: 0.6726
Epoch 76/99, Train Loss: 0.2515, Train Acc: 0.9270, Val Loss: 1.3241, Val Acc: 0.7080
Epoch 77/99, Train Loss: 0.1719, Train Acc: 0.9403, Val Loss: 0.9907, Val Acc: 0.7876
Epoch 78/99, Train Loss: 0.0785, Train Acc: 0.9779, Val Loss: 0.8646, Val Acc: 0.7699
Epoch 79/99, Train Loss: 0.0347, Train Acc: 0.9889, Val Loss: 0.5678, Val Acc: 0.8407
Epoch 80/99, Train Loss: 0.1509, Train Acc: 0.9447, Val Loss: 0.5656, Val Acc: 0.8142
Epoch 81/99, Train Loss: 0.0736, Train Acc: 0.9779, Val Loss: 0.6753, Val Acc: 0.8053
Epoch 82/99, Train Loss: 0.0637, Train Acc: 0.9823, Val Loss: 0.5300, Val Acc: 0.8584
Epoch 83/99, Train Loss: 0.0454, Train Acc: 0.9757, Val Loss: 0.5306, Val Acc: 0.8584
Epoch 84/99, Train Loss: 0.0407, Train Acc: 0.9889, Val Loss: 0.4931, Val Acc: 0.8407
Epoch 85/99, Train Loss: 0.0100, Train Acc: 1.0000, Val Loss: 0.4908, Val Acc: 0.8673
Epoch 86/99, Train Loss: 0.0071, Train Acc: 0.9978, Val Loss: 0.4836, Val Acc: 0.8761
Epoch 87/99, Train Loss: 0.0094, Train Acc: 0.9978, Val Loss: 0.4489, Val Acc: 0.8761
Epoch 88/99, Train Loss: 0.0033, Train Acc: 1.0000, Val Loss: 0.4582, Val Acc: 0.8761
Epoch 89/99, Train Loss: 0.0015, Train Acc: 1.0000, Val Loss: 0.4960, Val Acc: 0.8761
Epoch 90/99, Train Loss: 0.0027, Train Acc: 1.0000, Val Loss: 0.5174, Val Acc: 0.8584
Epoch 91/99, Train Loss: 0.0086, Train Acc: 0.9978, Val Loss: 0.5599, Val Acc: 0.8319
Epoch 92/99, Train Loss: 0.0074, Train Acc: 0.9978, Val Loss: 0.4926, Val Acc: 0.8673
Epoch 93/99, Train Loss: 0.0052, Train Acc: 1.0000, Val Loss: 0.4914, Val Acc: 0.8407
Epoch 94/99, Train Loss: 0.0025, Train Acc: 1.0000, Val Loss: 0.5375, Val Acc: 0.8584
Epoch 95/99, Train Loss: 0.0013, Train Acc: 1.0000, Val Loss: 0.5106, Val Acc: 0.8761
Epoch 96/99, Train Loss: 0.0011, Train Acc: 1.0000, Val Loss: 0.4826, Val Acc: 0.8584
Epoch 97/99, Train Loss: 0.0088, Train Acc: 0.9978, Val Loss: 0.4799, Val Acc: 0.8584
Epoch 98/99, Train Loss: 0.1045, Train Acc: 0.9535, Val Loss: 0.7483, Val Acc: 0.8230
Epoch 99/99, Train Loss: 0.1666, Train Acc: 0.9425, Val Loss: 0.8003, Val Acc: 0.8319
```
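The `train_losses`, `val_losses`, `train_acc`, and `val_acc` lists collected during training are never visualized in the post; a minimal plotting sketch using exactly those lists:

```python
# Plot the loss and accuracy curves collected during training
import matplotlib.pyplot as plt

epochs_range = range(len(train_losses))

plt.figure(figsize=(12, 4))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, train_losses, label="Train Loss")
plt.plot(epochs_range, val_losses, label="Val Loss")
plt.legend()
plt.title("Loss")

plt.subplot(1, 2, 2)
plt.plot(epochs_range, train_acc, label="Train Acc")
plt.plot(epochs_range, val_acc, label="Val Acc")
plt.legend()
plt.title("Accuracy")
plt.show()
```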
```python
# Visualize model predictions on a validation batch
model.eval()
plt.figure(figsize=(10, 5))
plt.suptitle("bird")

for inputs, labels in val_loader:
    inputs, labels = inputs.to(device), labels.to(device)
    outputs = model(inputs)
    _, preds = torch.max(outputs, 1)

    for i in range(len(inputs)):
        plt.subplot(2, 4, i + 1)

        img = inputs[i].cpu().numpy().transpose((1, 2, 0))
        plt.imshow(img)
        plt.title(class_names[preds[i]])

        plt.axis("off")
    break  # only visualize the first validation batch

plt.show()
```
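Since `best_model_wts` only lives in memory, here is a short sketch for persisting the best checkpoint to disk; the file name is an arbitrary example, not from the original post:

```python
# Persist the best weights ("best_resnet50.pth" is an example path)
torch.save(best_model_wts, "best_resnet50.pth")
# To reload later:
# model.load_state_dict(torch.load("best_resnet50.pth", map_location=device))
```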

Personal Summary

  • Completed converting the TensorFlow code to PyTorch
  • Learned about the history of CNN architectures and the motivation behind residual networks
  • Increased the number of training epochs to obtain a reasonably accurate model