Python Check-in Day 54

@浙大疏锦行

Assignments:

  1. Observe the accuracy of an Inception network on CIFAR-10
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import CosineAnnealingLR

# Device configuration
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Data augmentation and loading
def get_dataloaders():
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])

    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])

    trainset = torchvision.datasets.CIFAR10(
        root='./data', train=True, download=True, transform=transform_train)
    trainloader = DataLoader(
        trainset, batch_size=128, shuffle=True, num_workers=2)

    testset = torchvision.datasets.CIFAR10(
        root='./data', train=False, download=True, transform=transform_test)
    testloader = DataLoader(
        testset, batch_size=100, shuffle=False, num_workers=2)

    return trainloader, testloader

class BasicConv2d(nn.Module):
    def __init__(self, in_channels, out_channels, **kwargs):
        super(BasicConv2d, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
        self.bn = nn.BatchNorm2d(out_channels)
        
    def forward(self, x):
        return F.relu(self.bn(self.conv(x)))

class Inception(nn.Module):
    def __init__(self, in_channels, ch1x1, ch3x3red, ch3x3, ch5x5red, ch5x5, pool_proj):
        super(Inception, self).__init__()
        # 1x1 branch
        self.branch1 = BasicConv2d(in_channels, ch1x1, kernel_size=1)
        
        # 1x1 reduce -> 3x3 branch
        self.branch2 = nn.Sequential(
            BasicConv2d(in_channels, ch3x3red, kernel_size=1),
            BasicConv2d(ch3x3red, ch3x3, kernel_size=3, padding=1)
        )
        
        # 1x1 reduce -> 5x5 branch
        self.branch3 = nn.Sequential(
            BasicConv2d(in_channels, ch5x5red, kernel_size=1),
            BasicConv2d(ch5x5red, ch5x5, kernel_size=5, padding=2)
        )
        
        # 3x3 max pool -> 1x1 projection branch
        self.branch4 = nn.Sequential(
            nn.MaxPool2d(kernel_size=3, stride=1, padding=1),
            BasicConv2d(in_channels, pool_proj, kernel_size=1)
        )
    
    def forward(self, x):
        return torch.cat([
            self.branch1(x),
            self.branch2(x),
            self.branch3(x),
            self.branch4(x)
        ], 1)

class InceptionNet_CIFAR(nn.Module):
    def __init__(self, num_classes=10):
        super(InceptionNet_CIFAR, self).__init__()
        
        # Stem adapted for CIFAR-10's 32x32 inputs
        self.stem = nn.Sequential(
            BasicConv2d(3, 64, kernel_size=3, stride=1, padding=1),
            BasicConv2d(64, 64, kernel_size=3, padding=1),
            BasicConv2d(64, 64, kernel_size=3, padding=1),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        )
        
        # Corrected channel arithmetic:
        # inception3a output channels = 32 + 32 + 16 + 16 = 96
        self.inception3a = Inception(64, 32, 32, 32, 16, 16, 16)
        
        # inception3b takes 96 input channels (the output of inception3a)
        self.inception3b = Inception(96, 64, 32, 64, 16, 32, 32)
        
        # Classification head (inception3b output channels = 64 + 64 + 32 + 32 = 192)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.dropout = nn.Dropout(0.5)
        self.fc = nn.Linear(192, num_classes)
    
    def forward(self, x):
        x = self.stem(x)  # [B, 64, 16, 16]
        x = self.inception3a(x)  # [B, 96, 16, 16]
        x = self.inception3b(x)  # [B, 192, 16, 16]
        x = F.max_pool2d(x, kernel_size=2)  # [B, 192, 8, 8]
        x = self.avgpool(x)  # [B, 192, 1, 1]
        x = self.dropout(torch.flatten(x, 1))
        x = self.fc(x)
        return x

# Training function
def train(model, device, trainloader, optimizer, criterion, epoch):
    model.train()
    correct = 0
    total = 0
    for batch_idx, (inputs, targets) in enumerate(trainloader):
        inputs, targets = inputs.to(device), targets.to(device)
        
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()
        
        _, predicted = outputs.max(1)
        total += targets.size(0)
        correct += predicted.eq(targets).sum().item()
        
        if batch_idx % 100 == 0:
            print(f'Epoch: {epoch} | Batch: {batch_idx}/{len(trainloader)} '
                  f'| Loss: {loss.item():.3f} | Acc: {100.*correct/total:.1f}%')
    
    return 100.*correct/total

# Test function
def test(model, device, testloader, criterion):
    model.eval()
    test_loss = 0
    correct = 0
    total = 0
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(testloader):
            inputs, targets = inputs.to(device), targets.to(device)
            outputs = model(inputs)
            loss = criterion(outputs, targets)
            
            test_loss += loss.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
    
    acc = 100.*correct/total
    print(f'Test Loss: {test_loss/(batch_idx+1):.3f} | Acc: {acc:.1f}%')
    return acc

# Main function
def main():
    trainloader, testloader = get_dataloaders()
    
    model = InceptionNet_CIFAR().to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1, 
                               momentum=0.9, weight_decay=5e-4)
    scheduler = CosineAnnealingLR(optimizer, T_max=200)
    
    best_acc = 0
    for epoch in range(200):
        print(f"\nEpoch: {epoch+1}")
        train_acc = train(model, device, trainloader, optimizer, criterion, epoch + 1)
        test_acc = test(model, device, testloader, criterion)
        scheduler.step()
        
        if test_acc > best_acc:
            best_acc = test_acc
            torch.save(model.state_dict(), 'best_model.pth')
    
    print(f"\nBest Test Accuracy: {best_acc:.2f}%")

if __name__ == '__main__':
    main()
```
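
A quick shape check can confirm the channel arithmetic in the comments before committing to a 200-epoch run. This is a minimal sketch, assuming it is appended to the script above so that `InceptionNet_CIFAR` is in scope:

```python
# Dummy forward pass: verifies the stem downsamples 32x32 -> 16x16 and that
# the two Inception stages emit 96 and 192 channels as annotated above.
model = InceptionNet_CIFAR()
x = torch.randn(2, 3, 32, 32)  # fake batch of two CIFAR-10 images

feat = model.stem(x)
assert feat.shape == (2, 64, 16, 16), feat.shape

feat = model.inception3a(feat)
assert feat.shape == (2, 96, 16, 16), feat.shape   # 32+32+16+16

feat = model.inception3b(feat)
assert feat.shape == (2, 192, 16, 16), feat.shape  # 64+64+32+32

logits = model(x)
assert logits.shape == (2, 10), logits.shape
print("Shape check passed")
```
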
  2. Ablation study: introduce a residual connection and a CBAM module, and ablate each separately
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from torch.optim import SGD
from torch.optim.lr_scheduler import CosineAnnealingLR
from torch.utils.tensorboard import SummaryWriter

# Device configuration
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Data augmentation and loading
def get_dataloaders():
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])

    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])

    trainset = torchvision.datasets.CIFAR10(
        root='./data', train=True, download=True, transform=transform_train)
    trainloader = DataLoader(
        trainset, batch_size=128, shuffle=True, num_workers=2)

    testset = torchvision.datasets.CIFAR10(
        root='./data', train=False, download=True, transform=transform_test)
    testloader = DataLoader(
        testset, batch_size=100, shuffle=False, num_workers=2)

    return trainloader, testloader

# Basic building blocks
class BasicConv2d(nn.Module):
    def __init__(self, in_channels, out_channels, **kwargs):
        super().__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
        self.bn = nn.BatchNorm2d(out_channels)
        
    def forward(self, x):
        return F.relu(self.bn(self.conv(x)))

class CBAM(nn.Module):
    """Convolutional Block Attention Module"""
    def __init__(self, channels, reduction=16):
        super().__init__()
        # Channel attention (avg-pool only; the original CBAM also uses a max-pool path)
        self.channel_att = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(channels, channels//reduction, 1),
            nn.ReLU(),
            nn.Conv2d(channels//reduction, channels, 1),
            nn.Sigmoid()
        )
        # Spatial attention
        self.spatial_att = nn.Sequential(
            nn.Conv2d(2, 1, 7, padding=3),
            nn.Sigmoid()
        )

    def forward(self, x):
        # Apply channel attention first, then spatial attention on the
        # refined feature map (sequential order, as in the CBAM paper)
        x = x * self.channel_att(x)
        spatial = torch.cat([x.mean(1, keepdim=True), x.max(1, keepdim=True)[0]], 1)
        return x * self.spatial_att(spatial)

# Inception block variants
class BaseInception(nn.Module):
    def __init__(self, in_channels, ch1x1, ch3x3red, ch3x3, ch5x5red, ch5x5, pool_proj):
        super().__init__()
        self.branch1 = BasicConv2d(in_channels, ch1x1, kernel_size=1)
        self.branch2 = nn.Sequential(
            BasicConv2d(in_channels, ch3x3red, kernel_size=1),
            BasicConv2d(ch3x3red, ch3x3, kernel_size=3, padding=1)
        )
        self.branch3 = nn.Sequential(
            BasicConv2d(in_channels, ch5x5red, kernel_size=1),
            BasicConv2d(ch5x5red, ch5x5, kernel_size=5, padding=2)
        )
        self.branch4 = nn.Sequential(
            nn.MaxPool2d(kernel_size=3, stride=1, padding=1),
            BasicConv2d(in_channels, pool_proj, kernel_size=1)
        )

    def forward(self, x):
        return torch.cat([
            self.branch1(x),
            self.branch2(x),
            self.branch3(x),
            self.branch4(x)
        ], 1)

class ResInception(BaseInception):
    def __init__(self, in_channels, ch1x1, ch3x3red, ch3x3, ch5x5red, ch5x5, pool_proj):
        super().__init__(in_channels, ch1x1, ch3x3red, ch3x3, ch5x5red, ch5x5, pool_proj)
        out_channels = ch1x1 + ch3x3 + ch5x5 + pool_proj
        self.residual = nn.Identity()
        if in_channels != out_channels:
            self.residual = BasicConv2d(in_channels, out_channels, kernel_size=1)

    def forward(self, x):
        residual = self.residual(x)
        return F.relu(residual + super().forward(x))

class CBAMInception(BaseInception):
    def __init__(self, in_channels, ch1x1, ch3x3red, ch3x3, ch5x5red, ch5x5, pool_proj):
        super().__init__(in_channels, ch1x1, ch3x3red, ch3x3, ch5x5red, ch5x5, pool_proj)
        out_channels = ch1x1 + ch3x3 + ch5x5 + pool_proj
        self.cbam = CBAM(out_channels)

    def forward(self, x):
        out = super().forward(x)
        return self.cbam(out)

class ResCBAMInception(ResInception):
    def __init__(self, in_channels, ch1x1, ch3x3red, ch3x3, ch5x5red, ch5x5, pool_proj):
        super().__init__(in_channels, ch1x1, ch3x3red, ch3x3, ch5x5red, ch5x5, pool_proj)
        out_channels = ch1x1 + ch3x3 + ch5x5 + pool_proj
        self.cbam = CBAM(out_channels)

    def forward(self, x):
        residual = self.residual(x)
        # Call BaseInception.forward directly for the raw branch concat;
        # going through ResInception.forward would add the residual twice
        out = BaseInception.forward(self, x)
        return F.relu(residual + self.cbam(out))

# Network architecture
class InceptionNet(nn.Module):
    def __init__(self, inception_block=BaseInception, num_classes=10):
        super().__init__()
        self.stem = nn.Sequential(
            BasicConv2d(3, 64, kernel_size=3, stride=1, padding=1),
            BasicConv2d(64, 64, kernel_size=3, padding=1),
            BasicConv2d(64, 64, kernel_size=3, padding=1),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        )
        
        self.inception3a = inception_block(64, 32, 32, 32, 16, 16, 16)
        self.inception3b = inception_block(96, 64, 32, 64, 16, 32, 32)
        
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.dropout = nn.Dropout(0.5)
        self.fc = nn.Linear(192, num_classes)

    def forward(self, x):
        x = self.stem(x)  # [B, 64, 16, 16]
        x = self.inception3a(x)  # [B, 96, 16, 16]
        x = self.inception3b(x)  # [B, 192, 16, 16]
        x = F.max_pool2d(x, kernel_size=2)  # [B, 192, 8, 8]
        x = self.avgpool(x)  # [B, 192, 1, 1]
        x = self.dropout(torch.flatten(x, 1))
        x = self.fc(x)
        return x

# Training and evaluation functions
def train(model, loader, optimizer, criterion, epoch, writer=None):
    model.train()
    correct, total = 0, 0
    for inputs, targets in loader:
        inputs, targets = inputs.to(device), targets.to(device)
        
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()
        
        _, predicted = outputs.max(1)
        total += targets.size(0)
        correct += predicted.eq(targets).sum().item()
    
    acc = 100.*correct/total
    if writer:
        writer.add_scalar('train_acc', acc, epoch)
    return acc

def test(model, loader, criterion, epoch, writer=None):
    model.eval()
    test_loss, correct, total = 0, 0, 0
    with torch.no_grad():
        for inputs, targets in loader:
            inputs, targets = inputs.to(device), targets.to(device)
            outputs = model(inputs)
            loss = criterion(outputs, targets)
            
            test_loss += loss.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
    
    acc = 100.*correct/total
    if writer:
        writer.add_scalar('test_acc', acc, epoch)
    return acc

# Ablation study main function
def run_ablation():
    trainloader, testloader = get_dataloaders()
    criterion = nn.CrossEntropyLoss()
    
    # Map each variant name to its Inception block class; the model itself
    # is built fresh inside the loop so every run starts from scratch
    variants = {
        "Baseline": BaseInception,
        "Residual": ResInception,
        "CBAM": CBAMInception,
        "Res+CBAM": ResCBAMInception
    }
    
    results = {}
    for name, block in variants.items():
        print(f"\n=== Training {name} ===")
        writer = SummaryWriter(f'runs/{name}')
        model = InceptionNet(block).to(device)
        optimizer = SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)
        scheduler = CosineAnnealingLR(optimizer, T_max=200)
        
        best_acc = 0
        for epoch in range(200):
            train_acc = train(model, trainloader, optimizer, criterion, epoch, writer)
            test_acc = test(model, testloader, criterion, epoch, writer)
            scheduler.step()
            
            if test_acc > best_acc:
                best_acc = test_acc
                torch.save(model.state_dict(), f'{name}_best.pth')
            
            if epoch % 10 == 0:
                print(f'Epoch {epoch}: Train Acc={train_acc:.2f}%, Test Acc={test_acc:.2f}%')
        
        results[name] = best_acc
        writer.close()
    
    print("\n=== Final Results ===")
    for name, acc in results.items():
        print(f"{name}: {acc:.2f}%")

if __name__ == '__main__':
    run_ablation()
```
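
Residual projections and CBAM each add parameters, so reporting model size next to accuracy helps separate the effect of the mechanism from the effect of extra capacity. A small sketch, under the same assumption that it runs in the same file, after the class definitions:

```python
# Count trainable parameters for each ablation variant so accuracy gains
# can be weighed against the capacity each mechanism adds.
def count_params(model):
    return sum(p.numel() for p in model.parameters() if p.requires_grad)

for name, block in [("Baseline", BaseInception), ("Residual", ResInception),
                    ("CBAM", CBAMInception), ("Res+CBAM", ResCBAMInception)]:
    print(f"{name:>9}: {count_params(InceptionNet(block)) / 1e6:.3f}M parameters")
```

The per-epoch curves written by `SummaryWriter` can then be compared side by side with `tensorboard --logdir runs`.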