DAY 35 超大力王 Loves Learning Python

Knowledge point review:

  1. Three ways to visualize a model: torchinfo's summary printout plus weight-distribution plots are the recommended combination (see the first sketch after this list)
  2. Progress bars: manual and automatic tqdm styles, for cleaner console output (see the second sketch after this list)
  3. How to write inference code: evaluation mode (see the third sketch after this list; the full script below uses the same pattern)
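
For point 1, a minimal sketch of the torchinfo workflow, assuming a small MLP with the same 4-feature/3-class shape as the Iris script below (torchinfo is a third-party package: pip install torchinfo):

import torch.nn as nn
import matplotlib.pyplot as plt
from torchinfo import summary

# A small MLP matching the 4-feature / 3-class Iris setup
model = nn.Sequential(nn.Linear(4, 10), nn.ReLU(), nn.Linear(10, 3))

# Layer-by-layer summary; input_size is (batch_size, num_features)
summary(model, input_size=(16, 4))

# Weight-distribution visualization: one histogram per Linear layer's weights
for name, param in model.named_parameters():
    if "weight" in name:
        plt.hist(param.detach().cpu().numpy().ravel(), bins=30, alpha=0.6, label=name)
plt.xlabel("Weight value")
plt.ylabel("Count")
plt.title("Weight distributions")
plt.legend()
plt.show()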
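
For point 2, a small sketch contrasting the two tqdm styles; the sleep call is just a stand-in for one training step:

import time
from tqdm import tqdm

# Automatic style: tqdm wraps the iterable and advances itself
for epoch in tqdm(range(100), desc="auto"):
    time.sleep(0.01)  # stand-in for one training step

# Manual style: create the bar yourself and call update(); the script
# below uses this pattern, advancing the bar in chunks
with tqdm(total=100, desc="manual", unit="epoch") as pbar:
    for epoch in range(100):
        time.sleep(0.01)
        pbar.set_postfix({"Loss": f"{1.0 / (epoch + 1):.4f}"})  # dummy metric
        pbar.update(1)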
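
For point 3, a minimal inference sketch. model.eval() matters for layers such as Dropout and BatchNorm, which behave differently at training and test time, while torch.no_grad() turns off gradient tracking; the model here is illustrative:

import torch
import torch.nn as nn

# A model with Dropout, so eval() visibly changes behavior
model = nn.Sequential(nn.Linear(4, 10), nn.ReLU(), nn.Dropout(0.5), nn.Linear(10, 3))
x = torch.randn(8, 4)  # dummy batch of 8 samples

model.eval()  # switch Dropout/BatchNorm to inference behavior
with torch.no_grad():  # no autograd bookkeeping during inference
    logits = model(x)
    predicted = logits.argmax(dim=1)
print(predicted)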

Assignment: tune the hyperparameters in the model definition and compare the results.

Python code:
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
import time
import matplotlib.pyplot as plt
from tqdm import tqdm

# Select the GPU if one is available, otherwise fall back to the CPU
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")

# Load the Iris dataset
iris = load_iris()
X = iris.data  # feature matrix (150 samples, 4 features)
y = iris.target  # class labels (3 classes)

# Split into training and test sets (80/20)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Scale features to the [0, 1] range
scaler = MinMaxScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)

# Convert to PyTorch tensors and move them to the device
X_train = torch.FloatTensor(X_train).to(device)
y_train = torch.LongTensor(y_train).to(device)
X_test = torch.FloatTensor(X_test).to(device)
y_test = torch.LongTensor(y_test).to(device)

# Define models with different configurations
class MLP1(nn.Module):
    def __init__(self):
        super(MLP1, self).__init__()
        self.fc1 = nn.Linear(4, 10)  # baseline configuration
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(10, 3)

    def forward(self, x):
        out = self.fc1(x)
        out = self.relu(out)
        out = self.fc2(out)
        return out

class MLP2(nn.Module):
    def __init__(self):
        super(MLP2, self).__init__()
        self.fc1 = nn.Linear(4, 20)  # wider hidden layer
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(20, 3)

    def forward(self, x):
        out = self.fc1(x)
        out = self.relu(out)
        out = self.fc2(out)
        return out

class MLP3(nn.Module):
    def __init__(self):
        super(MLP3, self).__init__()
        self.fc1 = nn.Linear(4, 10)  # one extra hidden layer below
        self.relu1 = nn.ReLU()
        self.fc2 = nn.Linear(10, 10)
        self.relu2 = nn.ReLU()
        self.fc3 = nn.Linear(10, 3)

    def forward(self, x):
        out = self.fc1(x)
        out = self.relu1(out)
        out = self.fc2(out)
        out = self.relu2(out)
        out = self.fc3(out)
        return out

# Training function
def train_model(model, optimizer, criterion, num_epochs=20000):
    model.to(device)
    losses = []
    epochs = []
    start_time = time.time()
    
    with tqdm(total=num_epochs, desc="Training", unit="epoch") as pbar:
        for epoch in range(num_epochs):
            outputs = model(X_train)
            loss = criterion(outputs, y_train)
            
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            
            if (epoch + 1) % 200 == 0:
                # Record the loss every 200 epochs and refresh the bar's postfix
                losses.append(loss.item())
                epochs.append(epoch + 1)
                pbar.set_postfix({'Loss': f'{loss.item():.4f}'})
            
            if (epoch + 1) % 1000 == 0:
                # Advance the bar in chunks of 1000 epochs to keep overhead low
                pbar.update(1000)
        
        if pbar.n < num_epochs:
            pbar.update(num_epochs - pbar.n)  # close out any remainder so the bar reaches 100%
    
    time_all = time.time() - start_time
    print(f'Training time: {time_all:.2f} seconds')
    
    # Evaluate on the test set
    model.eval()  # switch to evaluation mode
    with torch.no_grad():  # gradients are not needed for inference
        outputs = model(X_test)
        _, predicted = torch.max(outputs, 1)
        accuracy = (predicted == y_test).sum().item() / y_test.size(0)
    
    return losses, epochs, accuracy

# Configuration 1: baseline
model1 = MLP1()
optimizer1 = optim.SGD(model1.parameters(), lr=0.01)
criterion = nn.CrossEntropyLoss()
print("Training configuration 1: baseline")
losses1, epochs1, acc1 = train_model(model1, optimizer1, criterion)

# Configuration 2: wider hidden layer
model2 = MLP2()
optimizer2 = optim.SGD(model2.parameters(), lr=0.01)
print("Training configuration 2: wider hidden layer")
losses2, epochs2, acc2 = train_model(model2, optimizer2, criterion)

# Configuration 3: deeper network
model3 = MLP3()
optimizer3 = optim.SGD(model3.parameters(), lr=0.01)
print("Training configuration 3: deeper network")
losses3, epochs3, acc3 = train_model(model3, optimizer3, criterion)

# Configuration 4: Adam optimizer
model4 = MLP1()
optimizer4 = optim.Adam(model4.parameters(), lr=0.001)  # Adam usually needs a smaller learning rate
print("Training configuration 4: Adam optimizer")
losses4, epochs4, acc4 = train_model(model4, optimizer4, criterion)

# Configuration 5: L2 regularization
model5 = MLP1()
optimizer5 = optim.SGD(model5.parameters(), lr=0.01, weight_decay=0.001)  # weight_decay adds L2 regularization
print("Training configuration 5: L2 regularization")
losses5, epochs5, acc5 = train_model(model5, optimizer5, criterion)

# Visual comparison of the five configurations
plt.figure(figsize=(12, 8))
plt.plot(epochs1, losses1, label='Baseline (Acc: {:.2f}%)'.format(acc1*100))
plt.plot(epochs2, losses2, label='Wider hidden layer (Acc: {:.2f}%)'.format(acc2*100))
plt.plot(epochs3, losses3, label='Deeper network (Acc: {:.2f}%)'.format(acc3*100))
plt.plot(epochs4, losses4, label='Adam optimizer (Acc: {:.2f}%)'.format(acc4*100))
plt.plot(epochs5, losses5, label='L2 regularization (Acc: {:.2f}%)'.format(acc5*100))
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.title('Training loss under different hyperparameter configurations')
plt.legend()
plt.grid(True)
plt.show()

# Print the final accuracy comparison
print("\nAccuracy comparison:")
print(f"Configuration 1 (baseline): {acc1*100:.2f}%")
print(f"Configuration 2 (wider hidden layer): {acc2*100:.2f}%")
print(f"Configuration 3 (deeper network): {acc3*100:.2f}%")
print(f"Configuration 4 (Adam optimizer): {acc4*100:.2f}%")
print(f"Configuration 5 (L2 regularization): {acc5*100:.2f}%")

Output:

Using device: cuda:0
Training configuration 1: baseline
Training: 100%|██████████| 20000/20000 [00:12<00:00, 1626.00epoch/s, Loss=0.0623]
Training time: 12.30 seconds
Training configuration 2: wider hidden layer
Training: 100%|██████████| 20000/20000 [00:12<00:00, 1622.88epoch/s, Loss=0.0615]
Training time: 12.33 seconds
Training configuration 3: deeper network
Training: 100%|██████████| 20000/20000 [00:16<00:00, 1182.20epoch/s, Loss=0.0475]
Training time: 16.92 seconds
Training configuration 4: Adam optimizer
Training: 100%|██████████| 20000/20000 [00:16<00:00, 1225.55epoch/s, Loss=0.0466]
Training time: 16.32 seconds
Training configuration 5: L2 regularization
Training: 100%|██████████| 20000/20000 [00:13<00:00, 1530.57epoch/s, Loss=0.0698]

Accuracy comparison:
Configuration 1 (baseline): 96.67%
Configuration 2 (wider hidden layer): 96.67%
Configuration 3 (deeper network): 100.00%
Configuration 4 (Adam optimizer): 100.00%
Configuration 5 (L2 regularization): 96.67%
