python打卡day35

Python 代码：
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
import time
import matplotlib.pyplot as plt
from tqdm import tqdm

# Pick the compute device: first CUDA GPU when present, otherwise CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(f"使用设备: {device}")

# Load the Iris dataset (150 samples, 4 numeric features, 3 classes).
iris = load_iris()
X, y = iris.data, iris.target

# Hold out 20% of the samples for testing; fixed seed keeps the split reproducible.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Scale every feature into [0, 1]; fit on the training split only to avoid leakage.
scaler = MinMaxScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)

# Convert to tensors directly on the chosen device:
# float32 for features, int64 (long) for class labels as CrossEntropyLoss expects.
X_train = torch.tensor(X_train, dtype=torch.float32, device=device)
y_train = torch.tensor(y_train, dtype=torch.long, device=device)
X_test = torch.tensor(X_test, dtype=torch.float32, device=device)
y_test = torch.tensor(y_test, dtype=torch.long, device=device)

# Hyper-parameter sweep: each run changes exactly one knob relative to the baseline.
_BASE = {"hidden_size": 10, "lr": 0.01, "epochs": 20000}
configs = [
    dict(_BASE),                       # baseline configuration
    {**_BASE, "hidden_size": 20},      # wider hidden layer
    {**_BASE, "lr": 0.05},             # higher learning rate
    {**_BASE, "epochs": 10000},        # fewer training epochs
]

class MLP(nn.Module):
    """Two-layer perceptron for Iris: 4 features -> hidden layer -> 3 class logits.

    Hoisted out of the sweep loop (the original re-declared the class on every
    iteration and read the hidden size through a closure over ``config``);
    the width is now an explicit constructor parameter.
    """

    def __init__(self, hidden_size: int):
        super().__init__()
        self.fc1 = nn.Linear(4, hidden_size)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size, 3)

    def forward(self, x):
        # Return raw logits; nn.CrossEntropyLoss applies log-softmax internally.
        return self.fc2(self.relu(self.fc1(x)))


for config in configs:
    print(f"\n=== 当前配置: 隐藏层={config['hidden_size']}, 学习率={config['lr']}, 轮数={config['epochs']} ===")

    # Fresh model and optimizer per configuration so the runs are independent.
    model = MLP(config['hidden_size']).to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=config['lr'])

    # Full-batch training: the whole training set is one forward/backward pass.
    # NOTE(review): losses is sampled every 200 epochs but never plotted,
    # although matplotlib is imported at the top of the file — confirm intent.
    losses = []
    with tqdm(total=config['epochs'], desc="训练进度", unit="epoch") as pbar:
        for epoch in range(config['epochs']):
            outputs = model(X_train)
            loss = criterion(outputs, y_train)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if (epoch + 1) % 200 == 0:
                # Read the scalar once; .item() syncs with the device.
                loss_val = loss.item()
                losses.append(loss_val)
                pbar.set_postfix({'Loss': f'{loss_val:.4f}'})

            # Advance the bar in coarse 1000-epoch steps to keep tqdm overhead low.
            if (epoch + 1) % 1000 == 0:
                pbar.update(1000)

    # Evaluation: predicted class = argmax over logits; report held-out accuracy.
    model.eval()
    with torch.no_grad():
        outputs = model(X_test)
        _, predicted = torch.max(outputs, 1)
        correct = (predicted == y_test).sum().item()
        accuracy = correct / y_test.size(0)
        print(f'测试集准确率: {accuracy * 100:.2f}%')
运行输出：
使用设备: cpu

=== 当前配置: 隐藏层=10, 学习率=0.01, 轮数=20000 ===
训练进度: 100%|██████████| 20000/20000 [00:31<00:00, 633.99epoch/s, Loss=0.0607]
测试集准确率: 96.67%

=== 当前配置: 隐藏层=20, 学习率=0.01, 轮数=20000 ===
训练进度: 100%|██████████| 20000/20000 [00:35<00:00, 565.66epoch/s, Loss=0.0607]
测试集准确率: 96.67%

=== 当前配置: 隐藏层=10, 学习率=0.05, 轮数=20000 ===
训练进度: 100%|██████████| 20000/20000 [00:27<00:00, 724.62epoch/s, Loss=0.0476]
测试集准确率: 100.00%

=== 当前配置: 隐藏层=10, 学习率=0.01, 轮数=10000 ===
训练进度: 100%|██████████| 10000/10000 [00:16<00:00, 607.61epoch/s, Loss=0.0866]
测试集准确率: 96.67%

@浙大疏锦行

相关推荐
傻啦嘿哟5 分钟前
Python3解释器深度解析与实战教程:从源码到性能优化的全路径探索
开发语言·python
Emma歌小白8 分钟前
groupby.agg去重后的展平列表通用方法flatten_unique
python
yiqieyouliyuwo44 分钟前
DAY39打卡
机器学习
修仙的人1 小时前
【开发环境】 VSCode 快速搭建 Python 项目开发环境
前端·后端·python
hhhh明1 小时前
Windows11 运行IsaacSim GPU Vulkan崩溃
vscode·python
在钱塘江1 小时前
LangGraph构建Ai智能体-12-高级RAG之自适应RAG
人工智能·python
站大爷IP1 小时前
Python列表基础操作全解析:从创建到灵活应用
python
失散131 小时前
深度学习——03 神经网络(3)-网络优化方法
网络·深度学习·神经网络
m0_603888711 小时前
LLaMA-Adapter V2 Parameter-Efficient Visual Instruction Model
人工智能·深度学习·ai·llama·论文速览
在钱塘江1 小时前
LangGraph构建Ai智能体-12-高级RAG之纠错式RAG
人工智能·python