Day 40: Review Day

Train with an MLP neural network and make the code cleaner and better organized, applying knowledge points covered earlier, such as the class __call__ method, model evaluation, GPU training, and model architecture visualization.

First, outline the steps:

Code refactoring and modularization: wrap preprocessing, model construction, training, and evaluation into classes or functions to improve readability and reusability.

Build the MLP with PyTorch, supporting both GPU and CPU, and use the __call__ method to wrap training and inference (a minimal sketch of this pattern appears right after this list).

Add model architecture visualization (torchinfo).

A standard training pipeline: converting data to tensors, DataLoaders, the training loop, and validation.

Complete evaluation: accuracy, F1-score, confusion matrix, etc.

Code formatting: clear comments, consistent naming, no duplicate imports, and path handling (os.path plus raw strings to avoid escaping issues).
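As a quick refresher on the __call__ knowledge point, here is a standalone toy sketch (not part of the project code): defining __call__ makes an instance invocable like a function, which is exactly how the trainer class below wraps a single training/validation step.

class Adder:
    """Toy example: the instance itself becomes callable via __call__."""
    def __init__(self, base):
        self.base = base

    def __call__(self, x):
        # Calling add_five(x) is equivalent to add_five.__call__(x)
        return self.base + x

add_five = Adder(5)
print(add_five(3))  # prints 8

With that refresher in place, the complete refactored script follows.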

# ===================== 1. Imports (deduplicated + organized) =====================
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader, TensorDataset
from torchinfo import summary  # model architecture visualization
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, f1_score, confusion_matrix
from sklearn.preprocessing import StandardScaler  # standardize continuous features (essential for MLPs)
 
# Global configuration (managed in one place)
warnings.filterwarnings("ignore")
plt.rcParams['font.sans-serif'] = ['SimHei']  # font setting for CJK text in plots
plt.rcParams['axes.unicode_minus'] = False
DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")  # device configuration
SEED = 42  # random seed (for reproducibility)
np.random.seed(SEED)
torch.manual_seed(SEED)
if torch.cuda.is_available():
    torch.cuda.manual_seed(SEED)
 
# ===================== 2. Path configuration (no escaping issues + cross-platform) =====================
DATA_PATH = r"E:\study\PythonStudy\data.csv"  # raw string avoids backslash escaping
assert os.path.exists(DATA_PATH), f"Data file not found: {DATA_PATH}"
 
# ===================== 3. Data preprocessing (wrapped in a reusable function) =====================
def preprocess_data(data_path):
    """
    Data preprocessing: all cleaning, encoding, and missing-value imputation logic
    :param data_path: path to the data file
    :return: standardized feature matrix X, label vector y, feature names
    """
    # Read data
    data = pd.read_csv(data_path)
    original_cols = data.columns.tolist()  # snapshot of column names, used later to identify one-hot columns
    
    # 1. Encode string variables
    # Label-encode Home Ownership
    home_ownership_mapping = {
        'Own Home': 1,
        'Rent': 2,
        'Have Mortgage': 3,
        'Home Mortgage': 4
    }
    data['Home Ownership'] = data['Home Ownership'].map(home_ownership_mapping)
    
    # Label-encode Years in current job
    years_in_job_mapping = {
        '< 1 year': 1,
        '1 year': 2,
        '2 years': 3,
        '3 years': 4,
        '4 years': 5,
        '5 years': 6,
        '6 years': 7,
        '7 years': 8,
        '8 years': 9,
        '9 years': 10,
        '10+ years': 11
    }
    data['Years in current job'] = data['Years in current job'].map(years_in_job_mapping)
    
    # One-hot encode Purpose and convert the resulting bool columns to int
    data = pd.get_dummies(data, columns=['Purpose'])
    # Identify the newly added one-hot columns and cast them to int
    new_cols = [col for col in data.columns if col not in original_cols]
    for col in new_cols:
        data[col] = data[col].astype(int)
    
    # Map Term to 0/1 and rename the column
    term_mapping = {'Short Term': 0, 'Long Term': 1}
    data['Term'] = data['Term'].map(term_mapping)
    data.rename(columns={'Term': 'Long Term'}, inplace=True)
    
    # 2. Fill missing values (mode for numeric features)
    continuous_features = data.select_dtypes(include=['int64', 'float64']).columns.tolist()
    for feat in continuous_features:
        mode_val = data[feat].mode()[0]
        data[feat] = data[feat].fillna(mode_val)  # plain assignment instead of chained inplace fillna (safer with pandas copy-on-write)
    
    # 3. Split features/label and standardize (MLPs are sensitive to feature scale)
    X = data.drop(['Credit Default'], axis=1)
    y = data['Credit Default']
    scaler = StandardScaler()  # standardize: zero mean, unit variance
    X_scaled = scaler.fit_transform(X)
    
    return X_scaled, y, X.columns.tolist()  # standardized features, labels, feature names
 
# ===================== 4. MLP model definition (subclassing nn.Module) =====================
class CreditDefaultMLP(nn.Module):
    """
    MLP model for credit default prediction
    :param input_dim: input feature dimension
    :param hidden_dims: list of hidden layer sizes, e.g. [128, 64]
    :param dropout: dropout probability (to reduce overfitting)
    """
    def __init__(self, input_dim, hidden_dims=[128, 64], dropout=0.2):
        super().__init__()
        # Build hidden layers
        layers = []
        prev_dim = input_dim
        for dim in hidden_dims:
            layers.append(nn.Linear(prev_dim, dim))
            layers.append(nn.ReLU())  # activation
            layers.append(nn.Dropout(dropout))  # dropout regularization
            prev_dim = dim
        # Output layer (binary classification, Sigmoid)
        layers.append(nn.Linear(prev_dim, 1))
        layers.append(nn.Sigmoid())
        
        self.mlp = nn.Sequential(*layers)  # wrap all layers in a Sequential
    
    def forward(self, x):
        """前向传播(nn.Module核心方法)"""
        return self.mlp(x)
 
# ===================== 5. Trainer class (wraps training logic; __call__ runs one step) =====================
class MLPTrainer:
    def __init__(self, model, optimizer, criterion, device):
        self.model = model.to(device)
        self.optimizer = optimizer
        self.criterion = criterion
        self.device = device
        self.train_losses = []  # record training losses
        self.val_losses = []    # record validation losses
    
    def __call__(self, x_batch, y_batch, train_mode=True):
        """
        单步训练/验证(__call__让实例可像函数调用)
        :param x_batch: 批次特征张量
        :param y_batch: 批次标签张量
        :param train_mode: True=训练(反向传播),False=验证(无梯度)
        :return: 批次损失
        """
        x_batch = x_batch.to(self.device, dtype=torch.float32)
        y_batch = y_batch.to(self.device, dtype=torch.float32).unsqueeze(1)  # 适配输出维度
        
        if train_mode:
            self.model.train()  # 训练模式(启用Dropout)
            self.optimizer.zero_grad()  # 清空梯度
            outputs = self.model(x_batch)
            loss = self.criterion(outputs, y_batch)
            loss.backward()  # 反向传播
            self.optimizer.step()  # 更新参数
        else:
            self.model.eval()  # 验证模式(关闭Dropout)
            with torch.no_grad():  # 关闭梯度,省显存
                outputs = self.model(x_batch)
                loss = self.criterion(outputs, y_batch)
        
        return loss.item()
 
# ===================== 6. Evaluation function (standard classification metrics) =====================
def evaluate_model(model, dataloader, device):
    """
    模型评估:计算准确率、F1-score、混淆矩阵
    :return: 评估指标字典
    """
    model.eval()
    all_preds = []
    all_labels = []
    
    with torch.no_grad():
        for x_batch, y_batch in dataloader:
            x_batch = x_batch.to(device, dtype=torch.float32)
            outputs = model(x_batch)
            preds = (outputs > 0.5).int()  # Sigmoid output > 0.5 counts as the positive class
            all_preds.extend(preds.cpu().numpy().flatten())
            all_labels.extend(y_batch.numpy().flatten())
    
    # Compute metrics
    accuracy = accuracy_score(all_labels, all_preds)
    f1 = f1_score(all_labels, all_preds, average='weighted')  # weighted F1 (handles class imbalance)
    cm = confusion_matrix(all_labels, all_preds)
    
    return {
        'accuracy': accuracy,
        'f1_score': f1,
        'confusion_matrix': cm,
        'preds': all_preds,
        'labels': all_labels
    }
 
# ===================== 7. Result visualization (reusable function) =====================
def plot_results(train_losses, val_losses, eval_metrics):
    """
    Visualize training losses and evaluation results
    """
    fig, axes = plt.subplots(1, 2, figsize=(14, 6))
    
    # Subplot 1: training/validation loss curves
    axes[0].plot(train_losses, label='Training loss', color='blue')
    axes[0].plot(val_losses, label='Validation loss', color='red')
    axes[0].set_xlabel('Epoch')
    axes[0].set_ylabel('Loss')
    axes[0].set_title('MLP training/validation loss')
    axes[0].legend()
    axes[0].grid(True, alpha=0.3)
    
    # Subplot 2: confusion matrix
    cm = eval_metrics['confusion_matrix']
    sns.heatmap(cm, annot=True, fmt='d', cmap='Blues', ax=axes[1])
    axes[1].set_xlabel('Predicted label')
    axes[1].set_ylabel('True label')
    axes[1].set_title(f'Confusion matrix (accuracy: {eval_metrics["accuracy"]:.4f})')
    
    plt.tight_layout()
    plt.savefig('credit_default_mlp_results.png', dpi=300, bbox_inches='tight')
    plt.show()
 
# ===================== 8. Main pipeline (entry point) =====================
def main():
    # Step 1: data preprocessing
    print("===== Data preprocessing =====")
    X_scaled, y, feature_names = preprocess_data(DATA_PATH)
    input_dim = X_scaled.shape[1]  # input feature dimension
    print(f"Preprocessing done. Input dimension: {input_dim}, number of samples: {X_scaled.shape[0]}")
    
    # Step 2: train/test split + wrap in TensorDataset
    X_train, X_test, y_train, y_test = train_test_split(
        X_scaled, y, test_size=0.2, random_state=SEED
    )
    # Convert to PyTorch tensors
    train_dataset = TensorDataset(torch.from_numpy(X_train), torch.from_numpy(y_train.values))
    test_dataset = TensorDataset(torch.from_numpy(X_test), torch.from_numpy(y_test.values))
    # Build DataLoaders (batched loading)
    train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=32, shuffle=False)
    
    # Step 3: initialize model + optimizer + loss function
    model = CreditDefaultMLP(input_dim=input_dim, hidden_dims=[128, 64], dropout=0.2)
    optimizer = optim.Adam(model.parameters(), lr=1e-3)  # Adam optimizer
    criterion = nn.BCELoss()  # binary cross-entropy loss (matches the Sigmoid output)
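    # Optional note: an alternative is to drop the Sigmoid from the model and use
    # nn.BCEWithLogitsLoss here instead, which is numerically more stable; kept as-is
    # to match the Sigmoid output defined above.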
    
    # Step 4: model architecture visualization
    print("\n===== Model architecture =====")
    summary(model, input_size=(32, input_dim), device=DEVICE)  # 32 = batch_size
    
    # Step 5: initialize the trainer
    trainer = MLPTrainer(model, optimizer, criterion, DEVICE)
    
    # Step 6: train the model
    print("\n===== Training =====")
    epochs = 50  # number of epochs
    for epoch in range(epochs):
        epoch_train_loss = 0.0
        # training batches
        for x_batch, y_batch in train_loader:
            batch_loss = trainer(x_batch, y_batch, train_mode=True)
            epoch_train_loss += batch_loss
        avg_train_loss = epoch_train_loss / len(train_loader)
        trainer.train_losses.append(avg_train_loss)
        
        # validation batches
        epoch_val_loss = 0.0
        for x_batch, y_batch in test_loader:
            batch_loss = trainer(x_batch, y_batch, train_mode=False)
            epoch_val_loss += batch_loss
        avg_val_loss = epoch_val_loss / len(test_loader)
        trainer.val_losses.append(avg_val_loss)
        
        # Print training progress
        if (epoch + 1) % 5 == 0:
            print(f"Epoch [{epoch+1}/{epochs}] | train loss: {avg_train_loss:.4f} | val loss: {avg_val_loss:.4f}")
    
    # Step 7: model evaluation
    print("\n===== Evaluation =====")
    eval_metrics = evaluate_model(model, test_loader, DEVICE)
    print(f"Accuracy: {eval_metrics['accuracy']:.4f}")
    print(f"F1-score: {eval_metrics['f1_score']:.4f}")
    
    # Step 8: visualize results
    plot_results(trainer.train_losses, trainer.val_losses, eval_metrics)
    
    # Step 9: save the model
    torch.save(model.state_dict(), 'credit_default_mlp.pth')
    print("\n===== 训练完成,模型已保存为 credit_default_mlp.pth =====")
 
# ===================== 9. Run the main pipeline =====================
if __name__ == "__main__":
    main()
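
For completeness, a minimal inference sketch follows. It is only an illustration under the assumption that the saved weights are loaded back into the same architecture and that new samples go through the same preprocessing and standardization as training; in practice the fitted StandardScaler should be persisted as well, which the script above does not do. The slice X_scaled[:5] is a hypothetical stand-in for new samples.

# ===================== Optional: loading the saved model for inference (sketch) =====================
# Assumes the classes and functions defined above are available (same file or imported).
X_scaled, y, feature_names = preprocess_data(DATA_PATH)        # same preprocessing as training
model = CreditDefaultMLP(input_dim=X_scaled.shape[1])          # same architecture (default hidden_dims/dropout)
model.load_state_dict(torch.load('credit_default_mlp.pth', map_location=DEVICE))
model.to(DEVICE).eval()
with torch.no_grad():
    x_new = torch.from_numpy(X_scaled[:5]).to(DEVICE, dtype=torch.float32)  # hypothetical "new" samples
    probs = model(x_new).cpu().numpy().flatten()
print("Predicted default probabilities:", probs)
print("Predicted labels:", (probs > 0.5).astype(int))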

@浙大疏锦行
