day36 Review Day (Credit Neural Network)

**Assignment:** Revisit the earlier credit project and train it with a neural network, applying the knowledge points covered so far to make the code cleaner and better organized.

```python
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
import time 
from tqdm import tqdm
import warnings
warnings.filterwarnings("ignore")

# Device: use Apple's MPS backend if available, otherwise fall back to CPU
device = torch.device("mps" if torch.backends.mps.is_available() else "cpu")

# Load the data
data = pd.read_csv('data.csv')

# Select continuous features
continuous_features = data.select_dtypes(include=['float64', 'int64']).columns.to_list()

# Select discrete (categorical) features
discrete_features = data.select_dtypes(include=['object']).columns.to_list()

# Fill missing values in the continuous features with the column mode
for feature in continuous_features:
    data[feature] = data[feature].fillna(data[feature].mode()[0])

# Label encoding for the ordinal / binary categorical columns
mapping = {
    'Home Ownership': {
        'Own Home': 1,
        'Rent': 2,
        'Have Mortgage': 3,
        'Home Mortgage': 4
    },
    'Years in current job': {
        '< 1 year': 0,
        '1 year': 1,
        '2 years': 2,
        '3 years': 3,
        '4 years': 4,
        '5 years': 5,
        '6 years': 6,
        '7 years': 7,
        '8 years': 8,
        '9 years': 9,
        '10+ years': 10
    },
    'Term': {
        'Short Term': 0,
        'Long Term': 1
    }
}
data['Home Ownership'] = data['Home Ownership'].map(mapping['Home Ownership'])
data['Years in current job'] = data['Years in current job'].map(mapping['Years in current job'])
# Guard against any remaining missing values in the mapped column
data['Years in current job'] = data['Years in current job'].fillna(data['Years in current job'].mode()[0])
data['Term'] = data['Term'].map(mapping['Term'])
# Rename the binary Term column so a value of 1 reads as "Long Term"
data.rename(columns={'Term': 'Long Term'}, inplace=True)

# One-hot encode the Purpose column, then cast the new dummy columns from bool to int
cols_before_encoding = data.columns
data = pd.get_dummies(data, columns=['Purpose'])
one_hot_cols = [col for col in data.columns if col not in cols_before_encoding]
data[one_hot_cols] = data[one_hot_cols].astype(int)

# Separate features and labels
X = data.drop(['Credit Default'], axis=1)
y = data['Credit Default']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Min-max normalization: fit on the training set only to avoid data leakage
scaler = MinMaxScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)

# Convert to tensors and move them to the target device
X_train = torch.FloatTensor(X_train).to(device)
X_test = torch.FloatTensor(X_test).to(device)
y_train = torch.LongTensor(y_train.values).to(device)
y_test = torch.LongTensor(y_test.values).to(device)

# Define the MLP
class MLP(nn.Module):
    def __init__(self, input_dim=31):
        super().__init__()
        self.fc1 = nn.Linear(input_dim, 64)  # input layer -> first hidden layer
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(64, 32)         # first hidden layer -> second hidden layer
        self.fc3 = nn.Linear(32, 2)          # second hidden layer -> output layer

    def forward(self, x):
        out = self.fc1(x)
        out = self.relu(out)
        out = self.fc2(out)
        out = self.relu(out)
        out = self.fc3(out)
        return out
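
# Note: forward() returns raw logits; nn.CrossEntropyLoss applies log-softmax
# internally, so no explicit softmax layer is needed at the output.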

model = MLP(input_dim=X_train.shape[1]).to(device)

# Loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.01)

# Model training
num_epochs = 100

losses = []
epoch_list = []

start_time = time.time()

with tqdm(total=num_epochs, desc='Training progress', unit='epoch') as pbar:
    for epoch in range(num_epochs):
        # Forward pass
        outputs = model(X_train)
        loss = criterion(outputs, y_train)

        # Backward pass and optimization
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Record the loss and refresh the progress-bar postfix
        losses.append(loss.item())
        epoch_list.append(epoch + 1)
        pbar.set_postfix({'Loss': f'{loss.item():.4f}'})

        # Advance the progress bar every 10 epochs
        if (epoch + 1) % 10 == 0:
            pbar.update(10)

    # Make sure the progress bar reaches 100%
    if pbar.n < num_epochs:
        pbar.update(num_epochs - pbar.n)

time_all = time.time() - start_time
print(f'Training time: {time_all:.2f} seconds')

# Model inference on the test set
model.eval()
with torch.no_grad():
    outputs = model(X_test)
    _, predicted = torch.max(outputs, 1)
    accuracy = (predicted == y_test).sum().item() / len(y_test)
    print(f'Accuracy: {accuracy*100:.2f}%')

# Plot the training loss curve
plt.figure(figsize=(10, 6))
plt.plot(epoch_list, losses, label='Training Loss')
plt.title('Training Loss Curve')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.grid(True)
plt.show()
```

Training progress: 100%|██████████| 100/100 [00:00<00:00, 201.31epoch/s, Loss=0.5917]

Training time: 0.50 seconds

Accuracy: 70.60%
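Following up on the assignment's goal of making the code more standard, here is a minimal refactoring sketch that factors the model definition and full-batch training loop into reusable functions. The function names (`build_mlp`, `train`) and the synthetic demo tensors are illustrative choices of mine, not part of the original project; in practice you would pass in the preprocessed tensors from the script above.

```python
import torch
import torch.nn as nn
import torch.optim as optim


def build_mlp(input_dim: int, hidden_dims=(64, 32), num_classes: int = 2) -> nn.Sequential:
    """Build the same two-hidden-layer MLP, but with nn.Sequential."""
    layers = []
    prev = input_dim
    for h in hidden_dims:
        layers += [nn.Linear(prev, h), nn.ReLU()]
        prev = h
    layers.append(nn.Linear(prev, num_classes))  # raw logits out
    return nn.Sequential(*layers)


def train(model, X, y, num_epochs=100, lr=0.01, device=torch.device("cpu")):
    """Full-batch training loop; returns the per-epoch loss history."""
    model = model.to(device)
    X, y = X.to(device), y.to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=lr)
    history = []
    for _ in range(num_epochs):
        optimizer.zero_grad()
        loss = criterion(model(X), y)
        loss.backward()
        optimizer.step()
        history.append(loss.item())
    return history


if __name__ == "__main__":
    # Random data standing in for the preprocessed data.csv tensors
    X_demo = torch.randn(200, 31)          # 31 features, as in the post
    y_demo = torch.randint(0, 2, (200,))   # binary labels
    model = build_mlp(input_dim=31)
    losses = train(model, X_demo, y_demo, num_epochs=20)
    print(f"first/last loss: {losses[0]:.4f} -> {losses[-1]:.4f}")
```

Keeping hyperparameters (hidden sizes, learning rate, number of epochs) as function arguments makes it easier to rerun the experiment with different settings without editing the script body.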

@浙大疏锦行
