
Week R4: LSTM Fire Temperature Prediction

Environment:

Language: Python 3.8.0

I. Code Walkthrough

1. Import packages and set up the GPU

python
import torch.nn.functional as F
import torch.nn as nn
import torch
import numpy as np
import pandas as pd
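
The heading also mentions setting up the GPU; the device is actually chosen in step 10, but as a minimal sketch the same check can run right after the imports (this `device` variable simply mirrors the one defined later):

python
# Use the GPU if one is available, otherwise fall back to the CPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)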

2. Load the data

python
data = pd.read_csv('woodpine2.csv')
data
	Time	Tem1	CO 1	Soot 1
0	0.000	25.0	0.000000	0.000000
1	0.228	25.0	0.000000	0.000000
2	0.456	25.0	0.000000	0.000000
3	0.685	25.0	0.000000	0.000000
4	0.913	25.0	0.000000	0.000000
...	...	...	...	...
5943	366.000	295.0	0.000077	0.000496
5944	366.000	294.0	0.000077	0.000494
5945	367.000	292.0	0.000077	0.000491
5946	367.000	291.0	0.000076	0.000489
5947	367.000	290.0	0.000076	0.000487
5948 rows × 4 columns

3. Visualize the dataset

python
import matplotlib.pyplot as plt
import seaborn as sns

# Render figures and saved images at higher resolution
plt.rcParams['figure.dpi'] = 500
plt.rcParams['savefig.dpi'] = 500

fig, ax = plt.subplots(1, 3, constrained_layout=True, figsize=(14, 3))
sns.lineplot(data=data['Tem1'], ax=ax[0])
sns.lineplot(data=data['CO 1'], ax=ax[1])
sns.lineplot(data=data['Soot 1'], ax=ax[2])
plt.show()
python
dataFrame = data.iloc[:, 1:]
dataFrame

4. Preprocess the dataset

python
from sklearn.preprocessing import MinMaxScaler

# Drop the Time column and keep the three feature columns
dataFrame = data.iloc[:, 1:].copy()

# Scale each column to the range [0, 1]
scaler = MinMaxScaler(feature_range=(0, 1))

# Tem1 is scaled last on purpose: after the loop the scaler keeps Tem1's
# min/max, which is needed later to inverse-transform the predicted temperatures
for i in ['CO 1', 'Soot 1', 'Tem1']:
    dataFrame[i] = scaler.fit_transform(dataFrame[i].values.reshape(-1, 1))

dataFrame.shape

(5948, 3)
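
Because 'Tem1' is the last column passed through fit_transform, the scaler object ends up holding the temperature column's min and max, which is what step 12 relies on when inverse-transforming the predictions. A quick sanity check (a sketch, not part of the original script):

python
# The scaler's remembered range should match the raw Tem1 column,
# since Tem1 was the last column fitted in the loop above
print(scaler.data_min_, scaler.data_max_)
print(data['Tem1'].min(), data['Tem1'].max())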

5. Construct X and y

python
width_X = 8   # use 8 consecutive time steps as input
width_Y = 1   # predict 1 time step ahead

X = []
y = []

# Slide a window of width_X over the data; the target is the Tem1 value
# of the step that immediately follows the window
for in_start in range(len(dataFrame)):
    in_end = in_start + width_X
    out_end = in_end + width_Y
    if out_end < len(dataFrame):
        X_ = np.array(dataFrame.iloc[in_start:in_end, :])
        y_ = np.array(dataFrame.iloc[in_end:out_end, 0:1])   # Tem1 only
        X.append(X_)
        y.append(y_)

X = np.array(X)
y = np.array(y)

X.shape, y.shape

((5939, 8, 3), (5939, 1, 1))
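
Each X[i] covers 8 consecutive time steps of all three features, and y[i] is the scaled Tem1 value of the step right after that window. A small check of the first window (a sketch to verify the alignment):

python
# X[0] should equal rows 0-7 of the scaled features,
# y[0] the scaled Tem1 value of row 8
print(np.allclose(X[0], dataFrame.iloc[0:8].values))
print(np.allclose(y[0], dataFrame.iloc[8:9, 0:1].values))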

Check the dataset for missing values

python
print(np.any(np.isnan(X)))
print(np.any(np.isnan(y)))

6. Split the dataset

python
# Use the first 5000 windows for training and the rest for testing
X_train = torch.tensor(X[:5000], dtype=torch.float32)
y_train = torch.tensor(y[:5000], dtype=torch.float32)

X_test = torch.tensor(X[5000:], dtype=torch.float32)
y_test = torch.tensor(y[5000:], dtype=torch.float32)

X_train.shape, y_train.shape

(torch.Size([5000, 8, 3]), torch.Size([5000, 1, 1]))

python
from torch.utils.data import TensorDataset, DataLoader

# shuffle=False keeps the temporal order of the windows
train_dl = DataLoader(TensorDataset(X_train, y_train),
                      batch_size=64,
                      shuffle=False)
test_dl = DataLoader(TensorDataset(X_test, y_test),
                     batch_size=64,
                     shuffle=False)
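
Before building the model it can help to pull one batch from the loader and confirm the shapes (a quick sketch, not part of the original flow):

python
# One training batch: features (64, 8, 3), targets (64, 1, 1)
x_batch, y_batch = next(iter(train_dl))
print(x_batch.shape, y_batch.shape)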

7. Build the model

python
class model_lstm(nn.Module):
    def __init__(self):
        super(model_lstm, self).__init__()

        self.lstm0 = nn.LSTM(input_size=3, hidden_size=320,
                             num_layers=1, batch_first=True)
        
        self.lstm1 = nn.LSTM(input_size=320, hidden_size=320,
                             num_layers=1, batch_first=True)
        self.fc0 = nn.Linear(320, 1)

    def forward(self, x):
        out, hidden1 = self.lstm0(x)
        out, _ = self.lstm1(out, hidden1)
        out = self.fc0(out)
        # Keep only the last time step; the LSTM otherwise returns a
        # prediction for every one of the 8 input steps
        return out[:, -1:, :]
model = model_lstm()
model
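
A dry run with a dummy batch is an easy way to confirm the model returns one value per sample before training starts (shapes here assume the window construction above):

python
# Dummy forward pass: 2 samples x 8 time steps x 3 features -> (2, 1, 1)
dummy = torch.randn(2, 8, 3)
print(model(dummy).shape)   # expected: torch.Size([2, 1, 1])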

8. Define the training function

python
def train(train_dl, model, loss_fn, opt, lr_scheduler=None):
    size = len(train_dl.dataset)
    num_batches = len(train_dl)
    train_loss = 0  # accumulated training loss
    for x, y in train_dl:
        x, y = x.to(device), y.to(device)
        # Compute the prediction error
        pred = model(x)          # network output
        loss = loss_fn(pred, y)  # difference between prediction and ground truth
        # Backpropagation
        opt.zero_grad()   # reset gradients
        loss.backward()   # backpropagate
        opt.step()        # update parameters

        # Accumulate the loss
        train_loss += loss.item()
    if lr_scheduler is not None:
        lr_scheduler.step()
        print("learning rate = {:.5f}".format(opt.param_groups[0]['lr']), end='  ')
    train_loss /= num_batches
    return train_loss

9. Define the test function

python
def test(dataloader, model, loss_fn):
    size = len(dataloader.dataset)   # size of the test set
    num_batches = len(dataloader)    # number of batches

    test_loss = 0
    # No training here, so disable gradient tracking to save memory
    with torch.no_grad():
        for x, y in dataloader:
            x, y = x.to(device), y.to(device)
            # Compute the loss
            y_pred = model(x)
            loss = loss_fn(y_pred, y)
            test_loss += loss.item()
    test_loss /= num_batches
    return test_loss

10. Train the model

python
# Use the GPU if available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Train the model
model = model_lstm()
model = model.to(device)
loss_fn = nn.MSELoss()   # loss function
learn_rate = 1e-1        # learning rate
opt = torch.optim.SGD(model.parameters(), lr=learn_rate, weight_decay=1e-4)
epochs = 50
train_loss = []
test_loss = []
lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(opt, epochs, last_epoch=-1)

for epoch in range(epochs):
    model.train()
    epoch_train_loss = train(train_dl, model, loss_fn, opt, lr_scheduler)

    model.eval()
    epoch_test_loss = test(test_dl, model, loss_fn)

    train_loss.append(epoch_train_loss)
    test_loss.append(epoch_test_loss)

    template = 'Epoch: {:2d}, Train loss: {:.5f}, Test loss: {:.5f}'
    print(template.format(epoch + 1, epoch_train_loss, epoch_test_loss))
print("=" * 20, 'Done', "=" * 70)

11. Model evaluation: loss curves

python
import matplotlib.pyplot as plt
plt.figure(figsize=(5, 3), dpi=120)

plt.plot(train_loss, label='LSTM Training Loss')
plt.plot(test_loss, label='LSTM Validation Loss')
plt.title('Training and Validation Loss')
plt.legend()
plt.show()

12. Use the model for prediction

python
# Predict on the test set and invert the scaling back to real temperatures
predicted_y_lstm = scaler.inverse_transform(model(X_test.to(device)).detach().cpu().numpy().reshape(-1, 1))
y_test_1 = scaler.inverse_transform(y_test.numpy().reshape(-1, 1))
y_test_one = [i[0] for i in y_test_1]
predicted_y_lstm_one = [i[0] for i in predicted_y_lstm]

plt.figure(figsize=(5, 3), dpi=120)
# Plot the real and predicted temperature curves for comparison
plt.plot(y_test_one[:2000], color='red', label='real_temp')
plt.plot(predicted_y_lstm_one[:2000], color='blue', label='prediction')
plt.title('Temperature Prediction')
plt.xlabel('Time step')
plt.ylabel('Temperature')
plt.legend()
plt.show()
python
from sklearn import metrics
'''
RMSE: root mean squared error, i.e. the square root of the MSE
R2: coefficient of determination, a simple measure of how well the model fits the data
'''
RMSE_lstm = metrics.mean_squared_error(y_test_one, predicted_y_lstm_one) ** 0.5
R2_lstm = metrics.r2_score(y_test_one, predicted_y_lstm_one)
print('RMSE: %.5f' % RMSE_lstm)
print('R2: %.5f' % R2_lstm)

RMSE: 7.01314

R2: 0.82595
