6 Time Series (How to Model Devices at Different Locations): GRU + Embedding

Algorithm competitions often involve time series that carry the same kind of signal but come from different objects, for example wind-power output or electricity usage of charging piles at different locations. If all of this data is pooled together and fed to the model with identical treatment, the model struggles to learn what distinguishes one source from another. A natural design is therefore to give the device at each location its own embedding. That is one of the main goals of this article: how do we model time-series data coming from devices at different locations?

GRU: a recurrent neural network module, commonly used for processing time-series data.
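As a quick shape check, here is a minimal sketch (mine, not from the competition code) of `nn.GRU` with `batch_first=True`, using the same sizes as the model below:

```python
import torch
import torch.nn as nn

# Toy batch: 32 sequences, 14 time steps, 1 feature per step
gru = nn.GRU(input_size=1, hidden_size=56, num_layers=1, batch_first=True)
x = torch.randn(32, 14, 1)
output, hidden = gru(x)
print(output.shape)  # torch.Size([32, 14, 56]): hidden state at every time step
print(hidden.shape)  # torch.Size([1, 32, 56]): final hidden state per layer
```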

Embedding: a PyTorch class (nn.Embedding) that maps discrete integer IDs to continuous vector representations.
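A minimal sketch of what `nn.Embedding` does, with sizes chosen to match the 9 lines and 2-dimensional embedding used later:

```python
import torch
import torch.nn as nn

# Map 9 integer line IDs to 2-dimensional dense vectors
embedding = nn.Embedding(num_embeddings=9, embedding_dim=2)
ids = torch.tensor([0, 3, 8])   # integer-encoded line labels
print(embedding(ids).shape)     # torch.Size([3, 2])
```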

We use data from the following competition as a processing demo:

2023中国华录杯数据湖算法大赛 (2023 China Hualu Cup Data Lake Algorithm Competition)

Import packages

```python
import os
import sys
import gc
import argparse
import warnings

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from sklearn.preprocessing import StandardScaler, MinMaxScaler, LabelEncoder
from sklearn.model_selection import train_test_split
from tqdm import tqdm

warnings.filterwarnings('ignore')
```

Load data

```python
class Config():
    # data_path = '../data/data1/train/power.csv'
    timestep = 14             # look-back window: how many time steps the model sees
    batch_size = 32           # batch size
    feature_size = 1          # features per time step; here just 1, the daily passenger flow
    hidden_size = 56          # GRU hidden size
    output_size = 1           # single-output task: predict one future value
    num_layers = 1            # number of GRU layers
    epochs = 10               # number of training epochs
    best_loss = float('inf')  # best validation loss so far; +inf so the first epoch can save
    learning_rate = 0.00003   # learning rate (the optimizer below actually uses 0.01)
    model_name = 'gru'        # model name
    save_path = './{}.pth'.format(model_name)  # path for saving the best model
config = Config()

train_df = pd.read_csv('../初赛数据/phase1_train.csv')
test_df = pd.read_csv('../初赛数据/phase1_test.csv')


# Integer-encode the line IDs (L01, L02, ...) so they can index an embedding table
labelEncoder = LabelEncoder()
train_df['line_label'] = labelEncoder.fit_transform(train_df['line'])
#labelEncoder.transform(test_df['line'])

train_df = train_df.sort_values(["line",'date']).reset_index(drop=True)

train_df.line.unique()
```

```text
array(['L01', 'L02', 'L03', 'L04', 'L05', 'L06', 'L08', 'L09', 'L10'],
      dtype=object)
```

Use the previous 14 days to predict the day 7 steps ahead. Note from the output above that only 9 distinct lines appear (L07 is absent), which is why the embedding table later has 9 rows. The window slides forward one day at a time:

days 1-14 → predict day 21 (14 + 7)
days 2-15 → predict day 22 (14 + 7 + 1)
...
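To make the lag construction concrete, here is a small sketch on toy data (mine, not the competition file) of how `groupby().shift()` builds that window:

```python
import pandas as pd

toy = pd.DataFrame({
    'line': ['L01'] * 25,
    'date': pd.date_range('2023-01-01', periods=25),
    'passenger_flow': range(25),
})
# shift(7) .. shift(20): the 14-day window that ends 7 days before each label date
for i in range(14):
    toy[f'shift_{7+i}'] = toy.groupby('line')['passenger_flow'].shift(7 + i)
print(toy.dropna().head(2))  # first rows with a complete look-back window
```

The competition pipeline below does the same thing on the real data: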

```python
# Build 14 lag features per line: shift_7 .. shift_20 form the 14-day window
# that ends 7 days before the label date (so the target is 7 days ahead)
his_pow_feats = []
for i in range(config.timestep):
    train_df[f'shift_{7+i}'] = train_df.groupby("line_label")['passenger_flow'].shift(7+i)
    his_pow_feats.append(f'shift_{7+i}')
# Keep only rows whose look-back window is complete (no NaNs from the shifts)
train_df_drop_na = train_df[train_df[his_pow_feats].isna().sum(axis=1)==0]


class MyDataSet(Dataset):
    def __init__(self, train_df_drop_na, his_pow_feats):
        """Wrap the lag-feature dataframe as a PyTorch dataset."""
        self.train_df = train_df_drop_na.reset_index(drop=True)
        self.his_pow_feats = his_pow_feats  # store the feature names instead of relying on a global

    def __len__(self):
        return len(self.train_df)

    def __getitem__(self, item):
        label = self.train_df.loc[item, 'passenger_flow']
        id_encoder = self.train_df.loc[item, 'line_label']
        his_feats_list = self.train_df.loc[item, self.his_pow_feats].values.tolist()

        return {
            # integer line ID for the embedding layer
            "input_ids": torch.tensor(id_encoder, dtype=torch.long),
            # (timestep, 1) float tensor of lagged passenger flow
            "his_feats": torch.as_tensor(his_feats_list, dtype=torch.float32).unsqueeze(-1),
            # regression target
            "labels": torch.tensor(label, dtype=torch.float32)}


RANDOM_SEED = 1023
df_train, df_test = train_test_split(train_df_drop_na, test_size=0.2, random_state=RANDOM_SEED)
df_val, df_test = train_test_split(df_test, test_size=0.5, random_state=RANDOM_SEED)
print(df_train.shape, df_val.shape, df_test.shape)
 
def create_data_loader(df, his_pow_feats, batch_size=32):
    ds = MyDataSet(df, his_pow_feats)
    return DataLoader(ds, batch_size=batch_size)
BATCH_SIZE = 32
train_data_loader = create_data_loader(df_train,his_pow_feats=his_pow_feats,batch_size=BATCH_SIZE)
val_data_loader = create_data_loader(df_val, his_pow_feats=his_pow_feats,batch_size=BATCH_SIZE)
test_data_loader = create_data_loader(df_test,his_pow_feats=his_pow_feats,batch_size=BATCH_SIZE)
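
# Sanity check (not in the original pipeline): pull one batch and confirm shapes.
# his_feats should be (batch, timestep, 1); input_ids (batch,); labels (batch,)
sample_batch = next(iter(train_data_loader))
print(sample_batch['his_feats'].shape,
      sample_batch['input_ids'].shape,
      sample_batch['labels'].shape)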


# 7. Define the GRU network
class GRUModel(nn.Module):
    def __init__(self, feature_size, hidden_size, num_layers, output_size):
        super(GRUModel, self).__init__()
        self.hidden_size = hidden_size  # hidden size
        self.num_layers = num_layers    # number of GRU layers
        # feature_size is the number of features per time step (1 here)
        self.gru = nn.GRU(feature_size, hidden_size, num_layers,
                          batch_first=True, bidirectional=True)
        # bidirectional GRU, so its outputs are 2 * hidden_size wide
        self.layer_norm = nn.LayerNorm(hidden_size * 2)

        # the head sees the GRU summary (2 * hidden_size) plus the 2-dim line embedding
        self.fc = nn.Linear(hidden_size * 2 + 2, output_size)

        # 9 distinct lines (L01-L10 with L07 absent), each mapped to a 2-dim vector
        self.embedding = nn.Embedding(9, 2)

    def forward(self, x, id_label, hidden=None):
        batch_size = x.shape[0]  # x: (batch, timestep, feature_size)
        # initial hidden state: (num_directions * num_layers, batch, hidden_size)
        h_0 = x.data.new(2 * self.num_layers, batch_size, self.hidden_size).fill_(0).float()
        if hidden is not None:
            h_0 = hidden

        # GRU pass: output is (batch, timestep, 2 * hidden_size)
        output, hidden = self.gru(x, h_0)
        output = self.layer_norm(output)

        # keep only the last time step as the summary of the sequence
        last_output = output[:, -1, :]

        # look up the per-line embedding and concatenate: (batch, 2 * hidden_size + 2)
        embed = self.embedding(id_label)
        concatenated = torch.cat((embed, last_output), dim=1)

        # fully connected head, output shape (batch_size, output_size)
        output = self.fc(concatenated)
        return output

model = GRUModel(config.feature_size, config.hidden_size, config.num_layers, config.output_size)  # instantiate the GRU network
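
# Sanity check (not in the original pipeline): one dummy forward pass to verify shapes.
# Four fake samples with config.timestep steps and four hypothetical line IDs.
dummy_x = torch.randn(4, config.timestep, config.feature_size)
dummy_ids = torch.tensor([0, 1, 2, 3])
print(model(dummy_x, dummy_ids).shape)  # expected: torch.Size([4, 1])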

loss_function = nn.L1Loss()  # MAE loss
# class MAPELoss(nn.Module):
#     def __init__(self):
#         super(MAPELoss, self).__init__()

#     def forward(self, y_pred, y_true):
#         epsilon = 1e-8  # small constant to avoid division by zero
#         absolute_error = torch.abs(y_true - y_pred)
#         relative_error = absolute_error / (torch.abs(y_true) + epsilon)
#         mape = torch.mean(relative_error) * 100
#         return mape
# loss_function = MAPELoss()

optimizer = torch.optim.AdamW(model.parameters(), lr=0.01)  # optimizer
 
# 8. Model training
for epoch in range(config.epochs):
    model.train()
    running_loss = 0
    train_bar = tqdm(train_data_loader)  # progress bar
    for data in train_bar:
        x_train, y_train = data['his_feats'], data['labels']  # unpack the batch dict
        optimizer.zero_grad()
        y_train_pred = model(x_train, data['input_ids'])
        loss = loss_function(y_train_pred, y_train.reshape(-1, 1))
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        train_bar.desc = "train epoch[{}/{}] loss:{:.3f}".format(epoch + 1,
                                                                 config.epochs,
                                                                 loss)

    # Validation: average the loss over all batches rather than keeping only the last batch
    model.eval()
    test_loss = 0
    with torch.no_grad():
        test_bar = tqdm(val_data_loader)
        for data in test_bar:
            x_test, y_test = data['his_feats'], data['labels']
            y_test_pred = model(x_test, data['input_ids'])
            test_loss += loss_function(y_test_pred, y_test.reshape(-1, 1)).item()
    test_loss /= len(val_data_loader)

    # Save the checkpoint whenever the validation loss improves
    if test_loss < config.best_loss:
        config.best_loss = test_loss
        torch.save(model.state_dict(), config.save_path)

print('Finished Training')
```
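
Once training finishes, here is a minimal sketch (my addition, not part of the original post) of how one might reload the best checkpoint and score the held-out test split:

```python
# Reload the best checkpoint saved during training
model.load_state_dict(torch.load(config.save_path))
model.eval()

preds, trues = [], []
with torch.no_grad():
    for data in test_data_loader:
        y_pred = model(data['his_feats'], data['input_ids'])
        preds.append(y_pred.squeeze(-1))
        trues.append(data['labels'])

# MAE on the held-out split, matching the L1 training objective
preds, trues = torch.cat(preds), torch.cat(trues)
print('test MAE:', torch.mean(torch.abs(preds - trues)).item())
```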