Introduction:
This post walks through a small PyTorch example that uses an LSTM to learn the mapping from sin(x) to cos(x):
sin(x): the input data
cos(x): the corresponding labels
The project consists of two files:
main.py: model training, evaluation, and saving of the parameters
lstm.py: model definition
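Concretely, each training sample is one window of 256 points: the sin values form the input sequence and the cos values at the same points are the targets. The standalone sketch below (variable names here are illustrative, not part of the project) shows the [batch, seq_num, input_size] = [1, 256, 1] layout that get_data in main.py builds later:

import numpy as np
import torch

# one window of 256 points over [0, 2*pi)
steps = np.linspace(0, 2 * np.pi, 256, dtype=np.float32)
x = torch.from_numpy(np.sin(steps)[np.newaxis, :, np.newaxis])  # input:  [1, 256, 1]
y = torch.from_numpy(np.cos(steps)[np.newaxis, :, np.newaxis])  # label:  [1, 256, 1]
print(x.shape, y.shape)  # torch.Size([1, 256, 1]) torch.Size([1, 256, 1])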
Contents:
1 lstm.py
# -*- coding: utf-8 -*-
"""
Created on Tue Aug  8 14:01:15 2023
@author: chengxf2
"""
import torch
import torch.nn as nn


class LSTM(nn.Module):

    def __init__(self, input_dim, hidden_dim, num_lay, b_first):
        super(LSTM, self).__init__()
        self.lstm = nn.LSTM(input_size=input_dim, hidden_size=hidden_dim,
                            num_layers=num_lay, batch_first=b_first)
        self.linear = nn.Linear(hidden_dim, 1)

    def forward(self, X):
        # X.shape: [batch_size=1, seq_num=256, input_size=1]
        output, (hidden, cell) = self.lstm(X)
        # output.shape: [batch_size, seq_num, hidden_dim=64]
        # hidden.shape: [num_layers, batch_size, hidden_dim]
        outs = []
        seq_num = output.size(1)
        # map the hidden state of every time step to a scalar prediction
        for time_step in range(seq_num):
            # h.shape: [batch_size, hidden_dim]
            h = output[:, time_step, :]
            out = self.linear(h)
            outs.append(out)
        # stack the per-step predictions along a new (time) dimension
        # pred.shape: [batch_size, seq_num, 1]
        pred = torch.stack(outs, dim=1)
        return pred
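A quick shape check can be run before training (a minimal sketch using a random dummy batch; the hyperparameters mirror those set in main.py below):

import torch
from lstm import LSTM

model = LSTM(input_dim=1, hidden_dim=64, num_lay=2, b_first=True)
dummy = torch.randn(1, 256, 1)   # [batch_size, seq_num, input_size]
pred = model(dummy)
print(pred.shape)                # torch.Size([1, 256, 1])

Since nn.Linear acts on the last dimension, the per-time-step loop in forward could also be replaced by a single call self.linear(output), which yields the same [batch_size, seq_num, 1] tensor; the explicit loop is kept here because it makes the per-step prediction easier to follow.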
2 main.py
import numpy as np
from matplotlib import pyplot as plt
import torch
from lstm import LSTM
import torch.nn as nn
from torch.nn import functional as F
from torch import optim
import time
def showDiff(pred, label, steps):
    # plot prediction and ground truth on the same axes
    plt.figure()
    plt.title('prediction vs. ground truth', fontsize=18)
    plt.plot(steps, pred.cpu().data.numpy().flatten(), color='r', label='prediction')
    plt.plot(steps, label.cpu().data.numpy().flatten(), color='g', label='ground truth')
    plt.legend(loc='best')
    plt.show()
def get_data(epoch):
    TIME_STEP = 256
    # one window of length 2*pi, shifted forward by pi each epoch
    start, end = epoch * np.pi, epoch * np.pi + 2 * np.pi
    steps = np.linspace(start, end, TIME_STEP, dtype=np.float32)
    sin_x = np.sin(steps)
    cos_x = np.cos(steps)
    # [batch, seq_num, input_size] = (1, 256, 1)
    if torch.cuda.is_available():
        sinx_torch = torch.from_numpy(sin_x[np.newaxis, :, np.newaxis]).cuda()
        cosx_label = torch.from_numpy(cos_x[np.newaxis, :, np.newaxis]).cuda()
    else:
        sinx_torch = torch.from_numpy(sin_x[np.newaxis, :, np.newaxis])
        cosx_label = torch.from_numpy(cos_x[np.newaxis, :, np.newaxis])
    return sinx_torch, cosx_label, steps
def eval(model):
    # model.eval() is equivalent to self.train(False): evaluation mode.
    # Layers used only to help training, such as BatchNorm and Dropout,
    # are switched off so they do not distort the evaluation.
    model.eval()
    test_data, test_label, steps = get_data(2)
    with torch.no_grad():
        y_pred = model(test_data)
    showDiff(y_pred, test_label, steps)
def train(model, maxIter, criterion, optimizer):
    '''
    Train the model
    ----------
    model : the LSTM model.
    maxIter : number of training iterations.
    criterion : loss function.
    optimizer : optimizer used to update the parameters.
    ------
    '''
    # model.train() enables batch normalization and dropout
    model.train()
    time_stamp = time.time()
    for epoch in range(maxIter):
        sinx_torch, cosx_label, steps = get_data(epoch)
        y_pre = model(sinx_torch)
        loss = criterion(y_pre, cosx_label)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if epoch % 100 == 0:
            data_time_interval = time.time() - time_stamp
            print('epoch: %d loss: %7.3f interval: %6.2f' % (epoch, loss.item(), data_time_interval))
    #torch.save(model.state_dict(), 'model_params.pth')
    showDiff(y_pre, cosx_label, steps)
if __name__ == '__main__':
    input_dim = 1
    hidden_dim = 64
    num_layers = 2
    batch_first = True
    maxIter = 3000
    model = LSTM(input_dim, hidden_dim, num_layers, batch_first)
    DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    optimizer = optim.Adam(model.parameters(), lr=0.0001, weight_decay=0.00001)
    criterion = nn.MSELoss()
    model.to(DEVICE)
    criterion.to(DEVICE)
    train(model, maxIter, criterion, optimizer)
    #model.load_state_dict(torch.load('model_params.pth', map_location='cpu'))
    #eval(model)
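The two commented-out lines above hint at how the trained parameters can be saved and later reloaded for evaluation. A minimal sketch of that flow, meant to sit inside the same __main__ block (the file name model_params.pth is taken from the comments; the reloaded model must end up on the same device as the tensors returned by get_data):

    # after train(...): persist only the parameters (state_dict), not the whole model
    torch.save(model.state_dict(), 'model_params.pth')

    # later: rebuild the model and load the saved parameters
    model = LSTM(input_dim, hidden_dim, num_layers, batch_first)
    state = torch.load('model_params.pth', map_location='cpu')
    model.load_state_dict(state)
    model.to(DEVICE)   # keep the model on the same device as the data from get_data
    eval(model)        # plots prediction vs. ground truth on a fresh window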
References:
PyTorch: using an RNN to predict cos from sin / using an LSTM to predict handwritten digits, 薛定谔的智能, CSDN blog