import torch
from torch import nn
from torch.nn import functional as F
from d2l import torch as d2l
# Helper modules from the earlier notes: the time-machine data loader and the
# from-scratch RNN utilities (predict_ch8 etc.)
import test_53LanguageModel
import test_55RNNdifficult_realize

batch_size, num_steps = 32, 35
train_iter, vocab = test_53LanguageModel.load_data_time_machine(batch_size, num_steps)
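# A quick sanity check (not in the original notes; X_peek/Y_peek are
# illustrative names): each minibatch is a pair of integer index tensors of
# shape (batch_size, num_steps).
for X_peek, Y_peek in train_iter:
    print(X_peek.shape, Y_peek.shape)  # torch.Size([32, 35]) torch.Size([32, 35])
    break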
# Define the model: a single-layer nn.RNN with 512 hidden units. nn.RNN only
# computes the hidden states; the output layer is added in the wrapper below.
num_hiddens = 512
rnn_layer = nn.RNN(len(vocab), num_hiddens)
# Initialize the hidden state with a zero tensor of shape
# (num_layers * num_directions, batch_size, num_hiddens)
state = torch.zeros((1, batch_size, num_hiddens))
# Given an input and a hidden state, nn.RNN returns the per-step outputs and
# the updated hidden state; shape check:
# print(state.shape)  # torch.Size([1, 32, 512])
# X = torch.randn(size=(num_steps, batch_size, len(vocab)))
# Y, state_new = rnn_layer(X, state)
# print(Y.shape, state_new.shape)  # torch.Size([35, 32, 512]) torch.Size([1, 32, 512])
class RNNModel(nn.Module):
    """Wrap an RNN layer with an output (vocabulary-projection) layer."""
    def __init__(self, rnn_layer, vocab_size, **kwargs):
        super(RNNModel, self).__init__(**kwargs)
        self.rnn = rnn_layer
        self.vocab_size = vocab_size
        self.num_hiddens = self.rnn.hidden_size
        # A bidirectional RNN concatenates forward and backward hidden states,
        # so the output layer takes twice as many inputs.
        if not self.rnn.bidirectional:
            self.num_directions = 1
            self.linear = nn.Linear(self.num_hiddens, self.vocab_size)
        else:
            self.num_directions = 2
            self.linear = nn.Linear(self.num_hiddens * 2, self.vocab_size)

    def forward(self, inputs, state):
        # One-hot encode and transpose the input:
        # (batch_size, num_steps) -> (num_steps, batch_size, vocab_size)
        X = F.one_hot(inputs.T.long(), self.vocab_size)
        X = X.to(torch.float32)
        Y, state = self.rnn(X, state)
        # Flatten Y to (num_steps * batch_size, num_hiddens); the linear layer
        # then yields logits of shape (num_steps * batch_size, vocab_size)
        output = self.linear(Y.reshape((-1, Y.shape[-1])))
        return output, state

    def begin_state(self, device, batch_size=1):
        if not isinstance(self.rnn, nn.LSTM):
            # nn.RNN and nn.GRU use a single tensor as the hidden state:
            # (num_layers * num_directions, batch_size, num_hiddens)
            return torch.zeros((self.num_directions * self.rnn.num_layers,
                                batch_size, self.num_hiddens), device=device)
        else:
            # nn.LSTM needs a tuple of (hidden state, memory cell)
            return (torch.zeros((self.num_directions * self.rnn.num_layers,
                                 batch_size, self.num_hiddens), device=device),
                    torch.zeros((self.num_directions * self.rnn.num_layers,
                                 batch_size, self.num_hiddens), device=device))
device = d2l.try_gpu()
net = RNNModel(rnn_layer, vocab_size=len(vocab))
net = net.to(device=device)
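# A hedged shape check of the wrapper before prediction (the check_* names
# are illustrative, not from the original): the logits come back flattened to
# (num_steps * batch_size, len(vocab)).
check_state = net.begin_state(device=device, batch_size=batch_size)
check_X = torch.randint(0, len(vocab), (batch_size, num_steps), device=device)
check_Y, check_state = net(check_X, check_state)
print(check_Y.shape)  # (num_steps * batch_size, len(vocab)) = (1120, len(vocab))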
# Predict with the still-untrained model; the continuation is gibberish until
# the network has been trained.
print(test_55RNNdifficult_realize.predict_ch8('time traveller', 10, net, vocab, device))
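# Training sketch -- this assumes d2l's train_ch8 utility (signature
# train_ch8(net, train_iter, vocab, lr, num_epochs, device)) is available in
# the installed d2l version; hyperparameters follow the book's defaults.
num_epochs, lr = 500, 1
d2l.train_ch8(net, train_iter, vocab, lr, num_epochs, device)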