**🍨 This post is a learning-log entry for the [🔗 365-Day Deep Learning Training Camp](https://mp.weixin.qq.com/s/o-DaK6aQQLkJ8uE4YX1p3Q)**
**🍖 Original author: [K同学啊](https://mtyjkh.blog.csdn.net/)**
```python
import math
import torch
import torch.nn as nn

device = torch.device("cpu")
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device
```
```python
import torch.nn as nn

class Transpose(nn.Module):
    """Wrap torch.transpose as an nn.Module so it can be used inside nn.Sequential."""
    def __init__(self, *dims, contiguous=False):
        super().__init__()
        self.dims = dims
        self.contiguous = contiguous

    def forward(self, x):
        if self.contiguous:
            return x.transpose(*self.dims).contiguous()
        else:
            return x.transpose(*self.dims)  # swap the given pair of dimensions
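```

As a quick sanity check (this snippet is not in the original post), `Transpose(1, 2)` behaves like calling `x.transpose(1, 2)` directly:

```python
import torch

x = torch.randn(32, 10, 512)   # (batch, seq_len, d_model)
t = Transpose(1, 2)
print(t(x).shape)              # torch.Size([32, 512, 10])
```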
```python
import torch.nn.functional as F

class ScaledDotProductAttention(nn.Module):
    def __init__(self, d_k: int):
        super().__init__()
        self.d_k = d_k

    def forward(self, q, k, v, mask=None):
        # Core attention computation.
        # k is expected to arrive already transposed to (..., d_k, seq_len_k),
        # so a plain matmul gives the raw attention scores.
        scores = torch.matmul(q, k)
        # Scale the scores by sqrt(d_k)
        scores = scores / (self.d_k ** 0.5)
        if mask is not None:
            # masks use the convention "True = real token, 0/False = padding",
            # so positions where mask == 0 are pushed to -1e9 before the softmax
            scores = scores.masked_fill(mask == 0, -1e9)
        attn = F.softmax(scores, dim=-1)   # softmax over the scores -> attention weights
        context = torch.matmul(attn, v)    # weighted sum of the value vectors
        return context
```
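For reference, this module implements the scaled dot-product attention from *Attention Is All You Need*:

$$
\mathrm{Attention}(Q, K, V) = \mathrm{softmax}\!\left(\frac{QK^{\top}}{\sqrt{d_k}}\right)V
$$

with the small difference that the caller is expected to hand in `k` already transposed.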
```python
class MultiHeadAttention(nn.Module):
    def __init__(self, d_model, n_heads):
        super().__init__()
        self.d_k = d_model // n_heads
        self.d_v = d_model // n_heads
        self.n_heads = n_heads
        self.W_Q = nn.Linear(d_model, self.d_k * n_heads, bias=False)
        self.W_K = nn.Linear(d_model, self.d_k * n_heads, bias=False)
        self.W_V = nn.Linear(d_model, self.d_v * n_heads, bias=False)
        self.W_O = nn.Linear(n_heads * self.d_v, d_model, bias=False)

    def forward(self, Q, K, V, mask=None):
        bs = Q.size(0)
        # Project and split into heads: (bs, n_heads, seq_len, d_k)
        q_s = self.W_Q(Q).view(bs, -1, self.n_heads, self.d_k).transpose(1, 2)
        # Keys are additionally transposed to (bs, n_heads, d_k, seq_len) for the matmul
        k_s = self.W_K(K).view(bs, -1, self.n_heads, self.d_k).permute(0, 2, 3, 1)
        v_s = self.W_V(V).view(bs, -1, self.n_heads, self.d_v).transpose(1, 2)
        # Attention per head; the optional mask broadcasts over the head dimension
        context = ScaledDotProductAttention(self.d_k)(q_s, k_s, v_s, mask)
        # Concatenate the heads back to (bs, seq_len, n_heads * d_v) and project
        context = context.transpose(1, 2).contiguous().view(bs, -1, self.n_heads * self.d_v)
        output = self.W_O(context)
        return output
```
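A quick shape check with the hyperparameters used later in this post (`d_model = 512`, `n_heads = 8`); this snippet is illustrative and not part of the original notebook:

```python
mha = MultiHeadAttention(d_model=512, n_heads=8)
x = torch.randn(32, 10, 512)                            # (batch, seq_len, d_model)
pad_mask = torch.ones(32, 1, 1, 10, dtype=torch.bool)   # pretend every position is a real token
print(mha(x, x, x, pad_mask).shape)                     # torch.Size([32, 10, 512])
```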
```python
class Feedforward(nn.Module):
    def __init__(self, d_model, d_ff, dropout=0.1):
        super().__init__()
        self.linear1 = nn.Linear(d_model, d_ff)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(d_ff, d_model)

    def forward(self, x):
        # Position-wise feed-forward: expand to d_ff, ReLU, dropout, project back to d_model
        x = F.relu(self.linear1(x))
        x = self.dropout(x)
        x = self.linear2(x)
        return x
```
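This is the position-wise feed-forward network of the original paper (with an extra dropout between the two linear layers):

$$
\mathrm{FFN}(x) = \max(0,\; xW_1 + b_1)\,W_2 + b_2
$$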
```python
class PositionalEncoding(nn.Module):
    def __init__(self, d_model, dropout, max_len=5000):
        super().__init__()
        self.dropout = nn.Dropout(p=dropout)
        pe = torch.zeros(max_len, d_model).to(device)
        position = torch.arange(0, max_len).unsqueeze(1)
        # 1 / 10000^(2i/d_model); note the negative sign in the exponent
        div_term = torch.exp(torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(position * div_term)   # PE(pos, 2i)
        pe[:, 1::2] = torch.cos(position * div_term)   # PE(pos, 2i+1)
        pe = pe.unsqueeze(0)                           # add a batch dimension: (1, max_len, d_model)
        self.register_buffer('pe', pe)

    def forward(self, x):
        # x: (batch, seq_len, d_model); add the encoding for the first seq_len positions
        x = x + self.pe[:, :x.size(1), :]
        return self.dropout(x)
```
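The buffer follows the sinusoidal positional-encoding definition:

$$
PE_{(pos,\,2i)} = \sin\!\left(\frac{pos}{10000^{2i/d_{\mathrm{model}}}}\right), \qquad
PE_{(pos,\,2i+1)} = \cos\!\left(\frac{pos}{10000^{2i/d_{\mathrm{model}}}}\right)
$$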
```python
class EncoderLayer(nn.Module):
    def __init__(self, d_model, n_heads, d_ff, dropout=0.1):
        super().__init__()
        self.self_attn = MultiHeadAttention(d_model, n_heads)
        self.feedforward = Feedforward(d_model, d_ff, dropout)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, mask):
        # Self-attention sub-layer, then residual connection and LayerNorm
        attn_output = self.self_attn(x, x, x, mask)
        x = x + self.dropout(attn_output)
        x = self.norm1(x)
        # Feed-forward sub-layer, then residual connection and LayerNorm
        ff_output = self.feedforward(x)
        x = x + self.dropout(ff_output)
        x = self.norm2(x)
        return x
```
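A single encoder layer preserves the shape of its input; the hypothetical check below illustrates this:

```python
enc_layer = EncoderLayer(d_model=512, n_heads=8, d_ff=2048)
x = torch.randn(32, 10, 512)
mask = torch.ones(32, 1, 1, 10, dtype=torch.bool)
print(enc_layer(x, mask).shape)   # torch.Size([32, 10, 512])
```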
```python
class DecoderLayer(nn.Module):
    def __init__(self, d_model, n_heads, d_ff, dropout=0.1):
        super().__init__()
        self.self_attn = MultiHeadAttention(d_model, n_heads)
        self.enc_attn = MultiHeadAttention(d_model, n_heads)
        self.feedforward = Feedforward(d_model, d_ff, dropout)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.norm3 = nn.LayerNorm(d_model)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, enc_output, self_mask, context_mask):
        # Decoder self-attention sub-layer
        attn_output = self.self_attn(x, x, x, self_mask)
        x = x + self.dropout(attn_output)
        x = self.norm1(x)
        # Encoder-decoder (cross) attention over the encoder output
        attn_output = self.enc_attn(x, enc_output, enc_output, context_mask)
        x = x + self.dropout(attn_output)
        x = self.norm2(x)
        # Feed-forward sub-layer
        ff_output = self.feedforward(x)
        x = x + self.dropout(ff_output)
        x = self.norm3(x)
        return x
```
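Note that the usage example at the end of this post only builds padding masks. For autoregressive training, the decoder self-attention would normally also use a causal (look-ahead) mask; a minimal sketch, not part of the original code:

```python
def make_causal_mask(seq_len: int) -> torch.Tensor:
    # Lower-triangular boolean matrix: position i may attend to positions <= i
    return torch.tril(torch.ones(seq_len, seq_len, dtype=torch.bool)).unsqueeze(0).unsqueeze(1)

causal = make_causal_mask(20)   # (1, 1, 20, 20); combine with a padding mask via `pad_mask & causal`
```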
```python
# Assemble the full Transformer from the components above
class Transformer(nn.Module):
    def __init__(self, vocab_size, d_model, n_heads, n_encoder_layers, n_decoder_layers, d_ff, dropout=0.1):
        super().__init__()
        # A single embedding table is shared by the source and target sequences here
        self.embedding = nn.Embedding(vocab_size, d_model)
        self.positional_encoding = PositionalEncoding(d_model, dropout)
        self.encoder_layers = nn.ModuleList([EncoderLayer(d_model, n_heads, d_ff, dropout) for _ in range(n_encoder_layers)])
        self.decoder_layers = nn.ModuleList([DecoderLayer(d_model, n_heads, d_ff, dropout) for _ in range(n_decoder_layers)])
        self.fc_out = nn.Linear(d_model, vocab_size)
        self.dropout = nn.Dropout(dropout)

    def forward(self, src, trg, src_mask, trg_mask):
        # Embed and add positional encodings
        src = self.embedding(src)
        src = self.positional_encoding(src)
        trg = self.embedding(trg)
        trg = self.positional_encoding(trg)
        # Encoder stack
        for layer in self.encoder_layers:
            src = layer(src, src_mask)
        # Decoder stack attends to the encoder output
        for layer in self.decoder_layers:
            trg = layer(trg, src, trg_mask, src_mask)
        # Project to vocabulary logits
        output = self.fc_out(trg)
        return output
```
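For a rough sense of model size, the hypothetical snippet below instantiates a small configuration and counts trainable parameters:

```python
tiny = Transformer(vocab_size=1000, d_model=128, n_heads=4,
                   n_encoder_layers=2, n_decoder_layers=2, d_ff=512)
n_params = sum(p.numel() for p in tiny.parameters() if p.requires_grad)
print(f"{n_params:,} trainable parameters")
```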
```python
# Usage example
vocab_size = 10000      # assume a vocabulary of 10,000 tokens
d_model = 512
n_heads = 8
n_encoder_layers = 6
n_decoder_layers = 6
d_ff = 2048
dropout = 0.1

transformer_model = Transformer(vocab_size, d_model, n_heads, n_encoder_layers, n_decoder_layers, d_ff, dropout)

# Dummy inputs; replace them with real data in practice
src = torch.randint(0, vocab_size, (32, 10))   # source-language sentences
trg = torch.randint(0, vocab_size, (32, 20))   # target-language sentences

# Padding masks: True wherever the token id is not the padding index 0
src_mask = (src != 0).unsqueeze(1).unsqueeze(2)
trg_mask = (trg != 0).unsqueeze(1).unsqueeze(2)

print("Actual   | input shape:", src.shape)
print("Expected | output shape:", trg.shape)
output = transformer_model(src, trg, src_mask, trg_mask)
print("Actual   | output shape:", output.shape)
```
```
Actual   | input shape: torch.Size([32, 10])
Expected | output shape: torch.Size([32, 20])
Actual   | output shape: torch.Size([32, 20, 10000])
```
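The output tensor has shape `(batch, trg_len, vocab_size)`: one row of vocabulary logits per target position. A minimal sketch of a training loss over these logits (not in the original post; the usual shift between decoder inputs and labels is omitted for brevity):

```python
criterion = nn.CrossEntropyLoss(ignore_index=0)                    # treat token id 0 as padding
loss = criterion(output.reshape(-1, vocab_size), trg.reshape(-1))  # flatten positions into the batch
print(loss.item())
```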