TR3 -- Reproducing the Transformer in PyTorch

```python
import math
import torch
import torch.nn as nn
device = torch.device("cpu")
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)
```
```python
import torch.nn as nn
class Transpose(nn.Module):
    def __init__(self,*dims,contiguous=False):
        super().__init__()
        self.dims=dims
        self.contiguous=contiguous

    def forward(self,x):
        if self.contiguous:
            return x.transpose(*self.dims).contiguous()
        else:
            return x.transpose(*self.dims)  # return a (possibly non-contiguous) transposed view
```
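The `Transpose` wrapper is not used again in this post, but it is handy when a transpose has to sit inside an `nn.Sequential`, where raw tensor methods cannot be chained. A minimal sketch with made-up shapes:

```python
net = nn.Sequential(
    Transpose(1, 2),                        # (bs, seq_len, channels) -> (bs, channels, seq_len)
    nn.Conv1d(64, 64, kernel_size=3, padding=1),
    Transpose(1, 2, contiguous=True),       # back to (bs, seq_len, channels)
)
x = torch.randn(8, 100, 64)
print(net(x).shape)                         # torch.Size([8, 100, 64])
```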
```python
import torch.nn.functional as F
class ScaledDotProductAttention(nn.Module):
    def __init__(self,d_k:int):
        super().__init__()
        self.d_k=d_k
    def forward(self,q,k,v,mask=None):
        # core of the attention computation
        # q: (bs, n_heads, len_q, d_k); k is expected to arrive ALREADY transposed to (bs, n_heads, d_k, len_k)
        scores=torch.matmul(q,k)  # raw score matrix: (bs, n_heads, len_q, len_k)
        # scale the scores by sqrt(d_k)
        scores=scores/(self.d_k**0.5)
        if mask is not None:
            # positions where mask == 0 (padding) are pushed to -inf before the softmax
            scores=scores.masked_fill(mask==0,-1e9)

        attn=F.softmax(scores,dim=-1)  # softmax over the key dimension gives the attention weights
        context=torch.matmul(attn,v)   # weighted sum of the value vectors
        return context
```
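Note that this implementation expects `k` to arrive already transposed to `(..., d_k, len_k)`, since the scores are computed as a plain `matmul(q, k)` rather than `matmul(q, k.transpose(-2, -1))`; the multi-head module below takes care of that with its `permute`. A quick sanity check with illustrative shapes:

```python
bs, n_heads, len_q, len_k, d_k = 2, 8, 5, 7, 64
q = torch.randn(bs, n_heads, len_q, d_k)
k = torch.randn(bs, n_heads, d_k, len_k)   # note: already transposed
v = torch.randn(bs, n_heads, len_k, d_k)
pad_mask = torch.ones(bs, 1, 1, len_k)     # 1 = keep, 0 = padding

context = ScaledDotProductAttention(d_k)(q, k, v, pad_mask)
print(context.shape)                       # torch.Size([2, 8, 5, 64])
```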
```python
class MultiHeadAttention(nn.Module):
    def __init__(self,d_model,n_heads):
        super().__init__()
        self.d_k=d_model//n_heads
        self.d_v=d_model//n_heads
        self.n_heads=n_heads
        self.W_Q=nn.Linear(d_model,self.d_k*n_heads,bias=False)
        self.W_K=nn.Linear(d_model,self.d_k*n_heads,bias=False)
        self.W_V=nn.Linear(d_model,self.d_v*n_heads,bias=False)
        self.W_O=nn.Linear(n_heads*self.d_v,d_model,bias=False)

    def forward(self,Q,K,V,mask=None):
        bs=Q.size(0)
        # project and split into heads: (bs, n_heads, seq_len, d_k)
        q_s=self.W_Q(Q).view(bs,-1,self.n_heads,self.d_k).transpose(1,2)
        # K is additionally moved to (bs, n_heads, d_k, seq_len) so that q @ k gives the score matrix
        k_s=self.W_K(K).view(bs,-1,self.n_heads,self.d_k).permute(0,2,3,1)
        v_s=self.W_V(V).view(bs,-1,self.n_heads,self.d_v).transpose(1,2)
        # forward the mask so that padded positions are actually excluded
        context=ScaledDotProductAttention(self.d_k)(q_s,k_s,v_s,mask)
        # merge the heads back: (bs, seq_len, n_heads * d_v)
        context=context.transpose(1,2).contiguous().view(bs,-1,self.n_heads*self.d_v)
        output=self.W_O(context)

        return output
```
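A quick check that the module keeps the `(batch, seq_len, d_model)` shape and accepts a broadcastable padding mask (shapes are illustrative):

```python
mha = MultiHeadAttention(d_model=512, n_heads=8)
x = torch.randn(2, 10, 512)
pad_mask = torch.ones(2, 1, 1, 10)   # broadcasts over heads and query positions
print(mha(x, x, x, pad_mask).shape)  # torch.Size([2, 10, 512])
```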
        
```python
class Feedforward(nn.Module):
    def __init__(self,d_model,d_ff,dropout=0.1):
        super().__init__()
        self.linear1=nn.Linear(d_model,d_ff)
        self.dropout=nn.Dropout(dropout)
        self.linear2=nn.Linear(d_ff,d_model)

    def forward(self,x):
        x=torch.nn.functional.relu(self.linear1(x))
        x=self.dropout(x)
        x=self.linear2(x)
        return x
```
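The position-wise feed-forward block expands every token to `d_ff` dimensions and projects back down, so the input shape is preserved:

```python
ffn = Feedforward(d_model=512, d_ff=2048)
x = torch.randn(2, 10, 512)
print(ffn(x).shape)   # torch.Size([2, 10, 512])
```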
```python
class PositionalEncoding(nn.Module):
    def __init__(self,d_model,dropout,max_len=5000):
        super().__init__()
        self.dropout=nn.Dropout(p=dropout)
        pe=torch.zeros(max_len,d_model).to(device)
        position=torch.arange(0,max_len).unsqueeze(1)
        div_term=torch.exp(torch.arange(0,d_model,2)*(-math.log(10000.0)/d_model))  # 1 / 10000^(2i/d_model)
        pe[:,0::2]=torch.sin(position*div_term)  # PE(pos, 2i)   = sin(pos / 10000^(2i/d_model))
        pe[:,1::2]=torch.cos(position*div_term)  # PE(pos, 2i+1) = cos(pos / 10000^(2i/d_model))
        pe=pe.unsqueeze(0)  # add a batch dimension: (1, max_len, d_model)

        self.register_buffer('pe',pe)

    def forward(self,x):
        # x: (bs, seq_len, d_model); the stored table broadcasts over the batch dimension
        x=x+self.pe[:,:x.size(1),:]
        return self.dropout(x)
```
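The sinusoidal table is built once as a `(1, max_len, d_model)` buffer and broadcast over the batch; only the first `seq_len` rows are consumed in `forward`:

```python
pos_enc = PositionalEncoding(d_model=512, dropout=0.0)
x = torch.zeros(2, 10, 512)
print(pos_enc.pe.shape, pos_enc(x).shape)  # torch.Size([1, 5000, 512]) torch.Size([2, 10, 512])
```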
        
```python
class EncoderLayer(nn.Module):
    def __init__(self,d_model,n_heads,d_ff,dropout=0.1):
        super().__init__()
        self.self_attn=MultiHeadAttention(d_model,n_heads)
        self.feedforward=Feedforward(d_model,d_ff,dropout)
        self.norm1=nn.LayerNorm(d_model)
        self.norm2=nn.LayerNorm(d_model)
        self.dropout=nn.Dropout(dropout)

    def forward(self,x,mask):
        attn_output=self.self_attn(x,x,x,mask)
        x=x+self.dropout(attn_output)
        x=self.norm1(x)
        ff_output=self.feedforward(x)
        x=x+self.dropout(ff_output)
        x=self.norm2(x)
        return x
```
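Each encoder layer is shape-preserving and applies LayerNorm after every residual connection (the post-norm arrangement of the original paper):

```python
enc = EncoderLayer(d_model=512, n_heads=8, d_ff=2048)
x = torch.randn(2, 10, 512)
print(enc(x, mask=None).shape)   # torch.Size([2, 10, 512])
```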
        
```python
class DecoderLayer(nn.Module):
    def __init__(self,d_model,n_heads,d_ff,dropout=0.1):
        super().__init__()
        self.self_attn=MultiHeadAttention(d_model,n_heads)
        self.enc_attn=MultiHeadAttention(d_model,n_heads)
        self.feedforward=Feedforward(d_model,d_ff,dropout)
        self.norm1=nn.LayerNorm(d_model)
        self.norm2=nn.LayerNorm(d_model)
        self.norm3=nn.LayerNorm(d_model)
        self.dropout=nn.Dropout(dropout)

    def forward(self,x,enc_output,self_mask,context_mask):
        attn_output=self.self_attn(x,x,x,self_mask)
        x=x+self.dropout(attn_output)
        x=self.norm1(x)

        attn_output=self.enc_attn(x,enc_output,enc_output,context_mask)
        x=x+self.dropout(attn_output)
        x=self.norm2(x)

        ff_output=self.feedforward(x)
        x=x+self.dropout(ff_output)
        x=self.norm3(x)
        return x
```
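As written, the decoder self-attention only receives whatever mask the caller builds, and the usage example below passes a padding-only mask, so every target position can still attend to future positions. For autoregressive training you would normally combine the padding mask with a causal (lower-triangular) mask. A sketch of such a helper (`make_trg_mask` is not part of the original code; it follows the convention used in `ScaledDotProductAttention` above, where positions with mask == 0 are blocked):

```python
def make_trg_mask(trg, pad_idx=0):
    # padding mask: (bs, 1, 1, trg_len), True where the token is real
    pad_mask = (trg != pad_idx).unsqueeze(1).unsqueeze(2)
    # causal mask: (trg_len, trg_len), True on and below the diagonal
    trg_len = trg.size(1)
    causal = torch.tril(torch.ones(trg_len, trg_len, device=trg.device)).bool()
    # the combined mask broadcasts to (bs, 1, trg_len, trg_len)
    return pad_mask & causal

trg = torch.randint(0, 10000, (32, 20))
print(make_trg_mask(trg).shape)   # torch.Size([32, 1, 20, 20])
```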
```python
# Assemble the full encoder-decoder Transformer
class Transformer(nn.Module):
    def __init__(self,vocab_size,d_model,n_heads,n_encoder_layers,n_decoder_layers,d_ff,dropout=0.1):
        super().__init__()
        self.embedding=nn.Embedding(vocab_size,d_model)
        self.positional_encoding=PositionalEncoding(d_model,dropout)
        self.encoder_layers=nn.ModuleList([EncoderLayer(d_model,n_heads,d_ff,dropout) for _ in range(n_encoder_layers)])
        self.decoder_layers=nn.ModuleList([DecoderLayer(d_model,n_heads,d_ff,dropout) for _ in range(n_decoder_layers)])
        self.fc_out=nn.Linear(d_model,vocab_size)
        self.dropout=nn.Dropout(dropout)

    def forward(self,src,trg,src_mask,trg_mask):
        src=self.embedding(src)
        src=self.positional_encoding(src)
        trg=self.embedding(trg)
        trg=self.positional_encoding(trg)

        for layer in self.encoder_layers:
            src=layer(src,src_mask)

        for layer in self.decoder_layers:
            trg=layer(trg,src,trg_mask,src_mask)

        output=self.fc_out(trg)
        return output
```
        
```python
# Usage example
vocab_size = 10000  # assume a vocabulary of 10,000 tokens
d_model=512
n_heads=8
n_encoder_layers=6
n_decoder_layers=6
d_ff = 2048
dropout = 0.1

transformer_model = Transformer(vocab_size, d_model, n_heads, n_encoder_layers, n_decoder_layers, d_ff, dropout)

# Dummy inputs; replace with real data as needed
src = torch.randint(0, vocab_size, (32, 10))  # source-language batch: (batch_size, src_len)
trg = torch.randint(0, vocab_size, (32, 20))  # target-language batch: (batch_size, trg_len)
# padding masks: nonzero tokens are kept, positions equal to 0 (padding) are masked out
src_mask = (src != 0).unsqueeze(1).unsqueeze(2)  # (32, 1, 1, 10)
trg_mask = (trg != 0).unsqueeze(1).unsqueeze(2)  # (32, 1, 1, 20)

print("Input shape:", src.shape)
print("Target shape:", trg.shape)
output = transformer_model(src, trg, src_mask, trg_mask)
print("Output shape:", output.shape)
```
Output:

```text
Input shape: torch.Size([32, 10])
Target shape: torch.Size([32, 20])
Output shape: torch.Size([32, 20, 10000])
```
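The output contains unnormalised scores (logits) over the vocabulary for every target position. For training, the logits are typically flattened and compared with the gold token ids under cross-entropy, ignoring padding; a minimal sketch, assuming index 0 is the padding token (a real setup would also shift the decoder input and target by one position for teacher forcing):

```python
criterion = nn.CrossEntropyLoss(ignore_index=0)
# (32, 20, 10000) logits -> (32*20, 10000); targets -> (32*20,)
loss = criterion(output.reshape(-1, vocab_size), trg.reshape(-1))
loss.backward()
print(loss.item())
```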