Transformer: Multi-Head Attention (PyTorch)

  1. Schematic
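
Each attention head projects the queries, keys, and values, computes scaled dot-product attention, and the per-head outputs are concatenated and passed through a final linear layer:

$$\mathrm{Attention}(Q, K, V) = \mathrm{softmax}\!\left(\frac{QK^{\top}}{\sqrt{d_k}}\right)V$$

$$\mathrm{MultiHead}(Q, K, V) = \mathrm{Concat}(\mathrm{head}_1, \dots, \mathrm{head}_h)\,W^{O}, \qquad \mathrm{head}_i = \mathrm{Attention}(QW_i^{Q}, KW_i^{K}, VW_i^{V})$$

With `embed_size = 512` and `heads = 8` as in the demo below, each head works in `head_dim = 64` dimensions, so the scores are scaled by $\sqrt{d_k} = 8$.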

  2. Code

```python
import torch
import torch.nn as nn


class Multi_Head_Self_Attention(nn.Module):
    def __init__(self, embed_size, heads):
        super(Multi_Head_Self_Attention, self).__init__()
        self.embed_size = embed_size
        self.heads = heads
        self.head_dim = embed_size // heads
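        # Sanity check: embed_size must split evenly across heads (e.g. 512 // 8 = 64)
        assert self.head_dim * heads == embed_size, "embed_size must be divisible by heads"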

        self.queries = nn.Linear(self.embed_size, self.embed_size, bias=False)
        self.keys = nn.Linear(self.embed_size, self.embed_size, bias=False)
        self.values = nn.Linear(self.embed_size, self.embed_size, bias=False)
        self.fc_out = nn.Linear(self.embed_size, self.embed_size, bias=False)

    def forward(self, queries, keys, values, mask):
        N = queries.shape[0]  # batch_size
        query_len = queries.shape[1]  # sequence_length
        key_len = keys.shape[1]  # sequence_length 
        value_len = values.shape[1]  # sequence_length

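        # Linear projections: Q, K, V each stay (N, len, embed_size) at this point;
        # the per-head split happens in the reshape below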
        queries = self.queries(queries)
        keys = self.keys(keys)
        values = self.values(values)

        # Split the embedding into self.heads pieces
        # batch_size, sequence_length, embed_size(512) --> 
        # batch_size, sequence_length, heads(8), head_dim(64)
        queries = queries.reshape(N, query_len, self.heads, self.head_dim)
        keys = keys.reshape(N, key_len, self.heads, self.head_dim)
        values = values.reshape(N, value_len, self.heads, self.head_dim)

        # batch_size, sequence_length, heads(8), head_dim(64) --> 
        # batch_size, heads(8), sequence_length, head_dim(64)
        queries = queries.transpose(1, 2)
        keys = keys.transpose(1, 2)
        values = values.transpose(1, 2)

        # Scaled dot-product attention
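        # (N, heads, query_len, head_dim) @ (N, heads, head_dim, key_len)
        # -> (N, heads, query_len, key_len), scaled by sqrt(head_dim)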
        score = torch.matmul(queries, keys.transpose(-2, -1)) / (self.head_dim ** (1/2))

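        # Masked positions (mask == 0) are set to -inf so softmax gives them ~zero weight;
        # the mask only needs to broadcast to (N, heads, query_len, key_len)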
        if mask is not None:
            score = score.masked_fill(mask == 0, float("-inf"))
        # batch_size, heads(8), sequence_length, sequence_length
        attention = torch.softmax(score, dim=-1)

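        # Weighted sum of values:
        # (N, heads, query_len, key_len) @ (N, heads, key_len, head_dim)
        # -> (N, heads, query_len, head_dim)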
        out = torch.matmul(attention, values)
        # batch_size, heads(8), sequence_length, head_dim(64) -->
        # batch_size, sequence_length, heads(8), head_dim(64) -->
        # batch_size, sequence_length, embed_size(512)
        # merged back so the result can be fed directly into the following layers
        out = out.transpose(1, 2).contiguous().reshape(N, query_len, self.embed_size)
        out = self.fc_out(out)

        return out
    

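# Toy example: random Q, K, V with the shapes referenced in the comments above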
batch_size = 64
sequence_length = 10
embed_size = 512
heads = 8
mask = None

Q = torch.randn(batch_size, sequence_length, embed_size)  
K = torch.randn(batch_size, sequence_length, embed_size)  
V = torch.randn(batch_size, sequence_length, embed_size)  

model = Multi_Head_Self_Attention(embed_size, heads)
output = model(Q, K, V, mask)
print(output.shape)  # torch.Size([64, 10, 512])
```
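
The demo passes `mask = None`. To add causal (autoregressive) masking, one option, continuing with the tensors defined above, is a lower-triangular matrix; since `masked_fill` broadcasts, a `(sequence_length, sequence_length)` mask is enough. A minimal sketch (the `causal_mask` name is just for illustration):

```python
# Causal mask: position i may only attend to positions <= i.
# Zeros above the diagonal become -inf inside the attention scores.
causal_mask = torch.tril(torch.ones(sequence_length, sequence_length))

masked_output = model(Q, K, V, causal_mask)
print(masked_output.shape)  # torch.Size([64, 10, 512])
```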
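
For comparison, PyTorch's built-in `nn.MultiheadAttention` performs the same computation (plus optional bias and dropout). A rough equivalent of the call above, assuming a recent PyTorch version that supports `batch_first`:

```python
# Built-in module; batch_first=True keeps the (batch, seq, embed) layout used above
mha = nn.MultiheadAttention(embed_dim=embed_size, num_heads=heads, batch_first=True)

attn_output, attn_weights = mha(Q, K, V)
print(attn_output.shape)   # torch.Size([64, 10, 512])
print(attn_weights.shape)  # torch.Size([64, 10, 10]), averaged over heads by default
```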