Transformer: Multi-Head Attention (PyTorch)

  1. Schematic
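The flow implemented by the code below is the standard multi-head self-attention: linear layers project the inputs into queries Q, keys K, and values V; each is split into heads parallel heads of size head_dim = embed_size / heads; every head computes scaled dot-product attention

Attention(Q, K, V) = softmax(Q · K^T / sqrt(head_dim)) · V

and the per-head outputs are concatenated back to embed_size and passed through a final linear layer fc_out.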

  2. Code

python
import torch
import torch.nn as nn


class Multi_Head_Self_Attention(nn.Module):
    def __init__(self, embed_size, heads):
        super(Multi_Head_Self_Attention, self).__init__()
        self.embed_size = embed_size
        self.heads = heads
        self.head_dim = embed_size // heads
        assert self.head_dim * heads == embed_size, "embed_size must be divisible by heads"

        self.queries = nn.Linear(self.embed_size, self.embed_size, bias=False)
        self.keys = nn.Linear(self.embed_size, self.embed_size, bias=False)
        self.values = nn.Linear(self.embed_size, self.embed_size, bias=False)
        self.fc_out = nn.Linear(self.embed_size, self.embed_size, bias=False)

    def forward(self, queries, keys, values, mask):
        N = queries.shape[0]  # batch_size
        query_len = queries.shape[1]  # sequence_length
        key_len = keys.shape[1]  # sequence_length 
        value_len = values.shape[1]  # sequence_length

        queries = self.queries(queries)
        keys = self.keys(keys)
        values = self.values(values)

        # Split the embedding into self.heads pieces
        # batch_size, sequence_length, embed_size(512) --> 
        # batch_size, sequence_length, heads(8), head_dim(64)
        queries = queries.reshape(N, query_len, self.heads, self.head_dim)
        keys = keys.reshape(N, key_len, self.heads, self.head_dim)
        values = values.reshape(N, value_len, self.heads, self.head_dim)

        # batch_size, sequence_length, heads(8), head_dim(64) --> 
        # batch_size, heads(8), sequence_length, head_dim(64)
        queries = queries.transpose(1, 2)
        keys = keys.transpose(1, 2)
        values = values.transpose(1, 2)

        # Scaled dot-product attention
        score = torch.matmul(queries, keys.transpose(-2, -1)) / (self.head_dim ** (1/2))

        if mask is not None:
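            # Positions where mask == 0 are filled with -inf so they get zero weight after softmax.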
            score = score.masked_fill(mask == 0, float("-inf"))
        # batch_size, heads(8), sequence_length, sequence_length
        attention = torch.softmax(score, dim=-1)

        out = torch.matmul(attention, values)
        # batch_size, heads(8), sequence_length, head_dim(64) -->
        # batch_size, sequence_length, heads(8), head_dim(64) -->
        # batch_size, sequence_length, embed_size(512)
        # so it can be fed into the subsequent layers
        out = out.transpose(1, 2).contiguous().reshape(N, query_len, self.embed_size)
        out = self.fc_out(out)

        return out
    

batch_size = 64
sequence_length = 10
embed_size = 512
heads = 8
mask = None

Q = torch.randn(batch_size, sequence_length, embed_size)  
K = torch.randn(batch_size, sequence_length, embed_size)  
V = torch.randn(batch_size, sequence_length, embed_size)  

model = Multi_Head_Self_Attention(embed_size, heads)
output = model(Q, K, V, mask)
print(output.shape)
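The printed shape is torch.Size([64, 10, 512]); multi-head attention preserves the input's (batch_size, sequence_length, embed_size) layout. As a sanity check, a minimal sketch (assuming PyTorch's built-in nn.MultiheadAttention with batch_first=True, which likewise takes (batch, seq, embed) tensors) can confirm that the built-in module produces an output of the same shape; the values will differ because the two modules have independently initialized weights.

python
# Shape check against PyTorch's built-in multi-head attention.
# The output shape should match the custom module above: torch.Size([64, 10, 512]).
mha = nn.MultiheadAttention(embed_size, heads, bias=False, batch_first=True)
mha_out, _ = mha(Q, K, V)
print(mha_out.shape)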