Transformer: Multi-Head Attention (PyTorch)

  1. Schematic
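Each attention head computes scaled dot-product attention, and the outputs of all heads are concatenated and projected back to the model dimension:

$$\mathrm{Attention}(Q,K,V)=\mathrm{softmax}\!\left(\frac{QK^{\top}}{\sqrt{d_k}}\right)V$$

$$\mathrm{MultiHead}(Q,K,V)=\mathrm{Concat}(\mathrm{head}_1,\ldots,\mathrm{head}_h)\,W^{O},\qquad \mathrm{head}_i=\mathrm{Attention}(QW_i^{Q},KW_i^{K},VW_i^{V})$$

In the code below, the per-head projections $W_i^{Q}, W_i^{K}, W_i^{V}$ are implemented as single full-width linear layers whose outputs are reshaped into heads, which is equivalent to applying separate per-head projections.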

  2. Code

python
import torch
import torch.nn as nn


class Multi_Head_Self_Attention(nn.Module):
    def __init__(self, embed_size, heads):
        super(Multi_Head_Self_Attention, self).__init__()
        self.embed_size = embed_size
        self.heads = heads
        self.head_dim = embed_size // heads
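        # embed_size must be divisible by heads, otherwise the reshape into
        # (heads, head_dim) in forward() would fail
        assert self.head_dim * heads == embed_size, "embed_size must be divisible by heads"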

        self.queries = nn.Linear(self.embed_size, self.embed_size, bias=False)
        self.keys = nn.Linear(self.embed_size, self.embed_size, bias=False)
        self.values = nn.Linear(self.embed_size, self.embed_size, bias=False)
        self.fc_out = nn.Linear(self.embed_size, self.embed_size, bias=False)

    def forward(self, queries, keys, values, mask):
        N = queries.shape[0]  # batch_size
        query_len = queries.shape[1]  # sequence_length
        key_len = keys.shape[1]  # sequence_length 
        value_len = values.shape[1]  # sequence_length

        queries = self.queries(queries)
        keys = self.keys(keys)
        values = self.values(values)

        # Split the embedding into self.heads pieces
        # batch_size, sequence_length, embed_size(512) --> 
        # batch_size, sequence_length, heads(8), head_dim(64)
        queries = queries.reshape(N, query_len, self.heads, self.head_dim)
        keys = keys.reshape(N, key_len, self.heads, self.head_dim)
        values = values.reshape(N, value_len, self.heads, self.head_dim)

        # batch_size, sequence_length, heads(8), head_dim(64) --> 
        # batch_size, heads(8), sequence_length, head_dim(64)
        queries = queries.transpose(1, 2)
        keys = keys.transpose(1, 2)
        values = values.transpose(1, 2)

        # Scaled dot-product attention
        score = torch.matmul(queries, keys.transpose(-2, -1)) / (self.head_dim ** (1/2))

        if mask is not None:
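            # positions where mask == 0 are filled with -inf so that the softmax
            # assigns them (effectively) zero weight; the mask is expected to be
            # broadcastable to (batch_size, heads, query_len, key_len)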
            score = score.masked_fill(mask == 0, float("-inf"))
        # batch_size, heads(8), sequence_length, sequence_length
        attention = torch.softmax(score, dim=-1)

        out = torch.matmul(attention, values)
        # batch_size, heads(8), sequence_length, head_dim(64) -->
        # batch_size, sequence_length, heads(8), head_dim(64) -->
        # batch_size, sequence_length, embed_size(512)
        # merged back to embed_size so the output can be fed directly into the following layers
        out = out.transpose(1, 2).contiguous().reshape(N, query_len, self.embed_size)
        out = self.fc_out(out)

        return out
    

batch_size = 64
sequence_length = 10
embed_size = 512
heads = 8
mask = None

Q = torch.randn(batch_size, sequence_length, embed_size)  
K = torch.randn(batch_size, sequence_length, embed_size)  
V = torch.randn(batch_size, sequence_length, embed_size)  

model = Multi_Head_Self_Attention(embed_size, heads)
output = model(Q, K, V, mask)
print(output.shape)
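
With these settings the printed shape is torch.Size([64, 10, 512]): the module preserves the (batch_size, sequence_length, embed_size) layout of its input. The test above passes mask=None; below is a minimal sketch of a causal (look-ahead) mask, assuming the convention in forward() that positions with mask == 0 are suppressed.

python
# Hypothetical usage sketch: a lower-triangular (causal) mask, broadcastable
# to the score tensor of shape (batch_size, heads, query_len, key_len)
causal_mask = torch.tril(torch.ones(sequence_length, sequence_length))  # (L, L)
causal_mask = causal_mask.unsqueeze(0).unsqueeze(0)                     # (1, 1, L, L)

masked_output = model(Q, K, V, causal_mask)
print(masked_output.shape)  # torch.Size([64, 10, 512])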