注意力机制和自注意力机制模块的相关代码实现/缝合模块/即插即用模块

复制代码
注意力机制

import torch
import torch.nn as nn

class Attention(nn.Module):
    """Simple soft attention over a sequence of encoder states.

    Each timestep is scored with a single learned linear projection,
    the scores are softmax-normalized over the time axis, and the
    states are combined into one context vector per batch element.
    """

    def __init__(self, hidden_dim):
        super(Attention, self).__init__()
        # One scalar score per timestep; no bias term.
        self.attention = nn.Linear(hidden_dim, 1, bias=False)

    def forward(self, encoder_outputs):
        """Map (batch, seq_len, hidden_dim) -> (context, weights).

        Returns:
            context: (batch, hidden_dim) weighted sum over time.
            weights: (batch, seq_len, 1) softmax attention weights.
        """
        scores = self.attention(encoder_outputs)            # (B, T, 1)
        weights = torch.softmax(scores, dim=1)              # normalize over time
        context = (weights * encoder_outputs).sum(dim=1)    # (B, hidden_dim)
        return context, weights

# Demo: run the attention layer on random encoder states.
batch_size, sequence_length, hidden_dim = 2, 5, 10

# Random tensor stands in for real RNN/Transformer encoder output.
encoder_outputs = torch.randn(batch_size, sequence_length, hidden_dim)
attention_layer = Attention(hidden_dim)
context, attn_weights = attention_layer(encoder_outputs)

print("Context:", context)
print("Attention Weights:", attn_weights)

自注意力机制

import torch

import torch.nn as nn

class MultiHeadSelfAttention(nn.Module):
    """Multi-head scaled dot-product self-attention.

    A single linear layer produces Q, K, V jointly; attention is computed
    per head and the concatenated heads are passed through an output
    projection. Returns both the output and the attention weights.
    """

    def __init__(self, embed_dim, num_heads):
        # Fixed: the pasted original had `def init` / `super(...).init()`,
        # which never runs as a constructor; restored the dunder names.
        super(MultiHeadSelfAttention, self).__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.head_dim = embed_dim // num_heads
        # Validate with an explicit exception instead of `assert`
        # (asserts are stripped under `python -O`).
        if self.head_dim * num_heads != embed_dim:
            raise ValueError("embed_dim must be divisible by num_heads")

        self.qkv_proj = nn.Linear(embed_dim, embed_dim * 3)
        self.o_proj = nn.Linear(embed_dim, embed_dim)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x):
        """x: (batch, seq_len, embed_dim) ->
        (output (batch, seq_len, embed_dim),
         attn_weights (batch, num_heads, seq_len, seq_len))."""
        batch_size, seq_length, embed_dim = x.size()

        qkv = self.qkv_proj(x)  # (batch, seq_len, embed_dim * 3)
        # Split the joint projection into heads; since the projection is
        # learned, the per-head [q|k|v] layout is self-consistent.
        qkv = qkv.reshape(batch_size, seq_length, self.num_heads, 3 * self.head_dim)
        qkv = qkv.permute(0, 2, 1, 3)  # (batch, heads, seq_len, 3 * head_dim)
        q, k, v = qkv.chunk(3, dim=-1)  # each: (batch, heads, seq_len, head_dim)

        # Scaled dot-product attention.
        attn_weights = torch.matmul(q, k.transpose(-2, -1)) / (self.head_dim ** 0.5)
        attn_weights = self.softmax(attn_weights)  # (batch, heads, seq_len, seq_len)

        attn_output = torch.matmul(attn_weights, v)  # (batch, heads, seq_len, head_dim)
        # Merge heads back into the embedding dimension.
        attn_output = attn_output.permute(0, 2, 1, 3).contiguous()
        attn_output = attn_output.reshape(batch_size, seq_length, embed_dim)

        output = self.o_proj(attn_output)
        return output, attn_weights

# 示例用法

# Demo: run multi-head self-attention on a random input batch.
batch_size, seq_length = 2, 5
embed_dim, num_heads = 16, 4

x = torch.randn(batch_size, seq_length, embed_dim)
self_attention_layer = MultiHeadSelfAttention(embed_dim, num_heads)
output, attn_weights = self_attention_layer(x)

print("Output:", output)
print("Attention Weights:", attn_weights)

相关推荐
碧海银沙音频科技研究院9 小时前
1-1杰理蓝牙SOC的UI配置开发方法
人工智能·深度学习·算法
龙文浩_12 小时前
AI梯度下降与PyTorch张量操作技术指南
人工智能·pytorch·python·深度学习·神经网络·机器学习·自然语言处理
清空mega12 小时前
动手学深度学习——样式迁移
人工智能·深度学习
MRDONG113 小时前
Prompt Engineering进阶指南
人工智能·深度学习·神经网络·机器学习·自然语言处理
QQ6765800813 小时前
基于深度学习YOLO的苹果采摘点图像识别 苹果枝条分割识别 苹果分割检测 苹果茎叶分割识别 果园自动化采摘设备目标识别算法第10386期
深度学习·yolo·自动化·苹果采摘点图像·苹果枝条分割·苹果茎叶分割·果园自动化采摘设备
碧海银沙音频科技研究院13 小时前
虚拟机ubuntu与windows共享文件夹(Samba共享)解决WSL加载SI工程满卡问题
人工智能·深度学习·算法
小江的记录本14 小时前
【Transformer架构】Transformer架构核心知识体系(包括自注意力机制、多头注意力、Encoder-Decoder结构)
java·人工智能·后端·python·深度学习·架构·transformer
AI先驱体验官14 小时前
债小白分析:债务优化服务的新变量、AI能否带来行业升级
大数据·人工智能·深度学习·重构·aigc
SomeB1oody14 小时前
【Python深度学习】2.1. 卷积神经网络(CNN)模型理论(基础):卷积运算、池化、ReLU函数
开发语言·人工智能·python·深度学习·机器学习·cnn
sp_fyf_202417 小时前
【大语言模型】 WizardLM:赋能大型预训练语言模型以遵循复杂指令
人工智能·深度学习·神经网络·语言模型·自然语言处理