Code implementations of attention and self-attention mechanism modules / splice-in modules / plug-and-play modules

Attention mechanism

import torch
import torch.nn as nn

class Attention(nn.Module):
    def __init__(self, hidden_dim):
        super(Attention, self).__init__()
        self.attention = nn.Linear(hidden_dim, 1, bias=False)

    def forward(self, encoder_outputs):
        # encoder_outputs shape: (batch_size, sequence_length, hidden_dim)
        attn_weights = self.attention(encoder_outputs)  # (batch_size, sequence_length, 1)
        attn_weights = torch.softmax(attn_weights, dim=1)  # (batch_size, sequence_length, 1)
        context = torch.sum(attn_weights * encoder_outputs, dim=1)  # (batch_size, hidden_dim)
        return context, attn_weights

# Example usage
batch_size = 2
sequence_length = 5
hidden_dim = 10

encoder_outputs = torch.randn(batch_size, sequence_length, hidden_dim)
attention_layer = Attention(hidden_dim)
context, attn_weights = attention_layer(encoder_outputs)

print("Context:", context)
print("Attention Weights:", attn_weights)

Self-attention mechanism

import torch
import torch.nn as nn

class MultiHeadSelfAttention(nn.Module):
    def __init__(self, embed_dim, num_heads):
        super(MultiHeadSelfAttention, self).__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.head_dim = embed_dim // num_heads
        assert self.head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads"

        # A single linear layer produces Q, K, and V in one projection
        self.qkv_proj = nn.Linear(embed_dim, embed_dim * 3)
        self.o_proj = nn.Linear(embed_dim, embed_dim)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x):
        batch_size, seq_length, embed_dim = x.size()

        qkv = self.qkv_proj(x)  # (batch_size, seq_length, embed_dim * 3)
        qkv = qkv.reshape(batch_size, seq_length, self.num_heads, 3 * self.head_dim)
        qkv = qkv.permute(0, 2, 1, 3)  # (batch_size, num_heads, seq_length, 3 * head_dim)
        q, k, v = qkv.chunk(3, dim=-1)  # each: (batch_size, num_heads, seq_length, head_dim)

        attn_weights = torch.matmul(q, k.transpose(-2, -1)) / (self.head_dim ** 0.5)  # scaled dot-product
        attn_weights = self.softmax(attn_weights)  # (batch_size, num_heads, seq_length, seq_length)

        attn_output = torch.matmul(attn_weights, v)  # (batch_size, num_heads, seq_length, head_dim)
        attn_output = attn_output.permute(0, 2, 1, 3).contiguous()
        attn_output = attn_output.reshape(batch_size, seq_length, embed_dim)

        output = self.o_proj(attn_output)
        return output, attn_weights

# Example usage
batch_size = 2
seq_length = 5
embed_dim = 16
num_heads = 4

x = torch.randn(batch_size, seq_length, embed_dim)
self_attention_layer = MultiHeadSelfAttention(embed_dim, num_heads)
output, attn_weights = self_attention_layer(x)

print("Output:", output)
print("Attention Weights:", attn_weights)
