Code implementations of attention and self-attention mechanism modules / stitch-in modules / plug-and-play modules

Attention mechanism

import torch
import torch.nn as nn

class Attention(nn.Module):
    def __init__(self, hidden_dim):
        super(Attention, self).__init__()
        self.attention = nn.Linear(hidden_dim, 1, bias=False)

    def forward(self, encoder_outputs):
        # encoder_outputs shape: (batch_size, sequence_length, hidden_dim)
        attn_weights = self.attention(encoder_outputs)  # (batch_size, sequence_length, 1)
        attn_weights = torch.softmax(attn_weights, dim=1)  # (batch_size, sequence_length, 1)
        context = torch.sum(attn_weights * encoder_outputs, dim=1)  # (batch_size, hidden_dim)
        return context, attn_weights

# Example usage
batch_size = 2
sequence_length = 5
hidden_dim = 10

encoder_outputs = torch.randn(batch_size, sequence_length, hidden_dim)
attention_layer = Attention(hidden_dim)
context, attn_weights = attention_layer(encoder_outputs)

print("Context:", context)
print("Attention Weights:", attn_weights)

Self-attention mechanism (multi-head)

import torch
import torch.nn as nn

class MultiHeadSelfAttention(nn.Module):
    def __init__(self, embed_dim, num_heads):
        super(MultiHeadSelfAttention, self).__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.head_dim = embed_dim // num_heads
        assert self.head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads"

        # A single linear layer produces queries, keys, and values in one pass
        self.qkv_proj = nn.Linear(embed_dim, embed_dim * 3)
        # Output projection that recombines the heads
        self.o_proj = nn.Linear(embed_dim, embed_dim)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x):
        # x: (batch_size, seq_length, embed_dim)
        batch_size, seq_length, embed_dim = x.size()

        qkv = self.qkv_proj(x)  # (batch_size, seq_length, embed_dim * 3)
        qkv = qkv.reshape(batch_size, seq_length, self.num_heads, 3 * self.head_dim)
        qkv = qkv.permute(0, 2, 1, 3)  # (batch_size, num_heads, seq_length, 3 * head_dim)
        q, k, v = qkv.chunk(3, dim=-1)  # each: (batch_size, num_heads, seq_length, head_dim)

        # Scaled dot-product attention
        attn_weights = torch.matmul(q, k.transpose(-2, -1)) / (self.head_dim ** 0.5)
        attn_weights = self.softmax(attn_weights)  # (batch_size, num_heads, seq_length, seq_length)

        attn_output = torch.matmul(attn_weights, v)  # (batch_size, num_heads, seq_length, head_dim)
        attn_output = attn_output.permute(0, 2, 1, 3).contiguous()
        attn_output = attn_output.reshape(batch_size, seq_length, embed_dim)

        output = self.o_proj(attn_output)
        return output, attn_weights

# Example usage
batch_size = 2
seq_length = 5
embed_dim = 16
num_heads = 4

x = torch.randn(batch_size, seq_length, embed_dim)
self_attention_layer = MultiHeadSelfAttention(embed_dim, num_heads)
output, attn_weights = self_attention_layer(x)

print("Output:", output)
print("Attention Weights:", attn_weights)
