SelfAttention and MultiHeadAttention implementation demo

# encoding: utf-8

from math import sqrt

import torch
import torch.nn as nn


class Self_Attention(nn.Module):
    def __init__(self, input_dim, dim_k, dim_v):
        super(Self_Attention, self).__init__()
        self.q = nn.Linear(input_dim, dim_k)
        self.k = nn.Linear(input_dim, dim_k)
        self.v = nn.Linear(input_dim, dim_v)
        self.norm_fact = 1 / sqrt(dim_k)

    def forward(self, x):
        print("x.shape:", x.shape)
        print("q weight shape:", self.q.weight.shape)
        Q = self.q(x)                    # (batch, seq_len, dim_k)
        print("Q.shape:", Q.shape)
        K = self.k(x)                    # (batch, seq_len, dim_k)
        print("K.shape:", K.shape)
        V = self.v(x)                    # (batch, seq_len, dim_v)
        print("V.shape:", V.shape)
        # scale the scores before the softmax so the attention weights sum to 1
        atten = nn.Softmax(dim=-1)(torch.bmm(Q, K.permute(0, 2, 1)) * self.norm_fact)
        output = torch.bmm(atten, V)     # (batch, seq_len, dim_v)
        return output
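For reference, the forward pass above implements the standard scaled dot-product attention; the 1/sqrt(d_k) factor scales the scores inside the softmax so that the attention weights still sum to 1 along the last dimension:

    Attention(Q, K, V) = softmax(Q · K^T / sqrt(d_k)) · V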

print("\n")
print("self attention:")
x = torch.randn(4, 3, 1024)
print(x)
print("input size:", x.size())

self_attention = Self_Attention(1024, 128, 5)
res = self_attention(x)
print("\n")
print(res)
print("output size:", res.size())
print("\n")
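As a quick sanity check (not part of the original demo), the hand-written class can be compared against PyTorch's built-in torch.nn.functional.scaled_dot_product_attention, which is available in PyTorch 2.x and computes the same softmax(QK^T / sqrt(E)) V with E taken from the query's last dimension (here E = dim_k = 128), so the two should agree up to floating-point tolerance:

# Sanity check sketch (assumes PyTorch 2.x, where this built-in exists).
import torch.nn.functional as F

with torch.no_grad():
    q, k, v = self_attention.q(x), self_attention.k(x), self_attention.v(x)
    ref = F.scaled_dot_product_attention(q, k, v)   # softmax(q @ k^T / sqrt(128)) @ v
    out = self_attention(x)
print("matches built-in:", torch.allclose(out, ref, atol=1e-5))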

class Self_Attention_Muti_Head(nn.Module):
    def __init__(self, input_dim, dim_k, dim_v, nums_head):
        super(Self_Attention_Muti_Head, self).__init__()
        assert dim_k % nums_head == 0
        assert dim_v % nums_head == 0
        self.q = nn.Linear(input_dim, dim_k)
        self.k = nn.Linear(input_dim, dim_k)
        self.v = nn.Linear(input_dim, dim_v)
        self.nums_head = nums_head
        self.dim_k = dim_k
        self.dim_v = dim_v
        self._norm_fact = 1 / sqrt(dim_k)

    def forward(self, x):
        batch, seq_len, _ = x.shape
        # project, then split the last dimension into heads:
        # (batch, seq_len, dim) -> (batch, nums_head, seq_len, dim // nums_head)
        Q = self.q(x).reshape(batch, seq_len, self.nums_head, self.dim_k // self.nums_head).permute(0, 2, 1, 3)
        K = self.k(x).reshape(batch, seq_len, self.nums_head, self.dim_k // self.nums_head).permute(0, 2, 1, 3)
        V = self.v(x).reshape(batch, seq_len, self.nums_head, self.dim_v // self.nums_head).permute(0, 2, 1, 3)
        print("x.shape:", x.shape)
        print("Q.shape:", Q.size())
        # scaled dot-product attention within each head
        atten = nn.Softmax(dim=-1)(torch.matmul(Q, K.permute(0, 1, 3, 2)) * self._norm_fact)
        # merge the heads back: (batch, nums_head, seq_len, dim_v // nums_head) -> (batch, seq_len, dim_v)
        output = torch.matmul(atten, V).permute(0, 2, 1, 3).reshape(batch, seq_len, -1)
        return output

print("\n")
print("multi head attention:")
x = torch.randn(4, 3, 1024)
print(x)
print(x.size())

self_attention = Self_Attention_Muti_Head(1024, 128, 6, 2)
res = self_attention(x)
print("\n")
print(res)
print(res.size())
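The same per-head computation can also be expressed with the built-in scaled_dot_product_attention once Q, K, V have been split into (batch, heads, seq_len, head_dim). A minimal sketch follows, assuming PyTorch 2.x; note the built-in scales by 1/sqrt(head_dim), whereas the demo class scales by 1/sqrt(dim_k), so the numbers differ slightly:

# Multi-head variant via the built-in kernel (sketch, assumes PyTorch 2.x).
import torch.nn.functional as F

mha = self_attention                      # the Self_Attention_Muti_Head instance above
b, s, _ = x.shape
h = mha.nums_head
q = mha.q(x).reshape(b, s, h, -1).permute(0, 2, 1, 3)   # (b, h, s, dim_k // h)
k = mha.k(x).reshape(b, s, h, -1).permute(0, 2, 1, 3)
v = mha.v(x).reshape(b, s, h, -1).permute(0, 2, 1, 3)   # (b, h, s, dim_v // h)
out = F.scaled_dot_product_attention(q, k, v)           # attention within each head
out = out.permute(0, 2, 1, 3).reshape(b, s, -1)         # (b, s, dim_v)
print(out.size())                                       # torch.Size([4, 3, 6])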


A question:

According to the paper https://arxiv.org/pdf/1911.02150.pdf, it feels to me that the Multi-Head Attention described here means the same thing as Grouped-Query Attention,

i.e. the same as the "Grouped-query" variant in the classic comparison figure below (figure not reproduced here).

What am I not understanding correctly?
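One possible source of confusion (my reading, offered as a hint rather than a definitive answer): the 1911.02150 paper introduces Multi-Query Attention, where all query heads share a single K/V head, while Grouped-Query Attention sits between that and standard Multi-Head Attention by sharing each K/V head among a group of query heads. A minimal sketch of how only the K/V projection sizes differ between the three variants (d_model, head_dim, n_heads, n_groups are made-up example values):

# Illustrative only: how many K/V projections each variant carries.
import torch.nn as nn

d_model, head_dim, n_heads, n_groups = 1024, 64, 8, 4    # hypothetical sizes

q_proj = nn.Linear(d_model, n_heads * head_dim)    # queries: always n_heads of them

k_mha = nn.Linear(d_model, n_heads * head_dim)     # Multi-Head:    one K/V head per query head
k_mqa = nn.Linear(d_model, 1 * head_dim)           # Multi-Query:   a single K/V head shared by all query heads (1911.02150)
k_gqa = nn.Linear(d_model, n_groups * head_dim)    # Grouped-Query: one K/V head per group of n_heads // n_groups query heads
# (the V projections are sized the same way as the K projections)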
