SelfAttention and MultiHeadAttention implementation demo

# encoding: utf-8
from math import sqrt

import torch
import torch.nn as nn


class Self_Attention(nn.Module):
    def __init__(self, input_dim, dim_k, dim_v):
        super(Self_Attention, self).__init__()
        self.q = nn.Linear(input_dim, dim_k)
        self.k = nn.Linear(input_dim, dim_k)
        self.v = nn.Linear(input_dim, dim_v)
        self.norm_fact = 1 / sqrt(dim_k)  # scaling factor 1/sqrt(d_k)

    def forward(self, x):
        # x: (batch, seq_len, input_dim)
        print("x.shape:", x.shape)
        print("q.weight.shape:", self.q.weight.shape)  # nn.Linear itself has no .shape; inspect its weight instead
        Q = self.q(x)  # (batch, seq_len, dim_k)
        print("Q.shape:", Q.shape)
        K = self.k(x)  # (batch, seq_len, dim_k)
        print("K.shape:", K.shape)
        V = self.v(x)  # (batch, seq_len, dim_v)
        print("V.shape:", V.shape)
        # scale the scores before the softmax (not after), so the attention rows still sum to 1
        atten = nn.Softmax(dim=-1)(torch.bmm(Q, K.permute(0, 2, 1)) * self.norm_fact)  # (batch, seq_len, seq_len)
        output = torch.bmm(atten, V)  # (batch, seq_len, dim_v)
        return output
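For reference, this is the standard scaled dot-product attention; note that the 1/sqrt(d_k) factor belongs inside the softmax (the original snippet multiplied after the softmax, which breaks the normalization):

\mathrm{Attention}(Q, K, V) = \mathrm{softmax}\!\left(\frac{QK^{\top}}{\sqrt{d_k}}\right)V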

print("\n")

print("self attention:")

x = torch.randn(4,3,1024)

print(x)

print("input size:", x.size())

self_attention = Self_Attention(1024,128,5)

res = self_attention(x)

print("\n")

print(res)

print("output size:", res.size())

print("\n")

class Self_Attention_Muti_Head(nn.Module):
    def __init__(self, input_dim, dim_k, dim_v, nums_head):
        super(Self_Attention_Muti_Head, self).__init__()
        assert dim_k % nums_head == 0
        assert dim_v % nums_head == 0
        self.q = nn.Linear(input_dim, dim_k)
        self.k = nn.Linear(input_dim, dim_k)
        self.v = nn.Linear(input_dim, dim_v)
        self.nums_head = nums_head
        self.dim_k = dim_k
        self.dim_v = dim_v
        self._norm_fact = 1 / sqrt(dim_k // nums_head)  # scale by the per-head dimension

    def forward(self, x):
        batch, seq_len, _ = x.shape
        # split the projections into heads: (batch, nums_head, seq_len, dim_per_head)
        Q = self.q(x).reshape(batch, seq_len, self.nums_head, self.dim_k // self.nums_head).permute(0, 2, 1, 3)
        K = self.k(x).reshape(batch, seq_len, self.nums_head, self.dim_k // self.nums_head).permute(0, 2, 1, 3)
        V = self.v(x).reshape(batch, seq_len, self.nums_head, self.dim_v // self.nums_head).permute(0, 2, 1, 3)
        print("x.shape:", x.shape)
        print("Q.shape:", Q.size())
        # scaled dot-product attention within each head: (batch, nums_head, seq_len, seq_len)
        atten = nn.Softmax(dim=-1)(torch.matmul(Q, K.permute(0, 1, 3, 2)) * self._norm_fact)
        # concatenate the heads back along the feature dimension: (batch, seq_len, dim_v)
        output = torch.matmul(atten, V).permute(0, 2, 1, 3).reshape(batch, seq_len, -1)
        return output

print("\n")

print("multi head attention:")

x = torch.randn(4,3,1024)

print(x)

print(x.size())

self_attention = Self_Attention_Muti_Head(1024,128,6,2)

res = self_attention(x)

print("\n")

print(res)

print(res.size())
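For comparison (my addition, not from the original post), PyTorch ships a ready-made layer, torch.nn.MultiheadAttention, which fuses the Q/K/V projections with an output projection; a minimal self-attention usage sketch, where embed_dim=1024 matches the input above and num_heads=8 is just an illustrative choice:

# minimal sketch using the built-in layer; batch_first=True keeps the (batch, seq, feature) layout
mha = nn.MultiheadAttention(embed_dim=1024, num_heads=8, batch_first=True)
attn_out, attn_weights = mha(x, x, x)  # self-attention: query = key = value = x
print(attn_out.shape)                  # torch.Size([4, 3, 1024])
print(attn_weights.shape)              # torch.Size([4, 3, 3]), averaged over heads by default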


A question:

According to the paper https://arxiv.org/pdf/1911.02150.pdf, it feels like the Multi-Head Attention described there and Grouped-Query Attention mean the same thing, i.e. the same as the "Grouped-query" variant in the classic figure below:

What am I failing to understand?
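For context, here is a hedged sketch I'm adding (not from the paper or the original post) of how the three variants are usually distinguished: they differ only in the number of distinct key/value heads. num_kv_heads == num_heads gives standard Multi-Head Attention, num_kv_heads == 1 gives the Multi-Query Attention of the 1911.02150 paper, and anything in between gives Grouped-Query Attention. The function and parameter names below (grouped_attention, num_kv_heads, wq/wk/wv) are my own illustrative choices:

from math import sqrt

import torch
import torch.nn as nn

def grouped_attention(x, wq, wk, wv, num_heads, num_kv_heads):
    # num_kv_heads == num_heads      -> standard multi-head attention (MHA)
    # num_kv_heads == 1              -> multi-query attention (MQA)
    # 1 < num_kv_heads < num_heads   -> grouped-query attention (GQA)
    B, S, _ = x.shape
    d_head = wq.out_features // num_heads
    q = wq(x).reshape(B, S, num_heads, d_head).transpose(1, 2)     # (B, H,    S, d)
    k = wk(x).reshape(B, S, num_kv_heads, d_head).transpose(1, 2)  # (B, H_kv, S, d)
    v = wv(x).reshape(B, S, num_kv_heads, d_head).transpose(1, 2)
    # every group of num_heads // num_kv_heads query heads shares one K/V head
    k = k.repeat_interleave(num_heads // num_kv_heads, dim=1)
    v = v.repeat_interleave(num_heads // num_kv_heads, dim=1)
    atten = torch.softmax(q @ k.transpose(-2, -1) / sqrt(d_head), dim=-1)
    return (atten @ v).transpose(1, 2).reshape(B, S, -1)

# example: 8 query heads sharing 2 K/V heads (GQA); head dim = 128 // 8 = 16
wq, wk, wv = nn.Linear(1024, 128), nn.Linear(1024, 32), nn.Linear(1024, 32)
out = grouped_attention(torch.randn(4, 3, 1024), wq, wk, wv, num_heads=8, num_kv_heads=2)
print(out.shape)  # torch.Size([4, 3, 128])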
