Understanding Attention, MHA, MQA, and GQA: Theory and Code Implementation

Theory background: 理解Attention:从起源到MHA,MQA和GQA (Understanding Attention: from its origins to MHA, MQA and GQA) | Linsight

How to upgrade existing models: https://blog.nghuyong.top/2023/09/10/NLP/llm-attention/
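
All of the variants below compute the same scaled dot-product attention; they differ only in how many key/value heads are projected (MHA: one K/V head per query head; MQA: a single shared K/V head; GQA: one K/V head per group of query heads). For reference, with head dimension d_k the core operation is:

    Attention(Q, K, V) = softmax(Q @ K^T / sqrt(d_k)) @ V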

PyTorch implementation:

import math
import torch

class BaseAttention(torch.nn.Module):
    def __init__(self):
        super(BaseAttention, self).__init__()
        self.softmax = torch.nn.Softmax(dim=-1)

    def attention(self, q, k, v, mask=None, dropout=None):
        # scaled dot-product attention: softmax(q @ k^T / sqrt(d_k)) @ v
        attn = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(q.shape[-1])

        if mask is not None:
            # additive mask: masked positions should hold large negative values (e.g. -inf)
            attn = attn + mask
        
        attn = self.softmax(attn)
        if dropout is not None:
            attn = dropout(attn)
        output = torch.matmul(attn, v)
        return output
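
# (Illustrative addition, not from the original post) Since BaseAttention applies the
# mask additively, a causal mask can be built by filling the strictly upper triangle
# with -inf; the (seq_len, seq_len) matrix then broadcasts over batch and heads.
# `make_causal_mask` is a hypothetical helper name.
def make_causal_mask(seq_len, dtype=torch.float32):
    mask = torch.zeros(seq_len, seq_len, dtype=dtype)
    causal = torch.ones(seq_len, seq_len, dtype=torch.bool).triu(diagonal=1)
    return mask.masked_fill(causal, float("-inf"))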


class Attention(BaseAttention):

    def __init__(self, hidden_size, dropout=None):
        super(Attention, self).__init__()
        self.q_proj = torch.nn.Linear(hidden_size, hidden_size)
        self.k_proj = torch.nn.Linear(hidden_size, hidden_size)
        self.v_proj = torch.nn.Linear(hidden_size, hidden_size)
        self.softmax = torch.nn.Softmax(dim=-1)
        
        if dropout is not None:
            self.dropout = torch.nn.Dropout(p=dropout)
        else:
            self.dropout = None
    
    def forward(self, x, mask=None):
        q = self.q_proj(x)
        k = self.k_proj(x)
        v = self.v_proj(x)
        output = self.attention(q, k, v, mask, self.dropout)
        return output


class MHAttention(BaseAttention):

    def __init__(self, hidden_size, num_heads=32, dropout=None):
        super(MHAttention, self).__init__()
        assert hidden_size % num_heads == 0
        self.num_heads = num_heads
        self.softmax = torch.nn.Softmax(dim=-1)
        self.q_proj = torch.nn.Linear(hidden_size, hidden_size)
        self.k_proj = torch.nn.Linear(hidden_size, hidden_size)
        self.v_proj = torch.nn.Linear(hidden_size, hidden_size)
        
        if dropout is not None:
            self.dropout = torch.nn.Dropout(p=dropout)
        else:
            self.dropout = None
    
    def forward(self, x, mask=None):
        bs, seq_len, hidden_size = x.shape

        # split into heads: (bs, seq_len, hidden_size) -> (bs, num_heads, seq_len, head_dim)
        q = self.q_proj(x).view(bs, seq_len, self.num_heads, -1).transpose(1, 2)
        k = self.k_proj(x).view(bs, seq_len, self.num_heads, -1).transpose(1, 2)
        v = self.v_proj(x).view(bs, seq_len, self.num_heads, -1).transpose(1, 2)
        output = self.attention(q, k, v, mask, self.dropout)
        # merge heads back: (bs, num_heads, seq_len, head_dim) -> (bs, seq_len, hidden_size)
        output = output.transpose(1, 2).contiguous().view(bs, seq_len, hidden_size)
        return output


class MQAttention(BaseAttention):

    def __init__(self, hidden_size, num_heads=32, dropout=None):
        super(MQAttention, self).__init__()
        self.num_heads = num_heads
        self.softmax = torch.nn.Softmax(dim=-1)
        assert hidden_size % num_heads == 0
        self.q_proj = torch.nn.Linear(hidden_size, hidden_size)
        self.k_proj = torch.nn.Linear(hidden_size, hidden_size // num_heads)
        self.v_proj = torch.nn.Linear(hidden_size, hidden_size // num_heads)
        
        if dropout is not None:
            self.dropout = torch.nn.Dropout(p=dropout)
        else:
            self.dropout = None
    
    def forward(self, x, mask=None):
        bs, seq_len, hidden_size = x.shape

        q = self.q_proj(x).view(bs, seq_len, self.num_heads, -1).transpose(1, 2)
        # MQA: a single K/V head (bs, 1, seq_len, head_dim) is broadcast across all query heads
        k = self.k_proj(x).view(bs, seq_len, -1, hidden_size // self.num_heads).transpose(1, 2)
        v = self.v_proj(x).view(bs, seq_len, -1, hidden_size // self.num_heads).transpose(1, 2)
        output = self.attention(q, k, v, mask, self.dropout)
        # merge heads back: (bs, num_heads, seq_len, head_dim) -> (bs, seq_len, hidden_size)
        output = output.transpose(1, 2).contiguous().view(bs, seq_len, hidden_size)
        return output


class GQAttention(BaseAttention):

    def __init__(self, hidden_size, num_heads=32, num_kv_heads=8, dropout=None):
        super(GQAttention, self).__init__()
        assert hidden_size % num_heads == 0 and num_heads % num_kv_heads == 0

        self.num_heads = num_heads
        self.num_kv_heads = num_kv_heads
        self.num_group = num_heads // num_kv_heads
        self.softmax = torch.nn.Softmax(dim=-1)
        self.q_proj = torch.nn.Linear(hidden_size, hidden_size)
        self.k_proj = torch.nn.Linear(hidden_size, hidden_size // num_heads * num_kv_heads)
        self.v_proj = torch.nn.Linear(hidden_size, hidden_size // num_heads * num_kv_heads)
        
        if dropout is not None:
            self.dropout = torch.nn.Dropout(p=dropout)
        else:
            self.dropout = None
    
    def repeat_kv(self, feature, num_group):  # adapted from the LLaMA 2 implementation
        # repeat each of the num_kv_heads K/V heads num_group times so the tensor
        # matches the num_heads query heads:
        # (bs, num_kv_heads, seq_len, head_dims) -> (bs, num_kv_heads * num_group, seq_len, head_dims)
        bs, num_kv_heads, seq_len, head_dims = feature.shape
        if num_group == 1:
            return feature
        feature = feature[:, :, None, :, :].expand(bs, num_kv_heads, num_group, seq_len, head_dims)
        return feature.reshape(bs, num_kv_heads * num_group, seq_len, head_dims)

    def forward(self, x, mask=None):
        bs, seq_len, hidden_size = x.shape

        q = self.q_proj(x).view(bs, seq_len, self.num_heads, -1).transpose(1, 2)
        # GQA: project num_kv_heads K/V heads, then repeat them so each group of query heads shares one K/V head
        k = self.k_proj(x).view(bs, seq_len, -1, hidden_size // self.num_heads).transpose(1, 2)
        v = self.v_proj(x).view(bs, seq_len, -1, hidden_size // self.num_heads).transpose(1, 2)
        k, v = self.repeat_kv(k, self.num_group), self.repeat_kv(v, self.num_group)
        output = self.attention(q, k, v, mask, self.dropout)
        # merge heads back: (bs, num_heads, seq_len, head_dim) -> (bs, seq_len, hidden_size)
        output = output.transpose(1, 2).contiguous().view(bs, seq_len, hidden_size)
        return output
        

# Quick smoke test: every variant maps (batch, seq_len, hidden_size) back to the same shape.
input_data = torch.randn(1, 20, 4096)
for model in [
    Attention(hidden_size=4096, dropout=0.1),
    MHAttention(hidden_size=4096, num_heads=32, dropout=0.1),
    MQAttention(hidden_size=4096, num_heads=32, dropout=0.1),
    GQAttention(hidden_size=4096, num_heads=32, num_kv_heads=4, dropout=0.1),
]:
    output = model(input_data)
    print(type(model).__name__, output.shape)
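
For context (not part of the original snippet): the practical motivation for MQA and GQA is a smaller KV cache at inference time, since only the key/value heads are cached per generated token while the query projection stays full-width. A rough back-of-the-envelope comparison under the configuration above:

# KV-cache entries per token, per layer (number of elements), for the config above
hidden_size, num_heads, num_kv_heads = 4096, 32, 4
head_dim = hidden_size // num_heads          # 128

kv_mha = 2 * num_heads * head_dim            # 8192: one K and one V vector per query head
kv_mqa = 2 * 1 * head_dim                    # 256:  a single shared K/V head
kv_gqa = 2 * num_kv_heads * head_dim         # 1024: one K/V head per group
print(kv_mha, kv_mqa, kv_gqa)                # 8192 256 1024

Recent PyTorch releases (2.0+) also provide torch.nn.functional.scaled_dot_product_attention, a fused equivalent of the manual softmax(QK^T / sqrt(d_k))V computation in BaseAttention.attention, which is usually preferable in production code.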