MindSpore Open Course - GPT-2

GPT-2 Masked Self-Attention

GPT-2 Self-attention: 1- Creating queries, keys, and values
```python
import numpy as np
import mindspore
from mindspore import Tensor, ops

batch_size = 1
seq_len = 10
embed_dim = 768

x = Tensor(np.random.randn(batch_size, seq_len, embed_dim), mindspore.float32)

from mindnlp._legacy.functional import split
from mindnlp.models.utils.utils import Conv1D

c_attn = Conv1D(3 * embed_dim, embed_dim)
query, key, value = split(c_attn(x), embed_dim, axis=2)
query.shape, key.shape, value.shape  # each (1, 10, 768)

def split_heads(tensor, num_heads, attn_head_size):
    """
    Splits hidden_size dim into attn_head_size and num_heads
    """
    new_shape = tensor.shape[:-1] + (num_heads, attn_head_size)
    tensor = tensor.view(new_shape)
    return ops.transpose(tensor, (0, 2, 1, 3))  # (batch, head, seq_length, head_features)

num_heads = 12
head_dim = embed_dim // num_heads

query = split_heads(query, num_heads, head_dim)
key = split_heads(key, num_heads, head_dim)
value = split_heads(value, num_heads, head_dim)

query.shape, key.shape, value.shape  # each (1, 12, 10, 64)
```
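
As a quick sanity check, splitting heads is nothing more than a reshape followed by a transpose. A minimal plain-NumPy sketch of the same bookkeeping (shapes taken from the values above):

```python
import numpy as np

# (batch, seq, hidden) -> (batch, heads, seq, head_dim): reshape, then swap axes
q = np.random.randn(1, 10, 768).astype(np.float32)
q = q.reshape(1, 10, 12, 64)   # split hidden dim into (num_heads, head_dim)
q = q.transpose(0, 2, 1, 3)    # move the head axis in front of the sequence axis
assert q.shape == (1, 12, 10, 64)
```
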
GPT-2 Self-attention: 2- Scoring
```python
attn_weights = ops.matmul(query, key.swapaxes(-1, -2))

attn_weights.shape  # (1, 12, 10, 10)

max_positions = seq_len

bias = Tensor(np.tril(np.ones((max_positions, max_positions))).reshape(
              (1, 1, max_positions, max_positions)), mindspore.bool_)
bias
```
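
The printed tensor is hard to scan at length 10; the same lower-triangular pattern for a toy length of 4 makes the rule obvious: row i keeps ones only up to column i, so token i may attend to itself and earlier tokens but never to later ones.

```python
import numpy as np

# Row i has ones through column i: token i sees itself and every earlier token.
print(np.tril(np.ones((4, 4))))
# [[1. 0. 0. 0.]
#  [1. 1. 0. 0.]
#  [1. 1. 1. 0.]
#  [1. 1. 1. 1.]]
```
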
```python
from mindnlp._legacy.functional import where, softmax

attn_weights = attn_weights / ops.sqrt(ops.scalar_to_tensor(value.shape[-1]))  # scale by sqrt(d_k)
query_length, key_length = query.shape[-2], key.shape[-2]
causal_mask = bias[:, :, key_length - query_length: key_length, :key_length].bool()
mask_value = Tensor(np.finfo(np.float32).min, dtype=attn_weights.dtype)
attn_weights = where(causal_mask, attn_weights, mask_value)  # hide future positions

np.finfo(np.float32).min  # -3.4028235e+38, effectively -inf after softmax

attn_weights[0, 0]  # masked scores: the upper triangle holds the large negative fill value

attn_weights = softmax(attn_weights, axis=-1)
attn_weights.shape

attn_weights[0, 0]  # after softmax: upper triangle is 0 and each row sums to 1

attn_output = ops.matmul(attn_weights, value)

attn_output.shape  # (1, 12, 10, 64)
```
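
The mask's effect is easy to verify on a toy score matrix: once future positions are filled with a very negative number, softmax drives their weights to zero while each row still sums to 1. A minimal NumPy sketch, independent of the tensors above (-1e9 stands in for the np.finfo minimum used earlier):

```python
import numpy as np

scores = np.random.randn(4, 4)
mask = np.tril(np.ones((4, 4), dtype=bool))
masked = np.where(mask, scores, -1e9)            # hide future positions
weights = np.exp(masked - masked.max(axis=-1, keepdims=True))
weights /= weights.sum(axis=-1, keepdims=True)   # row-wise softmax
print(weights.round(3))       # upper triangle is all zeros
print(weights.sum(axis=-1))   # every row sums to 1.0
```
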
GPT-2 Self-attention: 3.5- Merge attention heads
```python
def merge_heads(tensor, num_heads, attn_head_size):
    """
    Merges attn_head_size dim and num_attn_heads dim into hidden_size
    """
    tensor = ops.transpose(tensor, (0, 2, 1, 3))
    new_shape = tensor.shape[:-2] + (num_heads * attn_head_size,)
    return tensor.view(new_shape)

attn_output = merge_heads(attn_output, num_heads, head_dim)

attn_output.shape  # (1, 10, 768)
```
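
merge_heads is the exact inverse of split_heads, so a round trip through both should reproduce its input exactly. A quick check reusing the two functions defined above:

```python
probe = Tensor(np.random.randn(batch_size, seq_len, embed_dim), mindspore.float32)
round_trip = merge_heads(split_heads(probe, num_heads, head_dim), num_heads, head_dim)
print(np.array_equal(probe.asnumpy(), round_trip.asnumpy()))  # True: lossless
```
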
GPT-2 Self-attention: 4- Projecting
```python
c_proj = Conv1D(embed_dim, embed_dim)
attn_output = c_proj(attn_output)
attn_output.shape  # (1, 10, 768)
```
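
Putting the four steps together: the block scores queries against keys, scales by sqrt(d_k), applies the causal mask, normalizes with softmax, mixes the values, merges the heads, and projects back to the hidden size. A condensed sketch chaining the names defined above (not the packaged mindnlp implementation):

```python
def gpt2_attention(x):
    """One masked self-attention pass, reusing c_attn, c_proj, bias, etc. from above."""
    q, k, v = split(c_attn(x), embed_dim, axis=2)
    q, k, v = (split_heads(t, num_heads, head_dim) for t in (q, k, v))
    w = ops.matmul(q, k.swapaxes(-1, -2)) / ops.sqrt(ops.scalar_to_tensor(head_dim))
    w = where(bias, w, Tensor(np.finfo(np.float32).min, dtype=w.dtype))  # full-length case: bias needs no slicing
    w = softmax(w, axis=-1)
    return c_proj(merge_heads(ops.matmul(w, v), num_heads, head_dim))

gpt2_attention(x).shape  # (1, 10, 768)
```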