Generating the Causal-Mask Upper-Triangular -inf Matrix in XLA

How transformers builds the upper-triangular -inf matrix for the causal attention mask:

The script below follows the transformers source:

import torch
import torch_xla
import torch_xla.core.xla_model as xm
import os

os.environ['PJRT_DEVICE'] = 'IPU'
# os.environ['PJRT_DEVICE'] = 'GPU'
# os.environ['XLA_FLAGS'] = '--xla_dump_to=gen_AttnFwd-XLA_GPU'
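# 'IPU' reproduces the core dump shown below; switching to 'GPU' (optionally with
# the --xla_dump_to flag) gives the working run and the optimized-HLO dump further down.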

tgt_len = 10
dtype = torch.float32
device = xm.xla_device()

# src/transformers/modeling_attn_mask_utils.py#AttentionMaskConverter::_make_causal_mask
mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device)
mask_cond = torch.arange(mask.size(-1), device=device)
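# Broadcasted compare: entry [i][j] is mask_cond[j] < mask_cond[i] + 1, i.e. j <= i,
# so the lower triangle (incl. the diagonal) is reset to 0 while the strict upper
# triangle keeps torch.finfo(dtype).min.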
mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
mask = mask.to(dtype)
print(mask)
# print(mask.size())
# print(mask[3][3])

"""
2024-11-07 07:16:18.824506: F tensorflow/compiler/xla/service/hlo_computation.cc:70] Check failed: nullptr != root (nullptr vs. 0)
Aborted (core dumped)
"""

'''
module @SyncTensorsGraph.25 {
  func.func @main() -> tuple<tensor<10x10xf32>> {
    %0 = mhlo.constant dense<[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]> : tensor<10xi64>
    %1 = "mhlo.broadcast_in_dim"(%0) {broadcast_dimensions = dense<1> : tensor<1xi64>} : (tensor<10xi64>) -> tensor<10x10xi64>
    %2 = mhlo.constant dense<[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]> : tensor<10xi64>
    %3 = "mhlo.broadcast_in_dim"(%2) {broadcast_dimensions = dense<0> : tensor<1xi64>} : (tensor<10xi64>) -> tensor<10x10xi64>
    %4 = mhlo.compare  LT, %1, %3 : (tensor<10x10xi64>, tensor<10x10xi64>) -> tensor<10x10xi1>
    %5 = mhlo.constant dense<false> : tensor<i1>
    %6 = "mhlo.broadcast_in_dim"(%5) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<i1>) -> tensor<10x10xi1>
    %7 = mhlo.compare  NE, %4, %6 : (tensor<10x10xi1>, tensor<10x10xi1>) -> tensor<10x10xi1>
    %8 = mhlo.constant dense<0.000000e+00> : tensor<f32>
    %9 = "mhlo.broadcast_in_dim"(%8) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<10x10xf32>
    %10 = mhlo.constant dense<-3.40282347E+38> : tensor<f32>
    %11 = "mhlo.broadcast_in_dim"(%10) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>) -> tensor<10x10xf32>
    %12 = "mhlo.select"(%7, %9, %11) : (tensor<10x10xi1>, tensor<10x10xf32>, tensor<10x10xf32>) -> tensor<10x10xf32>
    %13 = "mhlo.tuple"(%12) {xla_shape = "(f32[10,10]{1,0})"} : (tensor<10x10xf32>) -> tuple<tensor<10x10xf32>>
    return %13 : tuple<tensor<10x10xf32>>
  }
}
'''
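Read bottom-up, the lowered graph is just broadcast index vectors, an LT compare, and a select between 0 and -3.40282347E+38 (float32 min), which is exactly what a plain torch.triu construction produces. A CPU-only cross-check, as a sketch of my own rather than transformers code:

import torch

tgt_len, dtype = 10, torch.float32

# Strict upper triangle keeps dtype-min; everything on/below the diagonal is 0.
ref = torch.triu(
    torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, dtype=dtype),
    diagonal=1,
)
print(ref)  # matches the XLA tensor printed at the end of the GPU log below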

'''
On XLA_GPU the same graph compiles, and --xla_dump_to even captures the fully optimized HLO:
gen_AttnFwd-XLA_GPU/module_0000.SyncTensorsGraph.25.sm_8.0_gpu_after_optimizations.txt

HloModule SyncTensorsGraph.25, entry_computation_layout={(f32[])->(f32[10,10]{1,0})}

fused_computation {
  iota.3 = s64[10,10]{1,0} iota(), iota_dimension=1
  iota.2 = s64[10]{0} iota(), iota_dimension=0
  constant_5 = s64[] constant(1)
  broadcast.7 = s64[10]{0} broadcast(constant_5), dimensions={}
  add.0 = s64[10]{0} add(iota.2, broadcast.7)
  broadcast.6 = s64[10,10]{1,0} broadcast(add.0), dimensions={0}
  compare.1 = pred[10,10]{1,0} compare(iota.3, broadcast.6), direction=LT
  constant_3 = pred[] constant(false)
  broadcast.4 = pred[10,10]{1,0} broadcast(constant_3), dimensions={}
  compare.0 = pred[10,10]{1,0} compare(compare.1, broadcast.4), direction=NE
  constant_0 = f32[] constant(0)
  broadcast.3 = f32[10,10]{1,0} broadcast(constant_0), dimensions={}
  param_0.1 = f32[] parameter(0)
  broadcast.2 = f32[10,10]{1,0} broadcast(param_0.1), dimensions={}
  ROOT select.0 = f32[10,10]{1,0} select(compare.0, broadcast.3, broadcast.2)
}

ENTRY SyncTensorsGraph.25 {
  p0.13 = f32[] parameter(0)
  fusion = f32[10,10]{1,0} fusion(p0.13), kind=kLoop, calls=fused_computation
  ROOT tuple.24 = (f32[10,10]{1,0}) tuple(fusion)
}

-----
INFO:torch_xla:Letting libtpu.so load fail during _XLAC import. libtpu.so will be loaded from `libtpu` Python package when the ComputationClient is created.
2024-11-07 11:50:41.174644: I tensorflow/compiler/xla/service/service.cc:173] XLA service 0x905c190 initialized for platform CUDA (this does not guarantee that XLA will be used). Devices:
2024-11-07 11:50:41.174714: I tensorflow/compiler/xla/service/service.cc:181]   StreamExecutor device (0): NVIDIA A100-SXM4-80GB, Compute Capability 8.0
2024-11-07 11:50:41.175641: I tensorflow/compiler/xla/pjrt/gpu/se_gpu_pjrt_client.cc:194] Using BFC allocator.
2024-11-07 11:50:41.175713: I tensorflow/compiler/xla/pjrt/gpu/gpu_helpers.cc:105] XLA backend allocating 75175958937 bytes on device 0 for BFCAllocator.
2024-11-07 11:50:42.013482: I tensorflow/compiler/xla/service/dump.cc:485] HloModule dump enabled with path prefix: , suffix: before_optimizations
2024-11-07 11:50:42.037845: I tensorflow/tsl/platform/default/subprocess.cc:304] Start cannot spawn child process: No such file or directory
tensor([[ 0.0000e+00, -3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38,
         -3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38],
        [ 0.0000e+00,  0.0000e+00, -3.4028e+38, -3.4028e+38, -3.4028e+38,
         -3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38],
        [ 0.0000e+00,  0.0000e+00,  0.0000e+00, -3.4028e+38, -3.4028e+38,
         -3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38],
        [ 0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00, -3.4028e+38,
         -3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38],
        [ 0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,
         -3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38],
        [ 0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,
          0.0000e+00, -3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38],
        [ 0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,
          0.0000e+00,  0.0000e+00, -3.4028e+38, -3.4028e+38, -3.4028e+38],
        [ 0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,
          0.0000e+00,  0.0000e+00,  0.0000e+00, -3.4028e+38, -3.4028e+38],
        [ 0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,
          0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00, -3.4028e+38],
        [ 0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,
          0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00,  0.0000e+00]],
       device='xla:0')

'''
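For completeness: downstream, this mask is simply added to the raw attention scores before softmax, so every strictly-upper-triangle position ends up with effectively zero probability. A minimal sketch with made-up scores:

import torch

tgt_len = 10
mask = torch.triu(
    torch.full((tgt_len, tgt_len), torch.finfo(torch.float32).min), diagonal=1
)

scores = torch.randn(tgt_len, tgt_len)        # hypothetical attention logits
probs = torch.softmax(scores + mask, dim=-1)  # masked positions underflow to 0
assert torch.allclose(probs.triu(1), torch.zeros_like(probs))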