rust-candle学习笔记12-实现因果注意力

参考:about-pytorch

定义结构体:

rust 复制代码
/// Single-head causal (masked) self-attention layer.
struct CausalAttention {
    /// Fused no-bias projection producing Q, K and V in one matmul
    /// (output width = 3 * head dim).
    w_qkv: Linear,
    /// Lower-triangular (seq_len, seq_len) U32 mask used to hide future positions.
    /// NOTE(review): whether 1 means "keep" or "fill" depends on `masked_fill` — confirm.
    mask: Tensor,
    /// Scalar tensor holding the dimension used to scale the attention scores
    /// (the sqrt is taken in `forward`).
    d_model: Tensor,
    /// Dropout applied to the attention weights while training.
    dropout: Dropout,
    /// Device all of the layer's tensors live on.
    device: Device,
}

定义new方法:

rust 复制代码
impl CausalAttention {
    /// Builds a causal attention layer.
    ///
    /// * `vb` - variable builder used to create/load the projection weights.
    /// * `embedding_dim` - dimension of the input token embeddings.
    /// * `out_dim` - head dimension of Q, K and V.
    /// * `seq_len` - maximum sequence length the causal mask is built for.
    /// * `dropout` - dropout probability applied to the attention weights.
    /// * `device` - device all tensors are created on.
    fn new(vb: VarBuilder, embedding_dim: usize, out_dim: usize, seq_len: usize, dropout: f32, device: Device) -> Result<Self> {
        Ok(Self { 
            // One fused projection for Q, K and V — hence the factor of 3.
            w_qkv: linear_no_bias(embedding_dim, 3*out_dim, vb.pp("w_qkv"))?,
            // Scaled dot-product attention divides by sqrt(d_k), where d_k is the
            // key/head dimension (`out_dim`) — NOT the input embedding dimension.
            // The original stored `embedding_dim` here, which mis-scales the scores
            // whenever embedding_dim != out_dim (e.g. 3 vs 2 in the demo below).
            d_model: Tensor::new(out_dim as f32, &device)?,
            // Lower-triangular ones. Its interpretation depends on `masked_fill`
            // (not shown here) — TODO confirm it keeps positions where mask == 1.
            mask: Tensor::tril2(seq_len, DType::U32, &device)?,
            dropout: Dropout::new(dropout),
            device
        })
    }
}

定义forward方法:

rust 复制代码
    /// Computes causal self-attention over `x`, expected shape
    /// (batch, seq_len, embedding_dim); returns (batch, seq_len, out_dim).
    ///
    /// `train` enables dropout on the attention weights.
    fn forward(&self, x: &Tensor, train: bool) -> Result<Tensor> { 
        // Fused projection, then split the last dim into 3 chunks: Q, K, V.
        let qkv = self.w_qkv.forward(x)?;
        let (batch_size, seq_len, _) = qkv.dims3()?;
        let qkv = qkv.reshape((batch_size, seq_len, 3, ()))?;
        let q = qkv.get_on_dim(2, 0)?.reshape((batch_size, seq_len, ()))?;
        let k = qkv.get_on_dim(2, 1)?.reshape((batch_size, seq_len, ()))?;
        let v = qkv.get_on_dim(2, 2)?.reshape((batch_size, seq_len, ()))?;
        // Raw attention scores: (batch, seq_len, seq_len).
        let mut attn_score = q.matmul(&k.t()?)?;
        // Softmax is taken over the last axis (the key positions).
        let dim = attn_score.rank() - 1;
        // NOTE(review): the stored mask is (cfg_seq_len, cfg_seq_len); broadcast_as
        // will fail if the input's seq_len differs from the one passed to `new` — confirm.
        let mask = self.mask.broadcast_as(attn_score.shape())?;
        // Fill masked positions with -inf so softmax assigns them zero weight.
        // NOTE(review): assumes `masked_fill` (defined elsewhere) fills where the
        // tril mask is 0, i.e. future positions; if it fills where the mask is
        // nonzero, this mask is inverted — confirm against its definition.
        attn_score = masked_fill(&attn_score, &mask, f32::NEG_INFINITY)?;
        // Scale by sqrt(d); the -inf entries are unaffected by the division.
        let attn_score = attn_score.broadcast_div(&self.d_model.sqrt()?)?; 
        let attn_weights = ops::softmax(&attn_score, dim)?;
        // Dropout is a no-op when `train` is false.
        let attn_weights = self.dropout.forward(&attn_weights, train)?;
        // Weighted sum of the values: (batch, seq_len, out_dim).
        attn_weights.matmul(&v)
    }

测试:

rust 复制代码
/// Demo: run CausalAttention on a batch of two identical 6-token sequences.
fn main() -> Result<()> {
    let device = Device::cuda_if_available(0)?;
    let varmap = VarMap::new();
    let vb = VarBuilder::from_varmap(&varmap, candle_core::DType::F32, &device);

    // One 6-token sequence of 3-dim embeddings; the batch holds two copies of it.
    let seq: Vec<f32> = vec![
        0.43, 0.15, 0.89,
        0.55, 0.87, 0.66,
        0.57, 0.85, 0.64,
        0.22, 0.58, 0.33,
        0.77, 0.25, 0.10,
        0.05, 0.80, 0.55,
    ];
    let mut data = seq.clone();
    data.extend_from_slice(&seq);
    let input = Tensor::from_vec(data, (2, 6, 3), &device)?;

    // embedding_dim = 3, out_dim = 2, seq_len = 6, dropout = 0.5.
    let model = CausalAttention::new(vb.clone(), 3, 2, 6, 0.5, device.clone())?;
    let output = model.forward(&input, true)?;
    println!("output: {:?}\n", output);
    println!("output: {:?}\n", output.to_vec3::<f32>()?);
    Ok(())
}
相关推荐
d111111111d6 分钟前
STM32 I2C通信详解:从机地址 vs 寄存器地址
笔记·stm32·单片机·嵌入式硬件·学习·模块测试
蕨蕨学AI10 分钟前
【2025】个人学习与实践总结
经验分享·学习·改行学it
STLearner11 分钟前
2025时空数据研究工作总结
大数据·人工智能·python·深度学习·学习·机器学习·智慧城市
2401_8414956414 分钟前
自然语言处理实战——基于BP神经网络的命名实体识别
人工智能·python·神经网络·算法·机器学习·自然语言处理·命名实体识别
wdfk_prog14 分钟前
[Linux]学习笔记系列 -- [fs]namei
linux·笔记·学习
航Hang*14 分钟前
第六章:网络系统建设与运维(中级)——链路聚合
运维·服务器·网络·笔记·华为·ensp
极客BIM工作室14 分钟前
AI导读AI论文: FinGPT: Open-Source Financial Large Language Models
人工智能·语言模型·自然语言处理
superman超哥16 分钟前
Rust 异步性能最佳实践:高并发场景的极致优化
开发语言·后端·rust·最佳实践·异步性能·高并发场景
·present·18 分钟前
射频网课学习第七章(驱动放大器设计)
学习
Albert.H.Holmes20 分钟前
Elasticsearch学习
大数据·学习·elasticsearch