38. Deep Learning Self-Study: Building Your Own Deep Learning Framework, Part 3: Improving Automatic Gradient Computation

This part improves the simple autograd engine from the previous installment: every Tensor now counts its children in the computation graph, accumulates the gradients that flow back from them, and only propagates to its creators once all children have reported; backpropagating through the same edge twice raises an error.

import numpy as np


class Tensor(object):

    def __init__(self, data,
                 autograd=False,
                 creators=None,
                 creation_op=None,
                 id=None):

        self.data = np.array(data)
        self.autograd = autograd
        self.grad = None
        if (id is None):
            self.id = np.random.randint(0, 100000)
        else:
            self.id = id

        self.creators = creators
        self.creation_op = creation_op
        self.children = {}

        # register this tensor as a child of each of its creators and count how
        # many times it appears, so creators know how many gradients to expect
        if (creators is not None):
            for c in creators:
                if (self.id not in c.children):
                    c.children[self.id] = 1
                else:
                    c.children[self.id] += 1

    def all_children_grads_accounted_for(self):
        # a tensor may pass its gradient on to its creators only after every
        # child has sent back its own gradient (all counters have reached zero)
        for id, cnt in self.children.items():
            if (cnt != 0):
                return False
        return True

    def backward(self, grad=None, grad_origin=None):
        if (self.autograd):
            if (grad is None):
                grad = Tensor(np.ones_like(self.data))  # seed with a gradient of ones when none is given

            if (grad_origin is not None):
                if (self.children[grad_origin.id] == 0):
                    raise Exception("cannot backprop more than once")
                else:
                    self.children[grad_origin.id] -= 1
            if (self.grad is None):
                self.grad = grad
            else:
                self.grad += grad

            # grads must not have grads of their own
            assert grad.autograd == False

            # only continue backpropping if there's something to
            # backprop into and if all gradients (from children)
            # are accounted for; override waiting for children if
            # backward() was called on this variable directly
            if (self.creators is not None and
                    (self.all_children_grads_accounted_for() or
                     grad_origin is None)):

                if (self.creation_op == "add"):
                    self.creators[0].backward(self.grad, self)
                    self.creators[1].backward(self.grad, self)

    def __add__(self, other):
        # record the addition in the graph only when both operands track gradients
        if (self.autograd and other.autograd):
            return Tensor(self.data + other.data,
                          autograd=True,
                          creators=[self, other],
                          creation_op="add")
        return Tensor(self.data + other.data)

    def __repr__(self):
        return str(self.data.__repr__())

    def __str__(self):
        return str(self.data.__str__())


a = Tensor([1, 2, 3, 4, 5], autograd=True)
b = Tensor([2, 2, 2, 2, 2], autograd=True)
c = Tensor([5, 4, 3, 2, 1], autograd=True)

d = a + b
e = b + c
f = d + e

f.backward(Tensor(np.array([1, 1, 1, 1, 1])))

print(b.grad.data == np.array([2, 2, 2, 2, 2]))
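
The comparison prints [ True True True True True ]: b feeds into both d and e, so it receives a ones-vector from each path and its accumulated gradient is [2, 2, 2, 2, 2].

Below is a minimal sketch of the two behaviours this version adds, assuming the Tensor class defined above (the variable names x, y, z are mine, not from the original post):

x = Tensor([1, 2, 3], autograd=True)
y = Tensor([4, 5, 6], autograd=True)
z = x + y

# with no argument, backward() seeds the gradient with ones
z.backward()
print(x.grad.data)   # [1 1 1]

# each creator-child edge may only be backpropagated through once;
# a second pass trips the child counter and raises
try:
    z.backward(Tensor(np.array([1, 1, 1])))
except Exception as exc:
    print(exc)       # cannot backprop more than once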