38. Deep Learning Self-Study Path: Building Your Own Deep Learning Framework, Part 3: Improving Automatic Gradient Computation

This version improves the Tensor class by counting how many children each tensor has: during backpropagation, a node waits until every child has sent back its gradient, sums those contributions, and only then propagates its own gradient on to its creators.

import numpy as np


class Tensor(object):

    def __init__(self, data,
                 autograd=False,
                 creators=None,
                 creation_op=None,
                 id=None):

        self.data = np.array(data)
        self.autograd = autograd
        self.grad = None
        # give each tensor an id so that parents can do per-child
        # bookkeeping in their children dictionaries
        if id is None:
            self.id = np.random.randint(0, 100000)
        else:
            self.id = id

        self.creators = creators
        self.creation_op = creation_op
        self.children = {}

        # register this tensor with each creator, counting how many
        # gradients that creator should expect back from this child
        if creators is not None:
            for c in creators:
                if self.id not in c.children:
                    c.children[self.id] = 1
                else:
                    c.children[self.id] += 1

    def all_children_grads_accounted_for(self):
        # returns True once every child has sent its gradient back
        for id, cnt in self.children.items():
            if cnt != 0:
                return False
        return True

    def backward(self, grad=None, grad_origin=None):
        if self.autograd:
            # default to a gradient of ones when backward() is called
            # on an output without an explicit gradient
            if grad is None:
                grad = Tensor(np.ones_like(self.data))

            if grad_origin is not None:
                # each child may send its gradient back exactly once
                if self.children[grad_origin.id] == 0:
                    raise Exception("cannot backprop more than once")
                else:
                    self.children[grad_origin.id] -= 1

            # accumulate gradients arriving from multiple children
            if self.grad is None:
                self.grad = grad
            else:
                self.grad += grad

            # grads must not have grads of their own
            assert grad.autograd == False

            # only continue backpropping if there's something to
            # backprop into and if all gradients (from children)
            # are accounted for; override waiting for children if
            # backward() was called on this variable directly
            if (self.creators is not None and
                    (self.all_children_grads_accounted_for() or
                     grad_origin is None)):

                if self.creation_op == "add":
                    # addition routes the gradient to both inputs unchanged
                    self.creators[0].backward(self.grad, self)
                    self.creators[1].backward(self.grad, self)

    def __add__(self, other):
        # only record the graph when both operands participate in autograd
        if self.autograd and other.autograd:
            return Tensor(self.data + other.data,
                          autograd=True,
                          creators=[self, other],
                          creation_op="add")
        return Tensor(self.data + other.data)

    def __repr__(self):
        return self.data.__repr__()

    def __str__(self):
        return self.data.__str__()


a = Tensor([1, 2, 3, 4, 5], autograd=True)
b = Tensor([2, 2, 2, 2, 2], autograd=True)
c = Tensor([5, 4, 3, 2, 1], autograd=True)

# b feeds into both d and e, so during backprop it must wait for
# (and sum) the gradients coming back from both children
d = a + b
e = b + c
f = d + e

f.backward(Tensor(np.array([1, 1, 1, 1, 1])))

# b receives a gradient of 1 from d and 1 from e, so its total is 2
print(b.grad.data == np.array([2, 2, 2, 2, 2]))  # [ True  True  True  True  True]
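
As a quick sanity check of the per-child bookkeeping, here is a minimal sketch (using the Tensor class above; x, y and z are illustrative names): the first backward pass consumes the gradient edge from z back to its creators, so a second pass through the same edge raises the "cannot backprop more than once" exception.

x = Tensor([1, 2, 3], autograd=True)
y = Tensor([4, 5, 6], autograd=True)
z = x + y

z.backward(Tensor(np.array([1, 1, 1])))
print(x.grad.data)  # [1 1 1]

try:
    # the edge from z back to x was already consumed above
    z.backward(Tensor(np.array([1, 1, 1])))
except Exception as e:
    print(e)  # cannot backprop more than once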