diff --git a/Makefile b/Makefile index 2bf19b1..ce572e1 100644 --- a/Makefile +++ b/Makefile @@ -27,7 +27,7 @@ endif VPATH = shared MAIN = main.o -OBJS = main.o matrix.o generator.o matrix_printer.o functions.o network_layer.o m_algorithms_concepts.o m_algorithms.o m_algorithms_utilities.o m_algorithms_register.o matrix_benchmark.o activation_functions.o tensor.o tensor_forward_wrapper.o +OBJS = main.o matrix.o generator.o matrix_printer.o functions.o network_layer.o m_algorithms_concepts.o m_algorithms.o m_algorithms_utilities.o m_algorithms_register.o matrix_benchmark.o activation_functions.o tensor.o tensor_forward_wrapper.o tensor_backwards_pass.o OBJS_FOR_UNIT_TEST = $(foreach obj, $(OBJS), $(filter-out $(MAIN), $(wildcard *.o))) diff --git a/include/m_algorithms_register.h b/include/m_algorithms_register.h index 2dd049f..63931cd 100644 --- a/include/m_algorithms_register.h +++ b/include/m_algorithms_register.h
@@ -24,74 +24,51 @@ namespace NeuralNetwork { class Tensor; - class RegisteredOperation : std::enable_shared_from_this { + class RegisteredOperation { using cgNode = std::shared_ptr; - using T = std::shared_ptr; + using T = Tensor&; using NodePair = std::pair; public: + RegisteredOperation(Matrix::Operations::Code _typ, + T _res, cgNode _op = nullptr, + cgNode _op2 = nullptr) : + m_type(_typ), result(_res), + operand(_op), bin_operand(_op2) {} + + T share_tensor() const { return result; } + const Matrix::Operations::Code get_code() const { return m_type; } + NodePair get_operands(void) const; + + private: + const Matrix::Operations::Code m_type; + T result; + cgNode operand; + cgNode bin_operand; + + }; + - constexpr Matrix::Operations::Code get_operation_code(void) { return m_type; } - T share_tensor () { return result; } + class OperationFactory { + using cgNode = std::shared_ptr; + using T = Tensor&; + using NodePair = std::pair; + + public: static std::shared_ptr create( const Matrix::Operations::Code _typ, T _res, cgNode _op = nullptr, cgNode _op2 = nullptr) { - return std::shared_ptr( - new RegisteredOperation(_typ, _res, _op, _op2) + return std::make_shared( + _typ, + _res, + _op, + _op2 ); } - - std::shared_ptr get_operation(void) { - return shared_from_this(); - } - - - NodePair get_operands(void) { - - if (operand && bin_operand) { - return { - this->operand->get_operation(), - this->bin_operand->get_operation() - }; - } - else if (operand) { - return { - this->operand->get_operation(), - nullptr - }; - } - else if (bin_operand) { - return { - nullptr, - this->bin_operand->get_operation() - }; - } - - return { - nullptr, - nullptr - }; - - - } - - - protected: - const Matrix::Operations::Code m_type; - T result; - cgNode operand; - cgNode bin_operand; - private: - RegisteredOperation(const Matrix::Operations::Code _typ, T _res, - cgNode _op, cgNode _op2) : - m_type(_typ), result(_res), - 
operand(std::move(_op)), - bin_operand(std::move(_op2)) {} - }; diff --git a/include/network_layer.h b/include/network_layer.h index 53bce55..ccb7417 100644 --- a/include/network_layer.h +++ b/include/network_layer.h @@ -93,7 +93,7 @@ namespace NeuralNetwork { public: BinaryOperationStep(Matrix::Rows _l, Matrix::Columns _w) : - matrix(std::make_shared(_l, _w, Computation::Graph::IsTrackable(true), Computation::Graph::IsLeaf(false))) {} + matrix(std::make_shared(_l, _w, Computation::Graph::IsTrackable(true), Computation::Graph::IsLeaf(true))) {} std::shared_ptr doForward(std::shared_ptr input) { return Impl()._doForward(input);} std::shared_ptr releaseOperand() { return matrix; } protected: diff --git a/include/tensor.h b/include/tensor.h index f7da6da..b7a99e1 100644 --- a/include/tensor.h +++ b/include/tensor.h @@ -84,6 +84,7 @@ namespace NeuralNetwork { IsLeaf _f = IsLeaf(true), IsRecordable _r = IsRecordable(true)); + void backwards(); bool is_tensor_leaf() { return is_leaf; } @@ -110,8 +111,6 @@ namespace NeuralNetwork { return graph_node; } std::optional stats; - protected: - void register_leaf_op(void); private: matrix_t matrix; std::optional grad; diff --git a/include/tensor_forward_wrapper.h b/include/tensor_forward_wrapper.h index ad098b3..24a3dcb 100644 --- a/include/tensor_forward_wrapper.h +++ b/include/tensor_forward_wrapper.h @@ -22,7 +22,7 @@ namespace NeuralNetwork { namespace Graph { - /* + /* DESCRIPTION: diff --git a/m_algorithms_register.cpp b/m_algorithms_register.cpp index 3d43b6c..f023f74 100644 --- a/m_algorithms_register.cpp +++ b/m_algorithms_register.cpp @@ -12,7 +12,37 @@ namespace NeuralNetwork { namespace Graph { - + RegisteredOperation::NodePair RegisteredOperation::get_operands() const { + + if (operand && bin_operand) { + return + { + this->operand, + this->bin_operand + }; + } + else if (operand) { + return + { + this->operand, + nullptr + }; + } + else if (bin_operand) { + return + { + nullptr, + this->bin_operand + }; + } + + 
return + { + nullptr, + nullptr + }; + + } } diff --git a/main.cpp b/main.cpp index 086cf1b..2759d68 100644 --- a/main.cpp +++ b/main.cpp @@ -47,6 +47,9 @@ int main(void) { auto out = model.forward(ma); + out->backwards(); + + return 0; } \ No newline at end of file diff --git a/tensor.cpp b/tensor.cpp index 143602f..1005890 100644 --- a/tensor.cpp +++ b/tensor.cpp @@ -2,6 +2,7 @@ #include "matrix.h" #include "generator.h" #include "m_algorithms_register.h" +#include "tensor_backwards_pass.h" #include @@ -18,13 +19,15 @@ namespace NeuralNetwork { IsTrackable _t, IsLeaf _f, IsRecordable _r): stats({}), matrix(Matrix::Representation(_l, _w)), - grad({}), graph_node(nullptr), is_leaf(_f.get()), + grad({}), + graph_node( + OperationFactory::create( + Matrix::Operations::Code::NOP, *this)), + is_leaf(_f.get()), requires_grad(_t.get()), record_statistics(_r.get()) { Matrix::Generation::Normal<0, 1> normal_distribution_init; matrix = normal_distribution_init(matrix); - - if (is_leaf) this->register_leaf_op(); } @@ -34,21 +37,20 @@ namespace NeuralNetwork { IsTrackable _t, IsLeaf _f, IsRecordable _r) : stats({}), matrix(_m), grad({}), - graph_node(nullptr), is_leaf(_f.get()), - requires_grad(_t.get()), record_statistics(_r.get()){ - - if (is_leaf) this->register_leaf_op(); - } + graph_node( + OperationFactory::create( + Matrix::Operations::Code::NOP, *this)), + is_leaf(_f.get()), + requires_grad(_t.get()), record_statistics(_r.get()) {} - void Tensor::register_leaf_op(void) { - - auto op = RegisteredOperation::create( - Matrix::Operations::Code::NOP, - std::shared_ptr(this) - ); - register_operation(op); - } + void Tensor::backwards() { + + ReversePass reverse; + + reverse.backwards(*this, PrintTag{}); + + } } diff --git a/tensor_forward_wrapper.cpp b/tensor_forward_wrapper.cpp index 6f2f545..65a2800 100644 --- a/tensor_forward_wrapper.cpp +++ b/tensor_forward_wrapper.cpp @@ -23,7 +23,7 @@ namespace NeuralNetwork { namespace Graph { - template + template std::shared_ptr 
TensorOp::operator()( const std::shared_ptr l, const std::shared_ptr r) { @@ -89,14 +89,16 @@ namespace NeuralNetwork { if constexpr (Matrix::Operations::UnaryMatrixOperatable) { - out_op = RegisteredOperation::create(op_code, - out_tensor, + out_op = OperationFactory::create( + op_code, + *out_tensor, l->get_operation() ); } else if constexpr (Matrix::Operations::BinaryMatrixOperatable) { - out_op = RegisteredOperation::create(op_code, - out_tensor, + out_op = OperationFactory::create( + op_code, + *out_tensor, l->get_operation(), r->get_operation() ); @@ -153,14 +155,16 @@ namespace NeuralNetwork { if constexpr (Matrix::Operations::UnaryMatrixOperatable) { - out_op = RegisteredOperation::create(op_code, - out_tensor, + out_op = OperationFactory::create( + op_code, + *out_tensor, l->get_operation() ); } else if constexpr (Matrix::Operations::BinaryMatrixOperatable) { - out_op = RegisteredOperation::create(op_code, - out_tensor, + out_op = OperationFactory::create( + op_code, + *out_tensor, l->get_operation(), r->get_operation() );