Skip to content

Commit

Permalink
No circular dependency for Tensor Nodes.
Browse files Browse the repository at this point in the history
  • Loading branch information
alejandroarmas committed Apr 29, 2022
1 parent 755f5f7 commit d2a9c1f
Show file tree
Hide file tree
Showing 9 changed files with 102 additions and 85 deletions.
2 changes: 1 addition & 1 deletion Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ endif
VPATH = shared

MAIN = main.o
OBJS = main.o matrix.o generator.o matrix_printer.o functions.o network_layer.o m_algorithms_concepts.o m_algorithms.o m_algorithms_utilities.o m_algorithms_register.o matrix_benchmark.o activation_functions.o tensor.o tensor_forward_wrapper.o
OBJS = main.o matrix.o generator.o matrix_printer.o functions.o network_layer.o m_algorithms_concepts.o m_algorithms.o m_algorithms_utilities.o m_algorithms_register.o matrix_benchmark.o activation_functions.o tensor.o tensor_forward_wrapper.o tensor_backwards_pass.o
OBJS_FOR_UNIT_TEST = $(foreach obj, $(OBJS), $(filter-out $(MAIN), $(wildcard *.o)))


Expand Down
87 changes: 33 additions & 54 deletions include/m_algorithms_register.h
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,8 @@ and then Tensor inherits from that class.
Visitor Polymorphism depending on task,
reading for creating graph, writing data back,
...
*/

namespace NeuralNetwork {
Expand All @@ -24,74 +26,51 @@ namespace NeuralNetwork {

class Tensor;

class RegisteredOperation : std::enable_shared_from_this<RegisteredOperation> {
class RegisteredOperation {

using cgNode = std::shared_ptr<RegisteredOperation>;
using T = std::shared_ptr<Tensor>;
using T = Tensor&;
using NodePair = std::pair<cgNode, cgNode>;

public:
RegisteredOperation(Matrix::Operations::Code _typ,
T _res, cgNode _op = nullptr,
cgNode _op2 = nullptr) :
m_type(_typ), result(_res),
operand(_op), bin_operand(_op2) {}

T share_tensor() const { return result; }
const Matrix::Operations::Code get_code() const { return m_type; }
NodePair get_operands(void) const;

private:
const Matrix::Operations::Code m_type;
T result;
cgNode operand;
cgNode bin_operand;

};


constexpr Matrix::Operations::Code get_operation_code(void) { return m_type; }
T share_tensor () { return result; }
class OperationFactory {

using cgNode = std::shared_ptr<RegisteredOperation>;
using T = Tensor&;
using NodePair = std::pair<cgNode, cgNode>;

public:
static std::shared_ptr<RegisteredOperation> create(
const Matrix::Operations::Code _typ, T _res,
cgNode _op = nullptr, cgNode _op2 = nullptr) {

return std::shared_ptr<RegisteredOperation>(
new RegisteredOperation(_typ, _res, _op, _op2)
return std::make_shared<RegisteredOperation>(
_typ,
_res,
_op,
_op2
);

}

std::shared_ptr<RegisteredOperation> get_operation(void) {
return shared_from_this();
}


NodePair get_operands(void) {

if (operand && bin_operand) {
return {
this->operand->get_operation(),
this->bin_operand->get_operation()
};
}
else if (operand) {
return {
this->operand->get_operation(),
nullptr
};
}
else if (bin_operand) {
return {
nullptr,
this->bin_operand->get_operation()
};
}

return {
nullptr,
nullptr
};


}


protected:
const Matrix::Operations::Code m_type;
T result;
cgNode operand;
cgNode bin_operand;
private:
RegisteredOperation(const Matrix::Operations::Code _typ, T _res,
cgNode _op, cgNode _op2) :
m_type(_typ), result(_res),
operand(std::move(_op)),
bin_operand(std::move(_op2)) {}

};


Expand Down
2 changes: 1 addition & 1 deletion include/network_layer.h
Original file line number Diff line number Diff line change
Expand Up @@ -93,7 +93,7 @@ namespace NeuralNetwork {

public:
BinaryOperationStep(Matrix::Rows _l, Matrix::Columns _w) :
matrix(std::make_shared<Tensor>(_l, _w, Computation::Graph::IsTrackable(true), Computation::Graph::IsLeaf(false))) {}
matrix(std::make_shared<Tensor>(_l, _w, Computation::Graph::IsTrackable(true), Computation::Graph::IsLeaf(true))) {}
std::shared_ptr<Tensor> doForward(std::shared_ptr<Tensor> input) { return Impl()._doForward(input);}
std::shared_ptr<Tensor> releaseOperand() { return matrix; }
protected:
Expand Down
3 changes: 1 addition & 2 deletions include/tensor.h
Original file line number Diff line number Diff line change
Expand Up @@ -84,6 +84,7 @@ namespace NeuralNetwork {
IsLeaf _f = IsLeaf(true),
IsRecordable _r = IsRecordable(true));

void backwards();

bool is_tensor_leaf()
{ return is_leaf; }
Expand All @@ -110,8 +111,6 @@ namespace NeuralNetwork {
return graph_node; }

std::optional<TensorStatistics> stats;
protected:
void register_leaf_op(void);
private:
matrix_t matrix;
std::optional<matrix_t> grad;
Expand Down
2 changes: 1 addition & 1 deletion include/tensor_forward_wrapper.h
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ namespace NeuralNetwork {
namespace Graph {


/*
/*
DESCRIPTION:
Expand Down
32 changes: 31 additions & 1 deletion m_algorithms_register.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,37 @@ namespace NeuralNetwork {

namespace Graph {


RegisteredOperation::NodePair RegisteredOperation::get_operands() const {

if (operand && bin_operand) {
return
{
this->operand,
this->bin_operand
};
}
else if (operand) {
return
{
this->operand,
nullptr
};
}
else if (bin_operand) {
return
{
nullptr,
this->bin_operand
};
}

return
{
nullptr,
nullptr
};

}

}

Expand Down
3 changes: 3 additions & 0 deletions main.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -47,6 +47,9 @@ int main(void) {

auto out = model.forward(ma);

out->backwards();



return 0;
}
34 changes: 18 additions & 16 deletions tensor.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@
#include "matrix.h"
#include "generator.h"
#include "m_algorithms_register.h"
#include "tensor_backwards_pass.h"

#include <memory>

Expand All @@ -18,13 +19,15 @@ namespace NeuralNetwork {
IsTrackable _t, IsLeaf _f, IsRecordable _r):
stats({}),
matrix(Matrix::Representation(_l, _w)),
grad({}), graph_node(nullptr), is_leaf(_f.get()),
grad({}),
graph_node(
OperationFactory::create(
Matrix::Operations::Code::NOP, *this)),
is_leaf(_f.get()),
requires_grad(_t.get()), record_statistics(_r.get()) {

Matrix::Generation::Normal<0, 1> normal_distribution_init;
matrix = normal_distribution_init(matrix);

if (is_leaf) this->register_leaf_op();


}
Expand All @@ -34,21 +37,20 @@ namespace NeuralNetwork {
IsTrackable _t, IsLeaf _f, IsRecordable _r) :
stats({}),
matrix(_m), grad({}),
graph_node(nullptr), is_leaf(_f.get()),
requires_grad(_t.get()), record_statistics(_r.get()){

if (is_leaf) this->register_leaf_op();
}
graph_node(
OperationFactory::create(
Matrix::Operations::Code::NOP, *this)),
is_leaf(_f.get()),
requires_grad(_t.get()), record_statistics(_r.get()) {}


void Tensor::register_leaf_op(void) {

auto op = RegisteredOperation::create(
Matrix::Operations::Code::NOP,
std::shared_ptr<Tensor>(this)
);
register_operation(op);
}
void Tensor::backwards() {

ReversePass reverse;

reverse.backwards(*this, PrintTag{});

}


}
Expand Down
22 changes: 13 additions & 9 deletions tensor_forward_wrapper.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ namespace NeuralNetwork {
namespace Graph {


template <Matrix::Operations::MatrixOperatable Operator>
template <Matrix::Operations::MatrixOperatable Operator>
std::shared_ptr<Tensor> TensorOp<Operator>::operator()(
const std::shared_ptr<Tensor> l,
const std::shared_ptr<Tensor> r) {
Expand Down Expand Up @@ -89,14 +89,16 @@ namespace NeuralNetwork {


if constexpr (Matrix::Operations::UnaryMatrixOperatable<Operator>) {
out_op = RegisteredOperation::create(op_code,
out_tensor,
out_op = OperationFactory::create(
op_code,
*out_tensor,
l->get_operation()
);
}
else if constexpr (Matrix::Operations::BinaryMatrixOperatable<Operator>) {
out_op = RegisteredOperation::create(op_code,
out_tensor,
out_op = OperationFactory::create(
op_code,
*out_tensor,
l->get_operation(),
r->get_operation()
);
Expand Down Expand Up @@ -153,14 +155,16 @@ namespace NeuralNetwork {


if constexpr (Matrix::Operations::UnaryMatrixOperatable<Operator>) {
out_op = RegisteredOperation::create(op_code,
out_tensor,
out_op = OperationFactory::create(
op_code,
*out_tensor,
l->get_operation()
);
}
else if constexpr (Matrix::Operations::BinaryMatrixOperatable<Operator>) {
out_op = RegisteredOperation::create(op_code,
out_tensor,
out_op = OperationFactory::create(
op_code,
*out_tensor,
l->get_operation(),
r->get_operation()
);
Expand Down

0 comments on commit d2a9c1f

Please sign in to comment.