diff --git a/Makefile b/Makefile
index b02a9d9..2bf19b1 100644
--- a/Makefile
+++ b/Makefile
@@ -27,7 +27,7 @@ endif
 VPATH = shared
 
 MAIN = main.o
-OBJS = main.o matrix.o generator.o matrix_printer.o functions.o network_layer.o m_algorithms_concepts.o m_algorithms.o m_algorithms_utilities.o m_algorithms_register.o matrix_benchmark.o activation_functions.o tensor.o
+OBJS = main.o matrix.o generator.o matrix_printer.o functions.o network_layer.o m_algorithms_concepts.o m_algorithms.o m_algorithms_utilities.o m_algorithms_register.o matrix_benchmark.o activation_functions.o tensor.o tensor_forward_wrapper.o
 
 OBJS_FOR_UNIT_TEST = $(foreach obj, $(OBJS), $(filter-out $(MAIN), $(wildcard *.o)))
diff --git a/activation_functions.cpp b/activation_functions.cpp
index 0334633..823c332 100644
--- a/activation_functions.cpp
+++ b/activation_functions.cpp
@@ -1,5 +1,6 @@
 #include
 
+#include "tensor_forward_wrapper.h"
 #include "activation_functions.h"
 #include "m_algorithms.h"
 // #include "matrix_printer.h"
diff --git a/include/tensor.h b/include/tensor.h
index b782621..f7da6da 100644
--- a/include/tensor.h
+++ b/include/tensor.h
@@ -6,7 +6,6 @@
 #include "m_algorithms.h"
 #include "m_algorithms_register.h"
 #include "m_algorithms_concepts.h"
-#include "matrix_benchmark.h"
 #include
 #include
@@ -18,8 +17,21 @@ namespace NeuralNetwork {
 
     namespace Graph {
 
+        using IsTrackable =
+            Matrix::NamedType;
+
+
+        using IsLeaf =
+            Matrix::NamedType;
+
+
+        using IsRecordable =
+            Matrix::NamedType;
+
+
+        class RegisteredOperation;
 
 
         class TensorStatistics {
@@ -55,14 +67,6 @@ namespace NeuralNetwork {
             time_t matrix_operation_t2;
         };
 
-        using IsTrackable = Matrix::NamedType;
-
-        using IsLeaf = Matrix::NamedType;
-
-        using IsRecordable = Matrix::NamedType;
-
-
-
 
         class Tensor {
@@ -119,109 +123,6 @@ };
 
 
-
-        /*
-
-        DESCRIPTION:
-
-            Functor follows the 'Strategy' behavioral pattern for defining a family
-            of functions over the permutations of benchmarking (or not) an
-            operation and of the operation being binary (or unary).
-
-        USAGE:
-
-            TensorOp mm(std::make_unique<
-                Matrix::Operations::Binary::Multiplication::ParallelDNC>());
-
-            auto out = mm(input, this->matrix);
-
-        */
-
-
-        template <class Operator>
-        class TensorOp {
-
-            public:
-                TensorOp(const Operator& _op) : op_type(_op) {}
-
-                std::shared_ptr<Tensor> operator()(
-                    const std::shared_ptr<Tensor> l,
-                    const std::shared_ptr<Tensor> r = nullptr);
-            private:
-                Operator op_type;
-        };
-
-
-        class ComputeTag;
-        class RecordTag;
-
-
-        class PerformTensorStrategy {
-
-            public:
-                PerformTensorStrategy() = default;
-
-                template <class Operator>
-                std::shared_ptr<Tensor> compute(
-                    Operator _op,
-                    const std::shared_ptr<Tensor> l,
-                    const std::shared_ptr<Tensor> r,
-                    ComputeTag _);
-
-                template <class Operator>
-                std::shared_ptr<Tensor> compute(
-                    Operator _op,
-                    const std::shared_ptr<Tensor> l,
-                    const std::shared_ptr<Tensor> r,
-                    RecordTag _);
-
-        };
-
-
-
-        /*
-
-        DESCRIPTION:
-
-            Curiously Recurring Template Pattern for
-            accepting the Strategy implementation visitor
-
-        USAGE:
-
-            if (recordTensorOperation && isBinaryOp) {
-
-                RecordBinaryTag _;
-
-                return _.compute_tensor(std::move(op_type), l, r, implementation);
-            }
-
-        */
-        template <class StrategyType>
-        struct StrategyTag {
-
-            template <class Operator>
-            std::shared_ptr<Tensor> compute_tensor(
-                Operator _op,
-                const std::shared_ptr<Tensor> l,
-                const std::shared_ptr<Tensor> r,
-                PerformTensorStrategy& strat_implementation) {
-
-                return strat_implementation.compute(
-                    _op, l, r, *static_cast<
-                        StrategyType const*>(this));
-            }
-        };
-
-        class ComputeTag : public StrategyTag<ComputeTag> {
-            public:
-                ComputeTag() = default;
-        };
-        class RecordTag : public StrategyTag<RecordTag> {
-            public:
-                RecordTag() = default;
-        };
-
 
 }
diff --git a/include/tensor_forward_wrapper.h b/include/tensor_forward_wrapper.h
new file mode 100644
index 0000000..ad098b3
--- /dev/null
+++ b/include/tensor_forward_wrapper.h
@@ -0,0 +1,137 @@
+#ifndef TENSOR_FORWARD_WRAPPER
+#define TENSOR_FORWARD_WRAPPER
+
+
+#include "tensor.h"
+#include "matrix.h"
+#include "m_algorithms.h"
+#include "m_algorithms_register.h"
+#include "m_algorithms_concepts.h"
+#include "matrix_benchmark.h"
+
+#include
+#include
+#include
+
+
+
+namespace NeuralNetwork {
+
+    namespace Computation {
+
+        namespace Graph {
+
+
+            /*
+
+            DESCRIPTION:
+
+                Functor follows the 'Strategy' behavioral pattern for defining a family
+                of functions over the permutations of benchmarking (or not) an
+                operation and of the operation being binary (or unary).
+
+            USAGE:
+
+                TensorOp mm(std::make_unique<
+                    Matrix::Operations::Binary::Multiplication::ParallelDNC>());
+
+                auto out = mm(input, this->matrix);
+
+            */
+
+
+            template <class Operator>
+            class TensorOp {
+
+                public:
+                    TensorOp(const Operator& _op) : op_type(_op) {}
+
+                    std::shared_ptr<Tensor> operator()(
+                        const std::shared_ptr<Tensor> l,
+                        const std::shared_ptr<Tensor> r = nullptr);
+                private:
+                    Operator op_type;
+            };
+
+
+            class ComputeTag;
+            class RecordTag;
+
+
+            class PerformTensorStrategy {
+
+                public:
+                    PerformTensorStrategy() = default;
+
+                    template <class Operator>
+                    std::shared_ptr<Tensor> compute(
+                        Operator _op,
+                        const std::shared_ptr<Tensor> l,
+                        const std::shared_ptr<Tensor> r,
+                        ComputeTag _);
+
+                    template <class Operator>
+                    std::shared_ptr<Tensor> compute(
+                        Operator _op,
+                        const std::shared_ptr<Tensor> l,
+                        const std::shared_ptr<Tensor> r,
+                        RecordTag _);
+
+            };
+
+
+
+            /*
+
+            DESCRIPTION:
+
+                Curiously Recurring Template Pattern for
+                accepting the Strategy implementation visitor
+
+            USAGE:
+
+                if (recordTensorOperation && isBinaryOp) {
+
+                    RecordBinaryTag _;
+
+                    return _.compute_tensor(std::move(op_type), l, r, implementation);
+                }
+
+            */
+            template <class StrategyType>
+            struct StrategyTag {
+
+                template <class Operator>
+                std::shared_ptr<Tensor> compute_tensor(
+                    Operator _op,
+                    const std::shared_ptr<Tensor> l,
+                    const std::shared_ptr<Tensor> r,
+                    PerformTensorStrategy& strat_implementation) {
+
+                    return strat_implementation.compute(
+                        _op, l, r, *static_cast<
+                            StrategyType const*>(this));
+                }
+            };
+
+            class ComputeTag : public StrategyTag<ComputeTag> {
+                public:
+                    ComputeTag() = default;
+            };
+            class RecordTag : public StrategyTag<RecordTag> {
+                public:
+                    RecordTag() = default;
+            };
+
+
+
+
+        }
+
+    }
+
+}
+
+
+#endif // TENSOR_FORWARD_WRAPPER
\ No newline at end of file
diff --git a/main.cpp b/main.cpp
index 671bcd6..086cf1b 100644
--- a/main.cpp
+++ b/main.cpp
@@ -47,11 +47,6 @@ int main(void) {
 
     auto out = model.forward(ma);
 
-    // NeuralNetwork::Computation::Tree::ComputeOperation handler;
-    // handler.setNextHandler(std::make_unique());
-    // handler.setNextHandler(std::make_unique());
-    // handler.handle("test");
-
     return 0;
 }
\ No newline at end of file
diff --git a/network_layer.cpp b/network_layer.cpp
index 9fbe461..7e85ac6 100644
--- a/network_layer.cpp
+++ b/network_layer.cpp
@@ -2,6 +2,7 @@
 #include <algorithm> // std::for_each
 
 #include "tensor.h"
+#include "tensor_forward_wrapper.h"
 #include "network_layer.h"
 #include "m_algorithms.h"
 // #include "matrix_printer.h"
diff --git a/tensor.cpp b/tensor.cpp
index 1d71244..143602f 100644
--- a/tensor.cpp
+++ b/tensor.cpp
@@ -1,15 +1,9 @@
 #include "tensor.h"
-
 #include "matrix.h"
 #include "generator.h"
-#include "m_algorithms.h"
 #include "m_algorithms_register.h"
-#include "m_algorithms_utilities.h"
-#include
-#include
 #include
-#include
 
 
 namespace NeuralNetwork {
@@ -57,162 +51,6 @@
 
         }
 
-
-        template <class Operator>
-        std::shared_ptr<Tensor> TensorOp<Operator>::operator()(
-            const std::shared_ptr<Tensor> l,
-            const std::shared_ptr<Tensor> r) {
-
-            bool recordTensorOperation = l->is_recorded() || r->is_recorded();
-
-            PerformTensorStrategy implementation;
-
-            if (recordTensorOperation) {
-
-                RecordTag _;
-
-                return _.compute_tensor(op_type, l, r, implementation);
-            }
-
-            ComputeTag _;
-
-            return _.compute_tensor(op_type, l, nullptr, implementation);
-
-        }
-
-
-        template class TensorOp;
-        template class TensorOp;
-        template class TensorOp;
-        template class TensorOp;
-        template class TensorOp;
-        template class TensorOp;
-        template class TensorOp;
-
-
-
-        template <class Operator>
-        std::shared_ptr<Tensor> PerformTensorStrategy::compute(
-            Operator _op,
-            const std::shared_ptr<Tensor> l,
-            const std::shared_ptr<Tensor> r,
-            ComputeTag _ ) {
-
-
-            Matrix::Representation out_matrix;
-            std::shared_ptr<Tensor> out_tensor;
-            std::shared_ptr<RegisteredOperation> out_op;
-
-            Matrix::Operations::Utility::Codify codify;
-            auto op_code = codify(_op);
-
-
-            if constexpr (Matrix::Operations::UnaryMatrixOperatable<Operator>) {
-                out_matrix = _op(l->release_matrix());
-            }
-            else if constexpr (Matrix::Operations::BinaryMatrixOperatable<Operator>) {
-                out_matrix = _op(
-                    l->release_matrix(),
-                    r->release_matrix()
-                );
-            }
-
-
-            out_tensor = std::make_shared<Tensor>
-                (std::move(out_matrix), IsTrackable(true),
-                 IsLeaf(false), IsRecordable(false));
-
-
-            if constexpr (Matrix::Operations::UnaryMatrixOperatable<Operator>) {
-                out_op = RegisteredOperation::create(op_code,
-                        out_tensor,
-                        l->get_operation()
-                    );
-            }
-            else if constexpr (Matrix::Operations::BinaryMatrixOperatable<Operator>) {
-                out_op = RegisteredOperation::create(op_code,
-                        out_tensor,
-                        l->get_operation(),
-                        r->get_operation()
-                    );
-            }
-
-            out_tensor->register_operation(out_op);
-
-            return out_tensor;
-
-        }
-
-
-
-        template <class Operator>
-        std::shared_ptr<Tensor> PerformTensorStrategy::compute(
-            Operator _op,
-            const std::shared_ptr<Tensor> l,
-            const std::shared_ptr<Tensor> r,
-            RecordTag _) {
-
-            TensorStatistics _s;
-            _s.set_graph_start(std::chrono::steady_clock::now());
-
-            Matrix::Operations::Utility::Stringify stringify;
-            _s.set_operation_string(stringify(_op));
-
-            Matrix::Operations::Utility::Codify codify;
-            Matrix::Operations::Code op_code = codify(_op);
-
-            Matrix::Representation out_matrix;
-            std::shared_ptr<Tensor> out_tensor;
-            std::shared_ptr<RegisteredOperation> out_op;
-
-
-            _s.set_matrix_start(std::chrono::steady_clock::now());
-
-            if constexpr (Matrix::Operations::UnaryMatrixOperatable<Operator>) {
-                out_matrix = _op(
-                    l->release_matrix());
-            }
-            else if constexpr (Matrix::Operations::BinaryMatrixOperatable<Operator>) {
-                out_matrix = _op(
-                    l->release_matrix(),
-                    r->release_matrix()
-                );
-            }
-
-            _s.set_matrix_end(std::chrono::steady_clock::now());
-
-
-            out_tensor = std::make_shared<Tensor>(
-                std::move(out_matrix), IsTrackable(true),
-                IsLeaf(false), IsRecordable(true));
-
-
-            if constexpr (Matrix::Operations::UnaryMatrixOperatable<Operator>) {
-                out_op = RegisteredOperation::create(op_code,
-                        out_tensor,
-                        l->get_operation()
-                    );
-            }
-            else if constexpr (Matrix::Operations::BinaryMatrixOperatable<Operator>) {
-                out_op = RegisteredOperation::create(op_code,
-                        out_tensor,
-                        l->get_operation(),
-                        r->get_operation()
-                    );
-            }
-
-
-            out_tensor->register_operation(out_op);
-
-            _s.set_graph_end(std::chrono::steady_clock::now());
-            out_tensor->stats = _s;
-
-
-            return out_tensor;
-
-        }
-
-
 }
 
 }
diff --git a/tensor_forward_wrapper.cpp b/tensor_forward_wrapper.cpp
new file mode 100644
index 0000000..6f2f545
--- /dev/null
+++ b/tensor_forward_wrapper.cpp
@@ -0,0 +1,185 @@
+
+#include "tensor_forward_wrapper.h"
+#include "tensor.h"
+
+#include "matrix.h"
+#include "generator.h"
+#include "m_algorithms.h"
+#include "m_algorithms_register.h"
+#include "m_algorithms_utilities.h"
+
+#include
+#include
+#include
+#include
+
+
+
+
+namespace NeuralNetwork {
+
+    namespace Computation {
+
+        namespace Graph {
+
+
+            template <class Operator>
+            std::shared_ptr<Tensor> TensorOp<Operator>::operator()(
+                const std::shared_ptr<Tensor> l,
+                const std::shared_ptr<Tensor> r) {
+
+                bool recordTensorOperation = l->is_recorded() || r->is_recorded();
+
+                PerformTensorStrategy implementation;
+
+                if (recordTensorOperation) {
+
+                    RecordTag _;
+
+                    return _.compute_tensor(op_type, l, r, implementation);
+                }
+
+                ComputeTag _;
+
+                return _.compute_tensor(op_type, l, nullptr, implementation);
+
+            }
+
+
+            template class TensorOp;
+            template class TensorOp;
+            template class TensorOp;
+            template class TensorOp;
+            template class TensorOp;
+            template class TensorOp;
+            template class TensorOp;
+
+
+
+            template <class Operator>
+            std::shared_ptr<Tensor> PerformTensorStrategy::compute(
+                Operator _op,
+                const std::shared_ptr<Tensor> l,
+                const std::shared_ptr<Tensor> r,
+                ComputeTag _ ) {
+
+
+                Matrix::Representation out_matrix;
+                std::shared_ptr<Tensor> out_tensor;
+                std::shared_ptr<RegisteredOperation> out_op;
+
+                Matrix::Operations::Utility::Codify codify;
+                auto op_code = codify(_op);
+
+
+                if constexpr (Matrix::Operations::UnaryMatrixOperatable<Operator>) {
+                    out_matrix = _op(l->release_matrix());
+                }
+                else if constexpr (Matrix::Operations::BinaryMatrixOperatable<Operator>) {
+                    out_matrix = _op(
+                        l->release_matrix(),
+                        r->release_matrix()
+                    );
+                }
+
+
+                out_tensor = std::make_shared<Tensor>
+                    (std::move(out_matrix), IsTrackable(true),
+                     IsLeaf(false), IsRecordable(false));
+
+
+                if constexpr (Matrix::Operations::UnaryMatrixOperatable<Operator>) {
+                    out_op = RegisteredOperation::create(op_code,
+                            out_tensor,
+                            l->get_operation()
+                        );
+                }
+                else if constexpr (Matrix::Operations::BinaryMatrixOperatable<Operator>) {
+                    out_op = RegisteredOperation::create(op_code,
+                            out_tensor,
+                            l->get_operation(),
+                            r->get_operation()
+                        );
+                }
+
+                out_tensor->register_operation(out_op);
+
+                return out_tensor;
+
+            }
+
+
+
+            template <class Operator>
+            std::shared_ptr<Tensor> PerformTensorStrategy::compute(
+                Operator _op,
+                const std::shared_ptr<Tensor> l,
+                const std::shared_ptr<Tensor> r,
+                RecordTag _) {
+
+                TensorStatistics _s;
+                _s.set_graph_start(std::chrono::steady_clock::now());
+
+                Matrix::Operations::Utility::Stringify stringify;
+                _s.set_operation_string(stringify(_op));
+
+                Matrix::Operations::Utility::Codify codify;
+                Matrix::Operations::Code op_code = codify(_op);
+
+                Matrix::Representation out_matrix;
+                std::shared_ptr<Tensor> out_tensor;
+                std::shared_ptr<RegisteredOperation> out_op;
+
+
+                _s.set_matrix_start(std::chrono::steady_clock::now());
+
+                if constexpr (Matrix::Operations::UnaryMatrixOperatable<Operator>) {
+                    out_matrix = _op(
+                        l->release_matrix());
+                }
+                else if constexpr (Matrix::Operations::BinaryMatrixOperatable<Operator>) {
+                    out_matrix = _op(
+                        l->release_matrix(),
+                        r->release_matrix()
+                    );
+                }
+
+                _s.set_matrix_end(std::chrono::steady_clock::now());
+
+
+                out_tensor = std::make_shared<Tensor>(
+                    std::move(out_matrix), IsTrackable(true),
+                    IsLeaf(false), IsRecordable(true));
+
+
+                if constexpr (Matrix::Operations::UnaryMatrixOperatable<Operator>) {
+                    out_op = RegisteredOperation::create(op_code,
+                            out_tensor,
+                            l->get_operation()
+                        );
+                }
+                else if constexpr (Matrix::Operations::BinaryMatrixOperatable<Operator>) {
+                    out_op = RegisteredOperation::create(op_code,
+                            out_tensor,
+                            l->get_operation(),
+                            r->get_operation()
+                        );
+                }
+
+
+                out_tensor->register_operation(out_op);
+
+                _s.set_graph_end(std::chrono::steady_clock::now());
+                out_tensor->stats = _s;
+
+
+                return out_tensor;
+
+            }
+
+
+        }
+
+    }
+
+}
\ No newline at end of file
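
Note: the TensorOp / StrategyTag machinery moved by this change is a tag-dispatched Strategy. The caller materialises a ComputeTag or RecordTag, and CRTP routes the call to the matching PerformTensorStrategy::compute overload, so the choice between the plain path and the recorded (benchmarked) path is resolved at compile time rather than through a virtual interface. The sketch below shows that dispatch mechanism in isolation; it is a minimal, self-contained illustration with placeholder names (Strategy, StrategyTag, run), not code from this repository.

    #include <iostream>

    struct ComputeTag;
    struct RecordTag;

    // One overload per tag type; declarations only need the tags declared.
    struct Strategy {
        void compute(int x, const ComputeTag&);
        void compute(int x, const RecordTag&);
    };

    // CRTP base: forwards to the overload selected by the derived tag type.
    template <typename TagType>
    struct StrategyTag {
        void run(int x, Strategy& s) {
            s.compute(x, *static_cast<const TagType*>(this));
        }
    };

    struct ComputeTag : StrategyTag<ComputeTag> {};
    struct RecordTag  : StrategyTag<RecordTag>  {};

    void Strategy::compute(int x, const ComputeTag&) { std::cout << "compute " << x << "\n"; }
    void Strategy::compute(int x, const RecordTag&)  { std::cout << "record "  << x << "\n"; }

    int main() {
        Strategy s;
        ComputeTag c; c.run(1, s);   // prints "compute 1"
        RecordTag  r; r.run(2, s);   // prints "record 2"
    }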