update(python-example): optional argument for displaying progress bar
Az-r-ow committed Mar 28, 2024
1 parent f03097e commit 0794e87
Showing 4 changed files with 70 additions and 37 deletions.
24 changes: 10 additions & 14 deletions examples/train-predict-MNIST/helpers/event_handlers.py
@@ -36,30 +36,26 @@ def get_drawing(context):


 def handle_ui_button_pressed(context):
-    if context["event"].ui_element == context["ui_elements"]["guess_button"]:
+    event, ui_elements = context["event"], context["ui_elements"]
+
+    if event.ui_element == ui_elements["guess_button"]:
         normalized_image = get_drawing(context)
         prediction = find_highest_indexes_in_matrix(network.predict([normalized_image]))
-        context["ui_elements"]["guess_text"].append_html_text(f"I'm guessing : {prediction[0]}<br>")
+        ui_elements["guess_text"].append_html_text(f"I'm guessing : {prediction[0]}<br>")
 
-    if context["event"].ui_element == context["ui_elements"]["learn_button"]:
+    if event.ui_element == ui_elements["learn_button"]:
         normalized_image = get_drawing(context)
-        target = float(context["ui_elements"]["dropdown"].selected_option)
-        loss = network.train([normalized_image], [target], 1)
-        context["ui_elements"]["guess_text"].append_html_text(f"I'm learning that it's a {int(target)}<br>loss : {loss}")
-
-    if context["event"].ui_element == context["ui_elements"]["dropdown"]:
-        print("dropdown has been clicked")
+        target = float(ui_elements["dropdown"].selected_option)
+        loss = network.train([normalized_image], [target], 1, progBar=False)
+        ui_elements["guess_text"].append_html_text(f"I'm learning that it's a {int(target)}<br>loss : {loss}")
 
-    if context["event"].ui_element == context["ui_elements"]["clear_button"]:
-        context["ui_elements"]["drawing_surface"].fill(erasing_color)
-    return
+    if event.ui_element == ui_elements["clear_button"]:
+        ui_elements["drawing_surface"].fill(erasing_color)
 
 def handle_dropdown_change(context):
     event = context['event']
     if event.ui_element == context["ui_elements"]["dropdown"]:
         print("Selected Option ", event.text)
         context["ui_elements"]["dropdown"].close = True
-    return
 
 def handle_mouse_button_down(context):
     global drawing, erasing
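The example's substantive change is the single-sample `network.train(...)` call, which now passes `progBar=False` so the pygame UI isn't interleaved with progress-bar output on every learn-button press. A minimal usage sketch (assuming a `network` object and normalized data prepared as elsewhere in the MNIST example):

    # Single-sample online update with the progress bar silenced:
    loss = network.train([normalized_image], [target], 1, progBar=False)

    # Batch-style runs are unaffected; the bar stays on by default:
    loss = network.train(inputs, targets, 10)  # progBar defaults to True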
23 changes: 17 additions & 6 deletions src/NeuralNet/Network.cpp
@@ -53,7 +53,9 @@ std::shared_ptr<Layer> Network::getOutputLayer() const {

 double Network::train(std::vector<std::vector<double>> inputs,
                       std::vector<double> labels, int epochs,
-                      std::vector<std::shared_ptr<Callback>> callbacks) {
+                      std::vector<std::shared_ptr<Callback>> callbacks,
+                      bool progBar) {
+  this->progBar = progBar;
   try {
     return onlineTraining(inputs, labels, epochs, callbacks);
   } catch (const std::exception &e) {
@@ -65,7 +67,9 @@ double Network::train(std::vector<std::vector<double>> inputs,
 
 double Network::train(std::vector<std::vector<std::vector<double>>> inputs,
                       std::vector<double> labels, int epochs,
-                      std::vector<std::shared_ptr<Callback>> callbacks) {
+                      std::vector<std::shared_ptr<Callback>> callbacks,
+                      bool progBar) {
+  this->progBar = progBar;
   try {
     return onlineTraining(inputs, labels, epochs, callbacks);
   } catch (const std::exception &e) {
@@ -79,7 +83,9 @@ double Network::train(std::vector<std::vector<std::vector<double>>> inputs,
 double Network::train(
     TrainingData<std::vector<std::vector<double>>, std::vector<double>>
         trainingData,
-    int epochs, std::vector<std::shared_ptr<Callback>> callbacks) {
+    int epochs, std::vector<std::shared_ptr<Callback>> callbacks,
+    bool progBar) {
+  this->progBar = progBar;
   try {
     return this->trainer(trainingData, epochs, callbacks);
   } catch (const std::exception &e) {
@@ -93,7 +99,9 @@ double Network::train(
 double Network::train(
     TrainingData<std::vector<std::vector<std::vector<double>>>,
                  std::vector<double>>
         trainingData,
-    int epochs, std::vector<std::shared_ptr<Callback>> callbacks) {
+    int epochs, std::vector<std::shared_ptr<Callback>> callbacks,
+    bool progBar) {
+  this->progBar = progBar;
   try {
     return this->trainer(trainingData, epochs, callbacks);
   } catch (const std::exception &e) {
@@ -134,8 +142,9 @@ double Network::miniBatchTraining(
       accuracy = computeAccuracy(o, y);
       sumLoss += loss;
       this->backProp(o, y);
-      g.printWithLAndA(loss, accuracy);
       trainingCheckpoint("onBatchEnd", callbacks);
+      if (!this->progBar) continue;  // Skip when disabled
+      g.printWithLAndA(loss, accuracy);
     }
     trainingCheckpoint("onEpochEnd", callbacks);
   }
@@ -165,8 +174,9 @@ double Network::batchTraining(
     sumLoss += loss;
 
     this->backProp(o, y);
-    g.printWithLAndA(loss, accuracy);
     trainingCheckpoint("onEpochEnd", callbacks);
+    if (!this->progBar) continue;  // Skip when disabled
+    g.printWithLAndA(loss, accuracy);
   }
 
   trainingCheckpoint("onTrainEnd", callbacks);
@@ -195,6 +205,7 @@ double Network::onlineTraining(
     sumLoss += loss;
     tCorrect += computeAccuracy(o, y);
     this->backProp(o, y);
+    if (!this->progBar) continue;  // Skip when disabled
     tg.printWithLoss(loss);
   }
   // Computing metrics for the logs
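Note where the new guard lands in each loop: after `trainingCheckpoint(...)`, so `onBatchEnd`/`onEpochEnd` callbacks still fire when the bar is off and only the progress print is skipped. A Python-flavored sketch of that control flow (names are illustrative, not the actual C++):

    # Shape of the miniBatchTraining loop: callbacks run unconditionally,
    # and `continue` gates only the progress output.
    for batch in batches:
        loss, accuracy = train_step(batch)   # forward pass + backProp
        run_callbacks("onBatchEnd")          # always runs, bar or no bar
        if not prog_bar:
            continue                         # skips just the print below
        print_progress(loss, accuracy)       # stands in for g.printWithLAndA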
22 changes: 17 additions & 5 deletions src/NeuralNet/Network.hpp
@@ -85,12 +85,15 @@ class Network : public Model {
    * @param epochs
    * @param callbacks A vector of `Callback` that will be called during training
    * stages
+   * @param progBar Whether to output a progress bar for the training process.
+   * Default: `true`
    *
    * @return The last training's loss
    */
   double train(std::vector<std::vector<double>> inputs,
                std::vector<double> labels, int epochs = 1,
-               const std::vector<std::shared_ptr<Callback>> callbacks = {});
+               const std::vector<std::shared_ptr<Callback>> callbacks = {},
+               bool progBar = true);
 
   /**
    * @brief This method will train the model with the given inputs and labels
@@ -100,12 +103,15 @@ class Network : public Model {
    * @param epochs
    * @param callbacks A vector of `Callback` that will be called during training
    * stages
+   * @param progBar Whether to output a progress bar for the training process.
+   * Default: `true`
    *
    * @return The last training's loss
    */
   double train(std::vector<std::vector<std::vector<double>>> inputs,
                std::vector<double> labels, int epochs = 1,
-               const std::vector<std::shared_ptr<Callback>> callbacks = {});
+               const std::vector<std::shared_ptr<Callback>> callbacks = {},
+               bool progBar = true);

   /**
    * @brief This method will train the model with the given TrainingData
@@ -114,14 +120,17 @@ class Network : public Model {
    * @param epochs
    * @param callbacks A vector of `Callback` that will be called during training
    * stages
+   * @param progBar Whether to output a progress bar for the training
+   * process. Default: `true`
    *
    * @return The last training's loss
    */
   double train(
       TrainingData<std::vector<std::vector<double>>, std::vector<double>>
           trainingData,
       int epochs = 1,
-      const std::vector<std::shared_ptr<Callback>> callbacks = {});
+      const std::vector<std::shared_ptr<Callback>> callbacks = {},
+      bool progBar = true);

   /**
    * @brief This method will train the model with the given TrainingData
@@ -130,14 +139,17 @@ class Network : public Model {
    * @param epochs
    * @param callbacks A vector of `Callback` that will be called during training
    * stages
+   * @param progBar Whether to output a progress bar for the training process.
+   * Default: `true`
    *
    * @return The last training's loss
    */
   double train(TrainingData<std::vector<std::vector<std::vector<double>>>,
                             std::vector<double>>
                    trainingData,
                int epochs = 1,
-               const std::vector<std::shared_ptr<Callback>> callbacks = {});
+               const std::vector<std::shared_ptr<Callback>> callbacks = {},
+               bool progBar = true);

   /**
    * @brief This model will try to make predictions based off the inputs passed
@@ -167,7 +179,7 @@
   double loss = 0, accuracy = 0;
   std::vector<std::shared_ptr<Layer>> layers;
   LOSS lossFunc;  // Storing the loss function for serialization
-  bool debugMode = false;
+  bool progBar = true;
   double (*cmpLoss)(const Eigen::MatrixXd &, const Eigen::MatrixXd &);
   Eigen::MatrixXd (*cmpLossGrad)(const Eigen::MatrixXd &,
                                  const Eigen::MatrixXd &);
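Since the new defaults (`callbacks = {}`, `progBar = true`) reproduce the old behavior, existing call sites keep compiling and keep their progress bar. Through the bindings below, the matching `py::arg` defaults make these calls equivalent (a sketch; `network`, `inputs`, and `targets` assumed prepared):

    network.train(inputs, targets, 5)
    network.train(inputs, targets, 5, [], True)
    network.train(inputs, targets, epochs=5, callbacks=[], progBar=True)

Because `progBar` sits after `callbacks` in the C++ signatures, a C++ caller disabling the bar positionally must also spell out the callbacks argument; from Python it is reachable directly as a keyword.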
38 changes: 26 additions & 12 deletions src/bindings/NeuralNetPy.cpp
@@ -416,11 +416,11 @@ PYBIND11_MODULE(NeuralNetPy, m) {
.def("train",
static_cast<double (Network::*)(
std::vector<std::vector<double>>, std::vector<double>, int,
const std::vector<std::shared_ptr<Callback>>)>(&Network::train),
py::arg("inputs"),
py::arg("targets"),
py::arg("epochs"),
py::arg("callbacks") = std::vector<std::shared_ptr<Callback>>(),
const std::vector<std::shared_ptr<Callback>>, bool)>(
&Network::train),
py::arg("inputs"), py::arg("targets"), py::arg("epochs"),
py::arg("callbacks") = std::vector<std::shared_ptr<Callback>>(),
py::arg("progBar") = true,
R"pbdoc(
Train the network by passing it 2 dimensional inputs (vectors).
@@ -432,6 +432,8 @@ PYBIND11_MODULE(NeuralNetPy, m) {
         :type epochs: int
         :param callbacks: A list of callbacks to be used during the training
         :type callbacks: list[Callback]
+        :param progBar: Whether or not to enable the progress bar
+        :type progBar: bool
         :return: The average loss throughout the training
         :rtype: float
@@ -461,11 +463,11 @@ PYBIND11_MODULE(NeuralNetPy, m) {
            static_cast<double (Network::*)(
                std::vector<std::vector<std::vector<double>>>,
                std::vector<double>, int,
-               const std::vector<std::shared_ptr<Callback>>)>(&Network::train),
-           py::arg("inputs"),
-           py::arg("targets"),
-           py::arg("epochs"),
-           py::arg("callbacks") = std::vector<std::shared_ptr<Callback>>(),
+               const std::vector<std::shared_ptr<Callback>>, bool)>(
+               &Network::train),
+           py::arg("inputs"), py::arg("targets"), py::arg("epochs"),
+           py::arg("callbacks") = std::vector<std::shared_ptr<Callback>>(),
+           py::arg("progBar") = true,
            R"pbdoc(
        Train the network by passing it a list of 3 dimensional inputs (matrices).
@@ -477,6 +479,8 @@ PYBIND11_MODULE(NeuralNetPy, m) {
         :type epochs: int
         :param callbacks: A list of callbacks to be used during the training
         :type callbacks: list[Callback]
+        :param progBar: Whether or not to enable the progress bar
+        :type progBar: bool
         :return: The average loss throughout the training
         :rtype: float
@@ -512,8 +516,11 @@ PYBIND11_MODULE(NeuralNetPy, m) {
            static_cast<double (Network::*)(
                TrainingData<std::vector<std::vector<double>>,
                             std::vector<double>>,
-               int, const std::vector<std::shared_ptr<Callback>>)>(
+               int, const std::vector<std::shared_ptr<Callback>>, bool)>(
                &Network::train),
+           py::arg("trainingData"), py::arg("epochs"),
+           py::arg("callbacks") = std::vector<std::shared_ptr<Callback>>(),
+           py::arg("progBar") = true,
            R"pbdoc(
        Train the network by passing it a ``TrainingData2dI`` object.
@@ -523,6 +530,8 @@ PYBIND11_MODULE(NeuralNetPy, m) {
         :type epochs: int
         :param callbacks: A list of callbacks to be used during the training
         :type callbacks: list[Callback]
+        :param progBar: Whether or not to enable the progress bar
+        :type progBar: bool
         :return: The average loss throughout the training
         :rtype: float
@@ -557,8 +566,11 @@ PYBIND11_MODULE(NeuralNetPy, m) {
            static_cast<double (Network::*)(
                TrainingData<std::vector<std::vector<std::vector<double>>>,
                             std::vector<double>>,
-               int, const std::vector<std::shared_ptr<Callback>>)>(
+               int, const std::vector<std::shared_ptr<Callback>>, bool)>(
                &Network::train),
+           py::arg("trainingData"), py::arg("epochs"),
+           py::arg("callbacks") = std::vector<std::shared_ptr<Callback>>(),
+           py::arg("progBar") = true,
            R"pbdoc(
        Train the network by passing it a ``TrainingData3dI`` object.
@@ -568,6 +580,8 @@ PYBIND11_MODULE(NeuralNetPy, m) {
         :type epochs: int
         :param callbacks: A list of callbacks to be used during the training
         :type callbacks: list[Callback]
+        :param progBar: Whether or not to enable the progress bar
+        :type progBar: bool
         :return: The average loss throughout the training
         :rtype: float
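Putting the bindings together, a sketch of the `TrainingData` path with the bar disabled. The keyword names come from the `py::arg` declarations above; the `TrainingData2dI` name appears in the docstring, but its constructor signature and the surrounding setup are assumptions, not part of this diff:

    import NeuralNetPy as nnp  # module name from PYBIND11_MODULE

    # `network`, `inputs`, `targets` assumed prepared as in the MNIST example;
    # the constructor call below is illustrative only.
    data = nnp.TrainingData2dI(inputs, targets)

    loss = network.train(data, epochs=3, callbacks=[], progBar=False)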
