
properly convert tensor and numpy in loss func test
henrysky committed Aug 30, 2024 · 1 parent 5271e53 · commit 10a6721
Showing 2 changed files with 47 additions and 35 deletions.
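
This commit routes every Keras tensor through keras.ops.convert_to_numpy() before it reaches a numpy.testing assertion, and spells out expected values as explicit np.array(...) literals. Under Keras 3 the ops can return backend tensors (TensorFlow, PyTorch, or JAX) that numpy.testing cannot always coerce when compared against a bare Python list, so the conversion is now done explicitly on the tensor side. A minimal sketch of the pattern follows; the toy_mae loss and its values are illustrative, not part of the diff:

import keras
import numpy as np
import numpy.testing as npt


def toy_mae(y_true, y_pred):
    # Illustrative stand-in for an astroNN loss; returns a backend tensor.
    return keras.ops.mean(keras.ops.abs(y_true - y_pred), axis=-1)


y_true = keras.ops.array([[1.0, 2.0], [3.0, 4.0]])
y_pred = keras.ops.array([[1.0, 2.0], [3.0, 5.0]])

# Convert the tensor explicitly and compare against a real ndarray --
# the same pattern the commit applies to every assertion below.
npt.assert_array_almost_equal(
    keras.ops.convert_to_numpy(toy_mae(y_true, y_pred)),
    np.array([0.0, 0.5]),
)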
tests/test_loss_func.py (47 additions & 33 deletions)

@@ -1,41 +1,38 @@
 import keras
 import numpy as np
 import numpy.testing as npt
-from astroNN.shared.nn_tools import cpu_fallback
+
 from astroNN.config import MAGIC_NUMBER
 from astroNN.nn.losses import (
+    binary_crossentropy,
+    categorical_crossentropy,
     magic_correction_term,
     mean_absolute_error,
-    mean_squared_error,
-    categorical_crossentropy,
-    binary_crossentropy,
-    nll,
     mean_error,
-    zeros_loss,
     mean_percentage_error,
+    mean_squared_error,
     median,
+    nll,
+    zeros_loss,
 )
 from astroNN.nn.metrics import (
-    categorical_accuracy,
     binary_accuracy,
+    categorical_accuracy,
+    mad_std,
     mean_absolute_percentage_error,
     mean_squared_logarithmic_error,
-    median_error,
     median_absolute_deviation,
-    mad_std,
+    median_error,
 )
 
-# make sure this test use CPU
-cpu_fallback()
-
 
 def test_loss_magic():
     # =============Magic correction term============= #
     y_true = keras.ops.array(
         [[2.0, MAGIC_NUMBER, MAGIC_NUMBER], [2.0, MAGIC_NUMBER, 4.0]]
     )
     npt.assert_array_equal(
-        keras.ops.convert_to_numpy(magic_correction_term(y_true)), [3.0, 1.5]
+        keras.ops.convert_to_numpy(magic_correction_term(y_true)), np.array([3.0, 1.5])
     )
 
 
@@ -47,10 +44,11 @@ def test_loss_mse():
 
     npt.assert_almost_equal(
         keras.ops.convert_to_numpy(mean_absolute_error(y_true, y_pred)),
-        [0.0, 3.0 / 2.0],
+        np.array([0.0, 3.0 / 2.0]),
     )
     npt.assert_almost_equal(
-        keras.ops.convert_to_numpy(mean_squared_error(y_true, y_pred)), [0.0, 9.0 / 2]
+        keras.ops.convert_to_numpy(mean_squared_error(y_true, y_pred)),
+        np.array([0.0, 9.0 / 2]),
     )
 
     # make sure neural network prediction won't matter for magic number term
@@ -70,7 +68,7 @@ def test_loss_mean_err():
     y_true = keras.ops.array([[2.0, MAGIC_NUMBER, 3.0], [2.0, MAGIC_NUMBER, 7.0]])
 
     npt.assert_almost_equal(
-        keras.ops.convert_to_numpy(mean_error(y_true, y_pred)), [0.0, 0.0]
+        keras.ops.convert_to_numpy(mean_error(y_true, y_pred)), np.array([0.0, 0.0])
     )
 
 
@@ -80,10 +78,12 @@ def test_loss_acurrancy():
     y_true = keras.ops.array([[1.0, MAGIC_NUMBER, 1.0], [0.0, MAGIC_NUMBER, 1.0]])
 
     npt.assert_array_equal(
-        keras.ops.convert_to_numpy(categorical_accuracy(y_true, y_pred)), [1.0, 0.0]
+        keras.ops.convert_to_numpy(categorical_accuracy(y_true, y_pred)),
+        np.array([1.0, 0.0]),
     )
     npt.assert_almost_equal(
-        keras.ops.convert_to_numpy(binary_accuracy(y_true, y_pred)), [1.0 / 2.0, 0.0]
+        keras.ops.convert_to_numpy(binary_accuracy(y_true, y_pred)),
+        np.array([1.0 / 2.0, 0.0]),
     )
 
 
@@ -95,7 +95,7 @@ def test_loss_abs_error():
 
     npt.assert_array_almost_equal(
         keras.ops.convert_to_numpy(mean_absolute_percentage_error(y_true, y_pred)),
-        [50.0, 50.0],
+        np.array([50.0, 50.0]),
         decimal=3,
     )
     # make sure neural network prediction won't matter for magic number term
@@ -114,7 +114,7 @@ def test_loss_percentage_error():
 
     npt.assert_array_almost_equal(
         keras.ops.convert_to_numpy(mean_percentage_error(y_true, y_pred)),
-        [50.0, 50.0],
+        np.array([50.0, 50.0]),
         decimal=3,
     )
     # make sure neural network prediction won't matter for magic number term
@@ -133,7 +133,7 @@ def test_loss_log_error():
 
     npt.assert_array_almost_equal(
         keras.ops.convert_to_numpy(mean_squared_logarithmic_error(y_true, y_pred)),
-        [0.24, 0.24],
+        np.array([0.24, 0.24]),
         decimal=3,
     )
     # make sure neural network prediction won't matter for magic number term
@@ -150,7 +150,7 @@ def test_loss_zeros():
     y_true = keras.ops.array([[1.0, MAGIC_NUMBER, 1.0], [1.0, MAGIC_NUMBER, 1.0]])
 
     npt.assert_array_almost_equal(
-        keras.ops.convert_to_numpy(zeros_loss(y_true, y_pred)), [0.0, 0.0]
+        keras.ops.convert_to_numpy(zeros_loss(y_true, y_pred)), np.array([0.0, 0.0])
     )
 
 
@@ -213,24 +213,34 @@ def test_negative_log_likelihood():
 
 def test_median():
     y_pred = keras.ops.array([[1.0, 2.0, 3.0, 4.0, 5.0], [1.0, 2.0, 3.0, 4.0, 5.0]])
-    npt.assert_array_almost_equal(median(y_pred), np.median(y_pred), decimal=3)
     npt.assert_array_almost_equal(
-        median(y_pred, axis=1), np.median(y_pred, axis=1), decimal=3
+        keras.ops.convert_to_numpy(median(y_pred)),
+        np.median(keras.ops.convert_to_numpy(y_pred)),
+        decimal=3,
     )
+    npt.assert_array_almost_equal(
+        keras.ops.convert_to_numpy(median(y_pred, axis=1)),
+        np.median(keras.ops.convert_to_numpy(y_pred), axis=1),
+        decimal=3,
+    )
     npt.assert_array_almost_equal(
-        median(y_pred, axis=0), np.median(y_pred, axis=0), decimal=3
+        keras.ops.convert_to_numpy(median(y_pred, axis=0)),
+        np.median(keras.ops.convert_to_numpy(y_pred), axis=0),
+        decimal=3,
    )
 
 
 def test_mad_std():
-    test_array = np.random.normal(0.0, 1.0, 100000)
+    test_array = keras.ops.array(np.random.normal(0.0, 1.0, 100000))
     npt.assert_equal(
-        np.round(
-            keras.ops.convert_to_numpy(
-                mad_std(test_array, np.zeros_like(test_array), axis=None)
+        keras.ops.convert_to_numpy(
+            keras.ops.round(
+                keras.ops.convert_to_numpy(
+                    mad_std(test_array, keras.ops.zeros_like(test_array), axis=None)
+                )
             )
         ),
-        1.0,
+        np.array([1.0]),
     )
 
 
@@ -239,10 +249,14 @@ def test_median_metrics():
     y_true = keras.ops.array([[1.0, 9.0, 0.0], [1.0, -1.0, 0.0]])
 
     npt.assert_array_almost_equal(
-        median_error(y_true, y_pred, axis=None), np.median(y_true - y_pred), decimal=3
+        keras.ops.convert_to_numpy(median_error(y_true, y_pred, axis=None)),
+        np.median(keras.ops.convert_to_numpy(y_true - y_pred)),
+        decimal=3,
     )
     npt.assert_array_almost_equal(
-        median_absolute_deviation(y_true, y_pred, axis=None),
-        np.median(np.abs(y_true - y_pred)),
+        keras.ops.convert_to_numpy(
+            median_absolute_deviation(y_true, y_pred, axis=None)
+        ),
+        np.median(np.abs(keras.ops.convert_to_numpy(y_true - y_pred))),
         decimal=3,
     )
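
A note on the rewritten test_mad_std: it asserts that the MAD-based standard-deviation estimate of 100,000 draws from N(0, 1) rounds to 1.0. Assuming astroNN's mad_std follows the usual MAD-to-sigma convention (sigma ≈ 1.4826 × MAD, as in astropy.stats.mad_std), that expectation checks out in plain numpy:

import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(0.0, 1.0, 100000)

# Scale the median absolute deviation by ~1 / Phi^-1(3/4) ~= 1.4826 to
# estimate sigma; for standard-normal samples of this size the estimate
# sits within about a percent of 1.0.
mad = np.median(np.abs(x - np.median(x)))
sigma_est = 1.4826 * mad
assert np.round(sigma_est) == 1.0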
tests/test_numpy_tools.py (0 additions & 2 deletions)

@@ -5,7 +5,6 @@
 import numpy as np
 import numpy.testing as npt
 from astroNN.config import MAGIC_NUMBER
-from astroNN.shared.nn_tools import cpu_fallback
 from astroNN.nn.numpy import (
     kl_divergence,
     l1,
@@ -19,7 +18,6 @@
     sigmoid_inv,
 )
 
-cpu_fallback()
 
 
 test_data = [np.array([-1.0, 2.0, 3.0, 4.0]), [-1.0, 2.0, 3.0, 4.0], 0.0]
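
Both test files also drop the cpu_fallback() pinning, so the suites now run on whatever device the backend picks. If a run still needs to stay on the CPU, hiding accelerators before the backend initializes is one common approach; the snippet below illustrates that idea under that assumption and is not a reconstruction of astroNN's cpu_fallback():

import os

# Hide CUDA devices before keras initializes its backend so the test
# session never touches a GPU; illustrative only.
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"

import keras  # noqa: E402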
