Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

docs (model): corrected default for tf_epochs #221

Closed
wants to merge 4 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 4 additions & 2 deletions aucmedi/data_processing/data_generator.py
Original file line number Diff line number Diff line change
Expand Up @@ -300,7 +300,9 @@ def _get_batches_of_transformed_samples(self, index_array):

# Stack images and optional metadata together into a batch
input_stack = np.stack(batch_stack[0], axis=0)
if self.metadata is not None:
if self.metadata is not None and self.labels is None:
input_stack = [[input_stack, self.metadata[index_array]]]
elif self.metadata is not None and self.labels is not None:
input_stack = [input_stack, self.metadata[index_array]]
batch = (input_stack, )
# Stack classifications together into a batch if available
Expand Down Expand Up @@ -414,4 +416,4 @@ def __set_index_array__(self):

""" Internal function at the end of an epoch. """
def on_epoch_end(self):
self.__set_index_array__()
self.__set_index_array__()
2 changes: 1 addition & 1 deletion aucmedi/neural_network/model.py
Original file line number Diff line number Diff line change
Expand Up @@ -178,7 +178,7 @@ def __init__(self, n_labels, channels, input_shape=None, architecture=None,
However, be aware of unexpected adverse effects (experimental)!

Attributes:
tf_epochs (int, default=5): Transfer Learning configuration: Number of epochs with frozen layers except classification head.
tf_epochs (int, default=10): Transfer Learning configuration: Number of epochs with frozen layers except classification head.
tf_lr_start (float, default=1e-4): Transfer Learning configuration: Starting learning rate for frozen layer fitting.
tf_lr_end (float, default=1e-5): Transfer Learning configuration: Starting learning rate after layer unfreezing.
meta_input (tuple of int): Meta variable: Input shape of architecture which can be passed to a DataGenerator. For example: (224, 224).
Expand Down
8 changes: 4 additions & 4 deletions tests/test_datagenerator.py
Original file line number Diff line number Diff line change
Expand Up @@ -175,9 +175,9 @@ def test_RUN_Metadata_noLabel(self):
for i in range(0, 10):
batch = data_gen[i]
self.assertTrue(len(batch), 1)
self.assertTrue(len(batch[0]) == 2)
self.assertTrue(np.array_equal(batch[0][0].shape, (5, 224, 224, 3)))
self.assertTrue(np.array_equal(batch[0][1].shape, (5, 10)))
self.assertTrue(len(batch[0][0]) == 2)
self.assertTrue(np.array_equal(batch[0][0][0].shape, (5, 224, 224, 3)))
self.assertTrue(np.array_equal(batch[0][0][1].shape, (5, 10)))

# Usage: Metadata for training
def test_RUN_Metadata_withLabel(self):
Expand Down Expand Up @@ -245,4 +245,4 @@ def test_utils_iter(self):
else:
self.assertTrue(np.array_equal(batch[0].shape, (1,224,224,3)))
counter += 1
self.assertTrue(counter == 4)
self.assertTrue(counter == 4)
136 changes: 136 additions & 0 deletions tests/test_neuralnetwork_metadata.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,136 @@
#==============================================================================#
# Author: Dominik Müller #
# Copyright: 2024 IT-Infrastructure for Translational Medical Research, #
# University of Augsburg #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
#==============================================================================#
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
#External libraries
import unittest
import tempfile
import os
from PIL import Image
import numpy as np
#Internal libraries
from aucmedi import *

unittest.TestLoader.sortTestMethodsUsing = None

#-----------------------------------------------------#
# Unittest: NeuralNetwork #
#-----------------------------------------------------#
class NeuralNetworkTEST(unittest.TestCase):
    """ Unit tests for the NeuralNetwork class with combined imaging and
        metadata input.

        Fixtures (built once per class): 10 random RGB images stored on disk,
        one-hot classification labels (4 classes), one-hot metadata vectors
        (5 variables), and a shared DataGenerator (batch_size=3) wrapping them.
    """
    # Create random imaging, classification and metadata fixtures
    @classmethod
    def setUpClass(cls):
        np.random.seed(1234)
        # Initialize temporary directory
        cls.tmp_data = tempfile.TemporaryDirectory(prefix="tmp.aucmedi.",
                                                   suffix=".data")

        # Create RGB data
        cls.sampleList_rgb = []
        for i in range(0, 10):
            img_rgb = np.random.rand(32, 32, 3) * 255
            imgRGB_pillow = Image.fromarray(img_rgb.astype(np.uint8))
            index = "image.sample_" + str(i) + ".RGB.png"
            path_sampleRGB = os.path.join(cls.tmp_data.name, index)
            imgRGB_pillow.save(path_sampleRGB)
            cls.sampleList_rgb.append(index)

        # Create classification labels (one-hot, 4 classes)
        cls.labels_ohe = np.zeros((10, 4), dtype=np.uint8)
        for i in range(0, 10):
            class_index = np.random.randint(0, 4)
            cls.labels_ohe[i][class_index] = 1

        # Create metadata labels (one-hot, 5 variables)
        cls.labels_metadata = np.zeros((10, 5), dtype=np.uint8)
        for i in range(0, 10):
            class_index = np.random.randint(0, 5)
            cls.labels_metadata[i][class_index] = 1

        # Create RGB Data Generator combining images, metadata and labels
        cls.datagen = DataGenerator(cls.sampleList_rgb,
                                    cls.tmp_data.name,
                                    metadata=cls.labels_metadata,
                                    labels=cls.labels_ohe,
                                    resize=(32, 32),
                                    shuffle=True,
                                    grayscale=False, batch_size=3)

    # Explicitly remove the temporary image directory instead of relying on
    # garbage collection for cleanup
    @classmethod
    def tearDownClass(cls):
        cls.tmp_data.cleanup()

    #-------------------------------------------------#
    #                  Model Training                 #
    #-------------------------------------------------#
    def test_training_pure(self):
        """ Plain fitting run with metadata-aware model returns a history. """
        model = NeuralNetwork(n_labels=4, channels=3, batch_queue_size=1, meta_variables=5)
        hist = model.train(training_generator=self.datagen,
                           epochs=3)
        self.assertTrue("loss" in hist)

    def test_training_iterations(self):
        """ The iterations parameter caps steps but history length follows epochs. """
        model = NeuralNetwork(n_labels=4, channels=3, batch_queue_size=1, meta_variables=5)
        hist = model.train(training_generator=self.datagen,
                           epochs=5, iterations=10)
        self.assertTrue("loss" in hist)
        self.assertTrue(len(hist["loss"]) == 5)

        hist = model.train(training_generator=self.datagen,
                           epochs=3, iterations=2)
        self.assertTrue("loss" in hist)
        self.assertTrue(len(hist["loss"]) == 3)

    def test_training_validation(self):
        """ Fitting with a validation generator yields validation metrics. """
        model = NeuralNetwork(n_labels=4, channels=3, batch_queue_size=1, meta_variables=5)
        hist = model.train(training_generator=self.datagen,
                           validation_generator=self.datagen,
                           epochs=4)
        self.assertTrue("loss" in hist and "val_loss" in hist)

    def test_training_transferlearning(self):
        """ Transfer learning produces both frozen (tl) and fine-tune (ft) phases. """
        model = NeuralNetwork(n_labels=4, channels=3, batch_queue_size=1, meta_variables=5)
        model.tf_epochs = 2
        hist = model.train(training_generator=self.datagen,
                           validation_generator=self.datagen,
                           epochs=3, transfer_learning=True)
        self.assertTrue("tl_loss" in hist and "tl_val_loss" in hist)
        self.assertTrue("ft_loss" in hist and "ft_val_loss" in hist)

    #-------------------------------------------------#
    #                 Model Inference                 #
    #-------------------------------------------------#
    def test_predict(self):
        """ Prediction on a metadata generator yields softmax-like class scores. """
        labels_temp = self.datagen.labels
        model = NeuralNetwork(n_labels=4, channels=3, batch_queue_size=1, meta_variables=5)
        hist = model.train(training_generator=self.datagen,
                           epochs=3)

        # Switch the shared generator into inference mode (no labels):
        # batches then consist of the input stack only -> tuple of length 1
        self.datagen.labels = None
        for i in range(0, 3):
            batch = self.datagen[i]
            # BUGFIX: original used assertTrue(len(batch), 2), which is
            # vacuously true (the second argument is the msg parameter)
            self.assertEqual(len(batch), 1)
        preds = model.predict(self.datagen)

        self.assertTrue(preds.shape == (10, 4))
        for i in range(0, 10):
            # Each prediction row should sum to ~1 (softmax output)
            self.assertTrue(np.sum(preds[i]) >= 0.99 and np.sum(preds[i]) <= 1.01)
        # Restore training mode: batches carry (inputs, labels) -> length 2
        self.datagen.labels = labels_temp
        for i in range(0, 3):
            batch = self.datagen[i]
            self.assertEqual(len(batch), 2)
            self.assertTrue(np.array_equal(batch[0][0].shape, (3, 32, 32, 3)))
Loading