
Commit

remove unused layers and switch some ops from tf to keras
henrysky committed Jan 3, 2024
1 parent 9f1ab82 commit ff27c10
Showing 12 changed files with 141 additions and 1,166 deletions.
1 change: 0 additions & 1 deletion .github/workflows/ci_tests.yml
@@ -29,7 +29,6 @@ jobs:
pip install torch~=${{ matrix.TORCH_VER }}
pip install keras coveralls pydot graphviz pytest pytest-cov
pip install .
python -c "import keras; print('Keras Version: ', keras.__version__)"
- name: Cache test data
uses: actions/cache@v3
with:
48 changes: 0 additions & 48 deletions astroNN/models/SimpleBayesPolyNN.py

This file was deleted.

20 changes: 7 additions & 13 deletions astroNN/models/__init__.py
@@ -141,7 +141,7 @@ def load_folder(folder=None):
# else try to import it from standard way
try:
astronn_model_obj = getattr(
importlib.import_module(f"astroNN.models"), identifier
importlib.import_module("astroNN.models"), identifier
)()
except ImportError:
# try to load custom model from CUSTOM_MODEL_PATH if none are working
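The retained lookup above resolves a model class by name from the astroNN.models namespace and only falls back to custom models when that import fails. A minimal sketch of the same pattern, assuming a hypothetical fallback module name (illustrative only, not code from this commit):

    import importlib

    def lookup_model(identifier):
        """Resolve a model class by name, illustrating the getattr/import pattern above."""
        try:
            # standard lookup: a class exported from the astroNN.models package
            return getattr(importlib.import_module("astroNN.models"), identifier)()
        except (ImportError, AttributeError):
            # fallback: a user-supplied module (hypothetical stand-in for CUSTOM_MODEL_PATH)
            custom = importlib.import_module("my_custom_models")
            return getattr(custom, identifier)()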
@@ -186,9 +186,9 @@ def load_folder(folder=None):
# Must have parameter
astronn_model_obj._input_shape = parameter["input"]
astronn_model_obj._labels_shape = parameter["labels"]
if type(astronn_model_obj._input_shape) is not dict:
if not isinstance(astronn_model_obj._input_shape, dict):
astronn_model_obj._input_shape = {"input": astronn_model_obj._input_shape}
if type(astronn_model_obj._labels_shape) is not dict:
if not isinstance(astronn_model_obj._labels_shape, dict):
astronn_model_obj._labels_shape = {"output": astronn_model_obj._labels_shape}
astronn_model_obj.num_hidden = parameter["hidden"]
astronn_model_obj.input_norm_mode = parameter["input_norm_mode"]
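The two checks changed above differ for dict subclasses: type(x) is not dict is True even for an OrderedDict, so the old code would wrap an already-mapped shape a second time, while isinstance accepts any dict subclass. A small illustration (the shape value is made up):

    from collections import OrderedDict

    shape = OrderedDict(input=(128, 1))   # made-up shape, standing in for parameter["input"]

    print(type(shape) is not dict)   # True  -> old check would re-wrap it as {"input": shape}
    print(isinstance(shape, dict))   # True  -> new check leaves the mapping untouched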
@@ -313,16 +313,10 @@ def load_folder(folder=None):
# its weird that keras needs -> metrics[metric][0] instead of metrics[metric] likes losses
try:
try:
if version.parse(tf.__version__) >= version.parse("2.4.0"):
metrics = [
losses_lookup(_metric["config"]["fn"])
for _metric in metrics_raw[0]
]
else:
metrics = [
losses_lookup(metrics_raw[_metric][0])
for _metric in metrics_raw
]
metrics = [
losses_lookup(_metric["config"]["fn"])
for _metric in metrics_raw[0]
]
except TypeError:
metrics = [losses_lookup(metrics_raw[0])]
except:
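The dropped branch above handled the pre-TF-2.4 metrics layout; the retained branch expects the newer serialization, where each compiled metric is stored as a config dict whose function name sits under config.fn. A minimal sketch of the structure that branch parses (the exact contents of metrics_raw are an assumption here; losses_lookup is astroNN's own name-to-function resolver):

    # Hypothetical serialized-metrics payload, mirroring what the retained branch expects.
    metrics_raw = [
        [
            {"class_name": "MeanMetricWrapper", "config": {"fn": "mean_absolute_error"}},
            {"class_name": "MeanMetricWrapper", "config": {"fn": "mean_squared_error"}},
        ]
    ]

    # equivalent of: metrics = [losses_lookup(m["config"]["fn"]) for m in metrics_raw[0]]
    names = [m["config"]["fn"] for m in metrics_raw[0]]
    print(names)   # ['mean_absolute_error', 'mean_squared_error']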
46 changes: 1 addition & 45 deletions astroNN/models/misc_models.py
@@ -5,7 +5,7 @@

from astroNN.models.base_bayesian_cnn import BayesianCNNBase
from astroNN.models.base_cnn import CNNBase
from astroNN.nn.layers import MCDropout, PolyFit
from astroNN.nn.layers import MCDropout
from astroNN.nn.losses import (
bayesian_binary_crossentropy_wrapper,
bayesian_binary_crossentropy_var_wrapper,
@@ -257,47 +257,3 @@ def model(self):
)

return model, model_prediction, output_loss, variance_loss


# noinspection PyCallingNonCallable
class SimplePolyNN(CNNBase):
"""
Class for Neural Network for Gaia Polynomial fitting
:History: 2018-Jul-23 - Written - Henry Leung (University of Toronto)
"""

def __init__(self, lr=0.005, init_w=None, use_xbias=False):
super().__init__()

self._implementation_version = "1.0"
self.max_epochs = 40
self.lr = lr
self.reduce_lr_epsilon = 0.00005
self.num_hidden = 3 # equals degree of polynomial to fit

self.reduce_lr_min = 1e-8
self.reduce_lr_patience = 2

self.input_norm_mode = 0
self.labels_norm_mode = 0
self.init_w = init_w
self.use_xbias = use_xbias
self.task = "regression"
self.targetname = ["unbiased_parallax"]

def model(self):
input_tensor = Input(shape=self._input_shape, name="input")
flattener = Flatten()(input_tensor)
output = PolyFit(
deg=self.num_hidden,
output_units=self._labels_shape,
use_xbias=self.use_xbias,
name="output",
init_w=self.init_w,
kernel_regularizer=regularizers.l2(self.l2),
)(flattener)

model = Model(inputs=input_tensor, outputs=output)

return model
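The deleted SimplePolyNN (and the SimpleBayesPolyNN module removed above) wrapped a PolyFit layer whose degree equalled num_hidden. For anyone who used it only for plain polynomial fitting, a rough NumPy stand-in is sketched below; it is not an astroNN API and does not reproduce the Bayesian variant, the custom weight initialisation (init_w), or the bias handling (use_xbias):

    import numpy as np

    # Synthetic data: a noisy cubic (illustrative only).
    rng = np.random.default_rng(0)
    x = np.linspace(-1.0, 1.0, 200)
    y = 0.5 * x**3 - 0.2 * x + rng.normal(scale=0.05, size=x.shape)

    # Degree 3 matches the removed model's default num_hidden = 3.
    coeffs = np.polyfit(x, y, deg=3)
    y_pred = np.polyval(coeffs, x)
    print(coeffs)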
42 changes: 0 additions & 42 deletions astroNN/nn/__init__.py
@@ -1,45 +1,3 @@
def reduce_var(x, axis=None, keepdims=False):
"""
Calculate variance using Tensorflow (as opposed to tf.nn.moment which return both variance and mean)
:param x: Data
:type x: tf.Tensor
:param axis: Axis
:type axis: int
:param keepdims: Keeping variance dimension as data or not
:type keepdims: boolean
:return: Variance
:rtype: tf.Tensor
:History: 2018-Mar-04 - Written - Henry Leung (University of Toronto)
"""
import tensorflow as tf

m = tf.reduce_mean(x, axis, True)
devs_squared = tf.square(x - m)
return tf.reduce_mean(devs_squared, axis, keepdims)


def intpow_avx2(x, n):
"""
Calculate integer power of float (including negative) even with Tensorflow compiled with AVX2 since --fast-math
compiler flag aggressively optimize float operation which is common with AVX2 flag
:param x: identifier
:type x: tf.Tensor
:param n: an integer power (a float will be casted to integer!!)
:type n: int
:return: powered float(s)
:rtype: tf.Tensor
:History: 2018-Aug-13 - Written - Henry Leung (University of Toronto)
"""
import tensorflow as tf

# expand inputs to prepare to be tiled
expanded_inputs = tf.expand_dims(x, 1)
# we want [1, self.n]
return tf.reduce_prod(tf.tile(expanded_inputs, [1, n]), axis=-1)


def nn_obj_lookup(identifier, module_obj=None, module_name="default_obj"):
"""
Lookup astroNN.nn function by name
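Both deleted helpers have counterparts in the Keras 3 ops namespace, in line with the commit's goal of moving from tf to keras ops; whether these exact calls are what replaces them elsewhere in astroNN is an assumption. A minimal sketch, assuming Keras 3 is installed:

    import keras
    from keras import ops

    x = keras.random.normal((4, 8))

    # Variance along an axis, covering what reduce_var computed via a mean of squared deviations.
    var = ops.var(x, axis=-1, keepdims=True)

    # Integer power, covering intpow_avx2 (which used expand_dims + tile + reduce_prod to avoid
    # --fast-math/AVX2 issues with float power ops).
    cubed = ops.power(x, 3)

    print(var.shape, cubed.shape)   # (4, 1) (4, 8)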
(Diffs for the remaining 7 changed files were not loaded.)
