Skip to content

Commit

Permalink
Merge pull request #6 from icecube/UpdateToTensorflow2.16.1
Browse files Browse the repository at this point in the history
Updates to more recent Python packages, including TensorFlow.
  • Loading branch information
mhuen authored Apr 22, 2024
2 parents a1df0ba + 3f6c6a4 commit b2a086c
Show file tree
Hide file tree
Showing 10 changed files with 146 additions and 44 deletions.
6 changes: 5 additions & 1 deletion dnn_reco/create_trafo_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -83,7 +83,11 @@ def main(config_files):
)

with open(trafo_config_file, "w") as yaml_file:
yaml.dump(config, yaml_file, default_flow_style=False)
yaml.YAML(typ="full").dump(
config,
yaml_file,
default_flow_style=False,
)
data_transformer.save_trafo_model(config["trafo_model_path"])

# kill multiprocessing queues and workers
Expand Down
2 changes: 1 addition & 1 deletion dnn_reco/data_handler.py
Original file line number Diff line number Diff line change
Expand Up @@ -129,7 +129,7 @@ def setup_with_config(self, config_file):
Description
"""
with open(config_file, "r") as stream:
config_meta = yaml.safe_load(stream)
config_meta = yaml.YAML(typ="safe", pure=True).load(stream)

self.label_names = config_meta["label_names"]
self.label_name_dict = config_meta["label_name_dict"]
Expand Down
12 changes: 7 additions & 5 deletions dnn_reco/export_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -170,7 +170,7 @@ def main(config_files, output_folder, data_settings, logs):
"num_misc": data_handler.num_misc,
}
with open(os.path.join(output_folder, "config_meta_data.yaml"), "w") as f:
yaml.dump(meta_data, f, default_flow_style=False)
yaml.YAML(typ="full").dump(meta_data, f, default_flow_style=False)

# ------------------------------------
# Export package versions and git hash
Expand All @@ -183,7 +183,9 @@ def main(config_files, output_folder, data_settings, logs):
"pip_installed_packages": config["pip_installed_packages"],
}
with open(os.path.join(output_folder, "version_control.yaml"), "w") as f:
yaml.dump(version_control, f, default_flow_style=False)
yaml.YAML(typ="full").dump(
version_control, f, default_flow_style=False
)

# -------------------------------
# Export tensorflow training logs
Expand Down Expand Up @@ -213,15 +215,15 @@ def export_data_settings(data_settings, output_folder):
"""
try:
with open(data_settings, "r") as stream:
data_config = yaml.safe_load(stream)
data_config = yaml.YAML(typ="safe", pure=True).load(stream)
except Exception as e:
print(e)
print("Falling back to modified SafeLoader")
with open(data_settings, "r") as stream:
yaml.SafeLoader.add_constructor(
"tag:yaml.org,2002:python/unicode", lambda _, node: node.value
)
data_config = dict(yaml.safe_load(stream))
data_config = dict(yaml.YAML(typ="safe", pure=True).load(stream))

for k in [
"pulse_time_quantiles",
Expand Down Expand Up @@ -292,7 +294,7 @@ def export_data_settings(data_settings, output_folder):
with open(
os.path.join(output_folder, "config_data_settings.yaml"), "w"
) as f:
yaml.dump(data_settings, f, default_flow_style=False)
yaml.YAML(typ="full").dump(data_settings, f, default_flow_style=False)


if __name__ == "__main__":
Expand Down
2 changes: 1 addition & 1 deletion dnn_reco/ic3/modules.py
Original file line number Diff line number Diff line change
Expand Up @@ -124,7 +124,7 @@ def Configure(self):
# ----------------------------------------------------------------
cfg_file = os.path.join(self._model_path, "config_data_settings.yaml")
with open(cfg_file, "r") as stream:
data_config = yaml.safe_load(stream)
data_config = yaml.YAML(typ="safe", pure=True).load(stream)

# Backwards compatibility for older exported models which did not
# include this setting. In this case the separated format, e.g.
Expand Down
12 changes: 7 additions & 5 deletions dnn_reco/model.py
Original file line number Diff line number Diff line change
Expand Up @@ -140,9 +140,9 @@ def _setup_training_config_saver(self):

# Load training iterations dict
if os.path.isfile(self._training_steps_file):
self._training_iterations_dict = yaml.safe_load(
open(self._training_steps_file)
)
with open(self._training_steps_file, "r") as stream:
yaml_loader = yaml.YAML(typ="safe", pure=True)
self._training_iterations_dict = yaml_loader.load(stream)
else:
misc.print_warning(
"Did not find {!r}. Creating new one".format(
Expand Down Expand Up @@ -1134,12 +1134,14 @@ def _save_training_config(self, iteration):
del training_config["tf_float_precision"]

with open(self._training_config_file, "w") as yaml_file:
yaml.dump(training_config, yaml_file, default_flow_style=False)
yaml.YAML(typ="full").dump(
training_config, yaml_file, default_flow_style=False
)

# update number of training iterations in training_steps.yaml
self._training_iterations_dict[self._training_step] = iteration
with open(self._training_steps_file, "w") as yaml_file:
yaml.dump(
yaml.YAML(typ="full").dump(
self._training_iterations_dict,
yaml_file,
default_flow_style=False,
Expand Down
30 changes: 26 additions & 4 deletions dnn_reco/modules/models/general_IC86_models.py
Original file line number Diff line number Diff line change
Expand Up @@ -99,7 +99,9 @@ def general_model_IC86(

# apply DOM dropout, split and reshape DeepCore input
X_IC78, X_DeepCore_upper, X_DeepCore_lower = preprocess_icecube_data(
is_training, shared_objects
is_training,
shared_objects,
seed=config["tf_random_seed"],
)

# -----------------------------------
Expand All @@ -111,6 +113,7 @@ def general_model_IC86(
name="Upper DeepCore",
method_list="convolution",
keep_prob=keep_prob_list[1],
seed=config["tf_random_seed"],
**config["conv_upper_DeepCore_settings"]
)

Expand All @@ -123,6 +126,7 @@ def general_model_IC86(
name="Lower DeepCore",
method_list="convolution",
keep_prob=keep_prob_list[1],
seed=config["tf_random_seed"],
**config["conv_lower_DeepCore_settings"]
)

Expand All @@ -135,6 +139,7 @@ def general_model_IC86(
is_training=is_training,
method_list="hex_convolution",
keep_prob=keep_prob_list[1],
seed=config["tf_random_seed"],
**config["conv_IC78_settings"]
)

Expand All @@ -159,7 +164,11 @@ def general_model_IC86(
)

# dropout
layer_flat = tf.nn.dropout(layer_flat, rate=1 - (keep_prob_list[2]))
layer_flat = tf.nn.dropout(
layer_flat,
rate=1 - (keep_prob_list[2]),
seed=config["tf_random_seed"],
)

# -----------------------------------
# fully connected layers
Expand All @@ -171,6 +180,7 @@ def general_model_IC86(
input=layer_flat,
keep_prob=keep_prob_list[3],
is_training=is_training,
seed=config["tf_random_seed"],
**fc_settings
)

Expand Down Expand Up @@ -306,6 +316,7 @@ def general_model_IC86(
input=unc_input,
is_training=is_training,
keep_prob=keep_prob_list[3],
seed=config["tf_random_seed"],
**fc_unc_settings
)
y_unc_pred_trafo = uncertainty_layers[-1]
Expand Down Expand Up @@ -382,7 +393,9 @@ def general_model_IC86_opt4(

# apply DOM dropout, split and reshape DeepCore input
X_IC78, X_DeepCore_upper, X_DeepCore_lower = preprocess_icecube_data(
is_training, shared_objects
is_training,
shared_objects,
seed=config["tf_random_seed"],
)

# -----------------------------------
Expand All @@ -394,6 +407,7 @@ def general_model_IC86_opt4(
name="Upper DeepCore",
method_list="convolution",
keep_prob=keep_prob_list[1],
seed=config["tf_random_seed"],
**config["conv_upper_DeepCore_settings"]
)

Expand All @@ -406,6 +420,7 @@ def general_model_IC86_opt4(
name="Lower DeepCore",
method_list="convolution",
keep_prob=keep_prob_list[1],
seed=config["tf_random_seed"],
**config["conv_lower_DeepCore_settings"]
)

Expand All @@ -418,6 +433,7 @@ def general_model_IC86_opt4(
is_training=is_training,
method_list="hex_convolution",
keep_prob=keep_prob_list[1],
seed=config["tf_random_seed"],
**config["conv_IC78_settings"]
)

Expand All @@ -442,7 +458,11 @@ def general_model_IC86_opt4(
)

# dropout
layer_flat = tf.nn.dropout(layer_flat, rate=1 - (keep_prob_list[2]))
layer_flat = tf.nn.dropout(
layer_flat,
rate=1 - (keep_prob_list[2]),
seed=config["tf_random_seed"],
)

# -----------------------------------
# fully connected layers
Expand All @@ -454,6 +474,7 @@ def general_model_IC86_opt4(
input=layer_flat,
keep_prob=keep_prob_list[3],
is_training=is_training,
seed=config["tf_random_seed"],
**fc_settings
)

Expand Down Expand Up @@ -632,6 +653,7 @@ def general_model_IC86_opt4(
input=tf.stop_gradient(layer_flat),
is_training=is_training,
keep_prob=keep_prob_list[3],
seed=config["tf_random_seed"],
**fc_unc_settings
)
y_unc_pred_trafo = uncertainty_layers[-1]
Expand Down
6 changes: 5 additions & 1 deletion dnn_reco/modules/models/utils/model_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
import tensorflow as tf


def preprocess_icecube_data(is_training, shared_objects):
def preprocess_icecube_data(is_training, shared_objects, seed=None):
"""Performs some basic preprocessing of IceCube input data.
Applies drop out for whole DOMs.
Expand All @@ -20,6 +20,8 @@ def preprocess_icecube_data(is_training, shared_objects):
shared_objects : dict
A dictionary containing settings and objects that are shared and passed
on to sub modules.
seed : int, optional
Random seed for reproducibility.
Returns
-------
Expand Down Expand Up @@ -56,12 +58,14 @@ def preprocess_icecube_data(is_training, shared_objects):
X_IC78,
rate=1 - (keep_prob_list[0]),
noise_shape=noise_shape_IC78,
seed=seed,
)

X_DeepCore = tf.nn.dropout(
X_DeepCore,
rate=1 - (keep_prob_list[0]),
noise_shape=noise_shape_DeepCore,
seed=seed,
)

# -----------------------------------
Expand Down
4 changes: 3 additions & 1 deletion dnn_reco/setup_manager.py
Original file line number Diff line number Diff line change
Expand Up @@ -235,7 +235,9 @@ def _setup_config(self):
else:
config_name += "__" + file_base_name

config_update = yaml.safe_load(open(config_file))
with open(config_file, "r") as stream:
config_update = yaml.YAML(typ="safe", pure=True).load(stream)

duplicates = set(new_config.keys()).intersection(
set(config_update.keys())
)
Expand Down
Loading

0 comments on commit b2a086c

Please sign in to comment.