From 0294c219125a8df7666659561ac06981d6e59410 Mon Sep 17 00:00:00 2001
From: Charlie Meyers
Date: Wed, 29 Nov 2023 02:50:33 +0000
Subject: [PATCH 1/4] fix timing bug

---
 deckard/base/attack/attack.py                | 59 +++++++++++++-------
 deckard/base/model/model.py                  | 43 ++++++++++----
 examples/pytorch/cifar100/attacks.sh         |  6 +-
 examples/pytorch/cifar100/conf/cifar100.yaml |  4 +-
 examples/pytorch/cifar100/dvc.lock           | 35 +++++-------
 examples/pytorch/cifar100/dvc.yaml           | 40 +++-------
 6 files changed, 96 insertions(+), 91 deletions(-)

diff --git a/deckard/base/attack/attack.py b/deckard/base/attack/attack.py
index 237b21ff..5a02e0ce 100644
--- a/deckard/base/attack/attack.py
+++ b/deckard/base/attack/attack.py
@@ -4,7 +4,7 @@
 from pathlib import Path
 import logging
 from copy import deepcopy
-from time import process_time_ns
+from time import process_time_ns, time
 from omegaconf import DictConfig, OmegaConf
 from hydra.utils import instantiate
 from art.utils import to_categorical, compute_success
@@ -154,6 +154,7 @@ def __call__(
         kwargs.update({"y": data[2][: self.attack_size]})
         if "AdversarialPatch" in self.name:
             start = process_time_ns()
+            start_timestamp = time()
             patches, _ = atk.generate(ben_samples, **kwargs)
             samples = atk.apply_patch(
                 ben_samples,
@@ -162,14 +163,16 @@ def __call__(
             )
         else:
             start = process_time_ns()
+            start_timestamp = time()
             samples = atk.generate(ben_samples, **kwargs)
         end = process_time_ns()
+        end_timestamp = time()
         time_dict.update(
             {
                 "adv_fit_time_per_sample": (end - start) / (len(samples) * 1e9),
                 "adv_fit_time": (end - start) / 1e9,
-                "adv_fit_start_time": start,
-                "adv_fit_stop_time": end,
+                "adv_fit_start_time": start_timestamp,
+                "adv_fit_stop_time": end_timestamp,
             },
         )
         device = str(model.device) if hasattr(model, "device") else "cpu"
@@ -205,6 +208,7 @@ def __call__(
             "predict_proba",
         ):
             start = process_time_ns()
+            start_timestamp = time()
             adv_probabilities = model.model.predict_proba(samples)
             end = process_time_ns()
         try:
@@ -216,12 +220,13 @@ def __call__(
             start = process_time_ns()
             adv_probabilities = model.predict(samples)
             end = process_time_ns()
+            end_timestamp = time()
         time_dict.update(
             {
                 "adv_predict_time_per_sample": (end - start) / (len(samples) * 1e9),
                 "adv_predict_time": (end - start) / 1e9,
-                "adv_predict_start_time": start,
-                "adv_predict_stop_time": end,
+                "adv_predict_start_time": start_timestamp,
+                "adv_predict_stop_time": end_timestamp,
             },
         )
         device = str(model.device) if hasattr(model, "device") else "cpu"
@@ -229,7 +234,6 @@ def __call__(
         results["adv_probabilities"] = np.array(adv_probabilities)
         if adv_probabilities_file is not None:
             self.data.save(adv_probabilities, adv_probabilities_file)
-
         if adv_losses_file is not None and Path(adv_losses_file).exists():
             adv_loss = self.data.load(adv_losses_file)
             results["adv_losses"] = np.array(adv_loss)
@@ -324,12 +328,15 @@ def __call__(
         )
         try:
             start = process_time_ns()
+            start_timestamp = time()
             samples, _ = atk.poison(
                 x_trigger=x_trigger,
                 y_trigger=y_trigger,
                 x_train=x_train,
                 y_train=y_train,
             )
+            end = process_time_ns()
+            end_timestamp = time()
         except RuntimeError as e:
             if "expected scalar type Long" in str(e):
                 # if hasattr(y_train, "type"):
@@ -359,21 +366,24 @@ def __call__(
                     attack_size=self.attack_size,
                 )
                 start = process_time_ns()
+                start_timestamp = time()
                 samples, _ = atk.poison(
                     x_trigger=x_trigger,
                     y_trigger=y_trigger,
                     x_train=x_train,
                     y_train=y_train,
                 )
+                end = process_time_ns()
+                end_timestamp = time()
             else:
                 raise e
-        end = process_time_ns()
+
         time_dict.update(
             {
                 "adv_fit_time_per_sample": (end - start) / (len(samples) * 1e9),
                 "adv_fit_time": (end - start) / 1e9,
-                "adv_fit_start_time": start,
-                "adv_fit_stop_time": end,
+                "adv_fit_start_time": start_timestamp,
+                "adv_fit_stop_time": end_timestamp,
             },
         )
         device = str(model.device) if hasattr(model, "device") else "cpu"
@@ -397,22 +407,28 @@ def __call__(
             "predict_proba",
         ):
             start = process_time_ns()
+            start_timestamp = time()
             adv_probabilities = model.model.predict_proba(samples)
             end = process_time_ns()
+            end_timestamp = time()
         try:
             start = process_time_ns()
+            start_timestamp = time()
             adv_probabilities = model.predict_proba(samples)
             end = process_time_ns()
+            end_timestamp = time()
         except AttributeError:
             start = process_time_ns()
+            start_timestamp = time()
             adv_probabilities = model.predict(samples)
             end = process_time_ns()
+            end_timestamp = time()
         time_dict.update(
             {
                 "adv_predict_time_per_sample": (end - start) / (len(samples) * 1e9),
                 "adv_predict_time": (end - start) / 1e9,
-                "adv_predict_start_time": start,
-                "adv_predict_stop_time": end,
+                "adv_predict_start_time": start_timestamp,
+                "adv_predict_stop_time": end_timestamp,
             },
         )
         device = str(model.device) if hasattr(model, "device") else "cpu"
@@ -514,25 +530,24 @@ def __call__(
         if "MembershipInferenceBlackBox" in self.name:
             infer = self.kwargs.pop("infer", {})
             fit = self.kwargs.pop("fit", {})
-            start = process_time_ns()
             atk.fit(x=x_train, y=y_train, test_x=x_test, test_y=y_test, **fit)
-            end = process_time_ns()
-
             x_train = data[0][: self.attack_size]
             y_train = data[2][: self.attack_size]
             x_test = data[1][: self.attack_size]
             y_test = data[3][: self.attack_size]
             start = process_time_ns()
+            start_timestamp = time()
             preds = atk.infer(x_test, y_test, **infer)
             end = process_time_ns()
+            end_timestamp = time()
         else:
             raise NotImplementedError(f"Attack {self.name} not implemented.")
         time_dict.update(
             {
                 "adv_fit_time_per_sample": (end - start) / (len(preds) * 1e9),
                 "adv_fit_time": (end - start) / 1e9,
-                "adv_fit_start_time": start,
-                "adv_fit_stop_time": end,
+                "adv_fit_start_time": start_timestamp,
+                "adv_fit_stop_time": end_timestamp,
             },
         )
         device = str(model.device) if hasattr(model, "device") else "cpu"
@@ -606,6 +621,7 @@ def __call__(
         attack = deepcopy(self.init)
         attack = attack(model=model, attack_size=self.attack_size)
         start = process_time_ns()
+        start_timestamp = time()
         attacked_model = attack.extract(
             x=data[0][: self.attack_size],
             y=data[2][: self.attack_size],
@@ -613,13 +629,14 @@ def __call__(
             **kwargs,
         )
         end = process_time_ns()
+        end_timestamp = time()
         time_dict.update(
             {
                 "adv_fit_time_per_sample": (end - start)
                 / (self.attack_size * 1e9),
                 "adv_fit_time": (end - start) / 1e9,
-                "adv_fit_start_time": start,
-                "adv_fit_stop_time": end,
+                "adv_fit_start_time": start_timestamp,
+                "adv_fit_stop_time": end_timestamp,
             },
         )
         device = str(model.device) if hasattr(model, "device") else "cpu"
@@ -634,14 +651,16 @@ def __call__(
             preds = self.data.load(adv_predictions_file)
         else:
             start = process_time_ns()
+            start_timestamp = time()
             preds = attacked_model.predict(data[1][: self.attack_size])
             end = process_time_ns()
+            end_timestamp = time()
             time_dict.update(
                 {
                     "adv_predict_time_per_sample": (end - start) / (len(preds) * 1e9),
                     "adv_predict_time": (end - start) / 1e9,
-                    "adv_predict_start_time": start,
-                    "adv_predict_stop_time": end,
+                    "adv_predict_start_time": start_timestamp,
+                    "adv_predict_stop_time": end_timestamp,
                 },
             )
             device = str(model.device) if hasattr(model, "device") else "cpu"
diff --git a/deckard/base/model/model.py b/deckard/base/model/model.py
index bd69bc4a..83722b94 100644
--- a/deckard/base/model/model.py
+++ b/deckard/base/model/model.py
@@ -2,7 +2,7 @@
 import pickle
 from dataclasses import dataclass, field, asdict, is_dataclass
 from pathlib import Path
-from time import process_time_ns
+from time import process_time_ns, time
 from typing import Union, Dict
 from omegaconf import OmegaConf, DictConfig, ListConfig
 from copy import deepcopy
@@ -138,8 +138,10 @@ def __call__(self, data: list, model: object, library=None):
             data[2] = to_categorical(data[2])
             start = process_time_ns()
+            start_timestamp = time()
             model.fit(data[0], data[2], **trainer)
             end = process_time_ns() - start
+            end_timestamp = time()
         except ValueError as e:  # pragma: no cover
             if "Shape of labels" in str(e):
                 from art.utils import to_categorical
 
@@ -147,8 +149,10 @@ def __call__(self, data: list, model: object, library=None):
                 nb_classes = len(np.unique(data[2]))
                 data[2] = to_categorical(data[2], nb_classes=nb_classes)
                 start = process_time_ns()
+                start_timestamp = time()
                 model.fit(data[0], data[2], **trainer)
                 end = process_time_ns() - start
+                end_timestamp = time()
             else:
                 raise e
         except AttributeError as e:  # pragma: no cover
@@ -157,8 +161,10 @@ def __call__(self, data: list, model: object, library=None):
                 data[0] = np.array(data[0])
                 data[2] = np.array(data[2])
                 start = process_time_ns()
+                start_timestamp = time()
                 model.fit(data[0], data[2], **trainer)
                 end = process_time_ns() - start
+                end_timestamp = time()
             except Exception as e:
                 raise e
         except RuntimeError as e:  # pragma: no cover
@@ -167,11 +173,12 @@ def __call__(self, data: list, model: object, library=None):
                 tf.config.run_functions_eagerly(True)
 
                 start = process_time_ns()
+                start_timestamp = time()
                 model.fit(data[0], data[2], **trainer)
                 end = process_time_ns() - start
+                end_timestamp = time()
             elif "should be the same" in str(e).lower():
                 import torch
-
                 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
                 data[0] = torch.from_numpy(data[0])
                 data[1] = torch.from_numpy(data[1])
@@ -185,15 +192,17 @@ def __call__(self, data: list, model: object, library=None):
                 data[2].to(device)
             model.model.to(device) if hasattr(model, "model") else model.to(device)
             start = process_time_ns()
+            start_timestamp = time()
             model.fit(data[0], data[2], **trainer)
             end = process_time_ns() - start
+            end_timestamp = time()
         else:
             raise e
         time_dict = {
             "train_time": (start - end) / 1e9,
             "train_time_per_sample": end / (len(data[0]) * 1e9),
-            "train_time_start": start,
-            "train_time_end": end,
+            "train_start_time": start_timestamp,
+            "train_end_time": end_timestamp,
             "train_device": device,
         }
 
@@ -550,6 +559,7 @@ def predict(self, data=None, model=None, predictions_file=None):
         device = str(model.device) if hasattr(model, "device") else "cpu"
         try:
             start = process_time_ns()
+            start_timestamp = time()
             predictions = model.predict(data[1])
         except NotFittedError as e:  # pragma: no cover
             logger.warning(e)
@@ -568,15 +578,16 @@ def predict(self, data=None, model=None, predictions_file=None):
             logger.error(e)
             raise e
         end = process_time_ns() - start
+        end_timestamp = time()
         if predictions_file is not None:
             self.data.save(predictions, predictions_file)
         return (
             predictions,
             {
                 "predict_time": (start - end) / 1e9,
-                "predict_time_per_sample": start - end / (len(data[0]) * 1e9),
-                "predict_start_time": start,
-                "predict_stop_time": end,
+                "predict_time_per_sample": (start - end) / (len(data[0]) * 1e9),
+                "predict_start_time": start_timestamp,
+                "predict_stop_time": end_timestamp,
                 "predict_device": device,
             },
         )
@@ -612,12 +623,16 @@ def predict_proba(self, data=None, model=None, probabilities_file=None):
             )
         elif hasattr(model, "predict_proba"):
             start = process_time_ns()
+            start_timestamp = time()
             predictions = model.predict_proba(data[1])
             end = process_time_ns() - start
+            end_timestamp = time()
         else:
             start = process_time_ns()
+            start_timestamp = time()
             predictions = model.predict(data[1])
             end = process_time_ns() - start
+            end_timestamp = time()
         if probabilities_file is not None:
             self.data.save(predictions, probabilities_file)
         return (
@@ -625,8 +640,8 @@ def predict_proba(self, data=None, model=None, probabilities_file=None):
             {
                 "predict_proba_time": (start - end) / 1e9,
                 "predict_proba_time_per_sample": end / (len(data[0]) * 1e9),
-                "predict_proba_start_time": start,
-                "predict_proba_stop_time": end,
+                "predict_proba_start_time": start_timestamp,
+                "predict_proba_stop_time": end_timestamp,
                 "predict_proba_device": device,
             },
         )
@@ -661,16 +676,22 @@ def predict_log_loss(self, data, model, losses_file=None):
         )
         if hasattr(model, "predict_log_proba"):
             start = process_time_ns()
+            start_timestamp = time()
             predictions = model.predict_log_proba(data[1])
             end = process_time_ns() - start
+            end_timestamp = time()
         elif hasattr(model, "predict_proba"):
             start = process_time_ns()
+            start_timestamp = time()
             predictions = model.predict_proba(data[1])
             end = process_time_ns() - start
+            end_timestamp = time()
         elif hasattr(model, "predict"):
             start = process_time_ns()
+            start_timestamp = time()
             predictions = model.predict(data[1])
             end = process_time_ns() - start
+            end_timestamp = time()
         else:  # pragma: no cover
             raise ValueError(
                 f"Model {model} does not have a predict_log_proba or predict_proba method.",
@@ -682,8 +703,8 @@ def predict_log_loss(self, data, model, losses_file=None):
             {
                 "predict_log_proba_time": (start - end) / 1e9,
                 "predict_log_proba_time_per_sample": end / (len(data[0]) * 1e9),
-                "predict_log_proba_start_time": start,
-                "predict_log_proba_stop_time": end,
+                "predict_log_proba_start_time": start_timestamp,
+                "predict_log_proba_stop_time": end_timestamp,
                 "predict_log_device": device,
             },
         )
diff --git a/examples/pytorch/cifar100/attacks.sh b/examples/pytorch/cifar100/attacks.sh
index 8ec1f079..b491f97d 100644
--- a/examples/pytorch/cifar100/attacks.sh
+++ b/examples/pytorch/cifar100/attacks.sh
@@ -3,16 +3,16 @@
 # # This script is used to generate the attacks for the example.
 
 # Fast Gradient Method
-bash models.sh attack=default ++attack.init.name=art.attacks.evasion.FastGradientMethod ++attack.init.eps=.001,.01,.1,.5,1 ++attack.init.norm=inf,1,2 ++attack.init.eps_step=.001,.003,.01 ++attack.init.batch_size=1024 stage=attack ++hydra.sweeper.study_name=fgm ++direction=maximize $@
+bash models.sh attack=default ++attack.init.name=art.attacks.evasion.FastGradientMethod ++attack.init.eps=.001,.01,.1,.5,1 ++attack.init.norm=inf ++attack.init.eps_step=.001,.003,.01 ++attack.init.batch_size=1024 stage=attack ++hydra.sweeper.study_name=fgm ++direction=maximize $@
 
 # Projected Gradient Descent
-bash models.sh attack=default ++attack.init.name=art.attacks.evasion.ProjectedGradientDescent ++attack.init.eps=.001,.01,.1,.5,1 ++attack.init.norm=inf,1,2 ++attack.init.eps_step=.001,.003,.01 ++attack.init.batch_size=1024 ++attack.init.max_iter=10 stage=attack ++hydra.sweeper.study_name=pgd ++direction=maximize $@
+bash models.sh attack=default ++attack.init.name=art.attacks.evasion.ProjectedGradientDescent ++attack.init.eps=.001,.01,.1,.5,1 ++attack.init.norm=inf ++attack.init.eps_step=.001,.003,.01 ++attack.init.batch_size=1024 ++attack.init.max_iter=10 stage=attack ++hydra.sweeper.study_name=pgd ++direction=maximize $@
 
 # DeepFool
 bash models.sh attack=default ++attack.init.name=art.attacks.evasion.DeepFool ++attack.init.max_iter=10 ++attack.init.batch_size=1024 ++attack.init.nb_grads=1,3,5,10 stage=attack ++hydra.sweeper.study_name=deep ++direction=maximize $@
 
 # HopSkipJump
-bash models.sh attack=default ++attack.init.name=art.attacks.evasion.HopSkipJump ++attack.init.max_iter=1,3,5,10 ++attack.init.init_eval=10 ++attack.init.norm=inf,2 stage=attack ++hydra.sweeper.study_name=hsj ++direction=maximize $@
+bash models.sh attack=default ++attack.init.name=art.attacks.evasion.HopSkipJump ++attack.init.max_iter=1,3,5,10 ++attack.init.init_eval=10 ++attack.init.norm=inf stage=attack ++hydra.sweeper.study_name=hsj ++direction=maximize $@
 
 # #####################################################
 # PixelAttack
diff --git a/examples/pytorch/cifar100/conf/cifar100.yaml b/examples/pytorch/cifar100/conf/cifar100.yaml
index 01df936a..a0727d43 100644
--- a/examples/pytorch/cifar100/conf/cifar100.yaml
+++ b/examples/pytorch/cifar100/conf/cifar100.yaml
@@ -24,7 +24,7 @@ hydra:
     direction: ${direction}
     study_name: control
     storage: sqlite:///model.db
-    n_jobs: ${hydra.launcher.n_jobs}
+    n_jobs: 4
    n_trials : 32
     params:
       ++data.sample.random_state: choice(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)
@@ -33,7 +33,7 @@ hydra:
     _target_: hydra_plugins.hydra_optuna_sweeper.optuna_sweeper.OptunaSweeper
   launcher:
     _target_: hydra_plugins.hydra_joblib_launcher.joblib_launcher.JoblibLauncher
-    n_jobs: 4
+    n_jobs: 8
     prefer : processes
     verbose: 10
     timeout: null
diff --git a/examples/pytorch/cifar100/dvc.lock b/examples/pytorch/cifar100/dvc.lock
index a7d3feef..4b6d554b 100644
--- a/examples/pytorch/cifar100/dvc.lock
+++ b/examples/pytorch/cifar100/dvc.lock
@@ -84,7 +84,7 @@ stages:
         library: pytorch
         trainer:
           batch_size: 1024
-          nb_epoch: 10
+          nb_epoch: 1
       scorers:
         _target_: deckard.base.scorer.ScorerDict
         accuracy:
@@ -99,27 +99,18 @@ stages:
     - path: cifar100/data/data.pkl
       md5: 1070854e6c00fc787bc0fdfc82792fd6
      size: 761280311
-    - path: cifar100/models/model.optimizer.pt
-      md5: c13297479b0da6a736dfd1221ba175ac
-      size: 44989261
-    - path: cifar100/models/model.pt
-      md5: d2e61603d2c2e3de37a441351b168efa
-      size: 44998157
     - path: cifar100/reports/train/default/predictions.json
-      md5: d3714b8d6a66fb803936da52650f88ec
-      size: 24412355
+      md5: b77183a81e2ea292115959c7240c76a4
+      size: 24381529
     - path: cifar100/reports/train/default/score_dict.json
-      md5: 07a51c660e5d2a980e6c1fbc3dce0c16
-      size: 842
+      md5: 522fb97dc8effce658539ee9a70251ab
+      size: 831
   attack:
     cmd: python -m deckard.layers.experiment attack --config_file cifar100.yaml
     deps:
     - path: cifar100/data/data.pkl
       md5: 1070854e6c00fc787bc0fdfc82792fd6
       size: 761280311
-    - path: cifar100/models/model.pt
-      md5: d2e61603d2c2e3de37a441351b168efa
-      size: 44998157
     params:
       params.yaml:
         attack:
@@ -189,7 +180,7 @@ stages:
           library: pytorch
           trainer:
             batch_size: 1024
-            nb_epoch: 10
+            nb_epoch: 1
         name: art.attacks.evasion.HopSkipJump
         method: evasion
         model:
@@ -241,7 +232,7 @@ stages:
           library: pytorch
           trainer:
             batch_size: 1024
-            nb_epoch: 10
+            nb_epoch: 1
         data:
           _target_: deckard.base.data.Data
          generate:
@@ -322,7 +313,7 @@ stages:
         library: pytorch
         trainer:
           batch_size: 1024
-          nb_epoch: 10
+          nb_epoch: 1
       scorers:
         _target_: deckard.base.scorer.ScorerDict
         accuracy:
@@ -335,14 +326,14 @@ stages:
         name: sklearn.metrics.log_loss
     outs:
     - path: cifar100/attacks/attack.pkl
-      md5: b5d92a276b64215cbfbd6dbfb91c0d00
+      md5: 5317760d3c6f266ece07523e98517d46
       size: 123046
     - path: cifar100/reports/attack/default/adv_predictions.json
-      md5: 36f3eb805d4c1d45846c9e6684e2adfe
-      size: 21009
+      md5: 32d4798208a79862f9240dd4159b4754
+      size: 21414
    - path: cifar100/reports/attack/default/score_dict.json
-      md5: ddbabb9810f501b7bd027f511c11d526
-      size: 1111
+      md5: c39c3d51d82359b4908d6fe8271c5850
+      size: 1094
   attacks@ResNet18:
     cmd: bash attacks.sh ++attack.attack_size=100 ++model.init.name=torch_example.ResNet18
       stage=attack ++hydra.sweeper.storage=sqlite:///cifar100/reports/attack/ResNet18.db
diff --git a/examples/pytorch/cifar100/dvc.yaml b/examples/pytorch/cifar100/dvc.yaml
index 2bd4ac3e..b4dd189f 100644
--- a/examples/pytorch/cifar100/dvc.yaml
+++ b/examples/pytorch/cifar100/dvc.yaml
@@ -8,12 +8,9 @@ stages:
     deps:
     - files
     outs:
     - ${files.directory}/${files.data_dir}/${files.data_file}${files.data_type}
-    - ${files.directory}/${files.model_dir}/${files.model_file}${files.model_type}
-    - ${files.directory}/${files.model_dir}/${files.model_file}.optimizer${files.model_type}
-    # - ${files.directory}/${files.reports}/train/${files.name}/${files.params_file}
-    # - ${files.directory}/${files.reports}/train/${files.name}/${files.test_labels_file} # Omit to save space
+    # - ${files.directory}/${files.model_dir}/${files.model_file}${files.model_type}
+    # - ${files.directory}/${files.model_dir}/${files.model_file}.optimizer${files.model_type}
     - ${files.directory}/${files.reports}/train/${files.name}/${files.predictions_file} # logit outputs for our model
-    # - ${files.directory}/${files.reports}/train/${files.name}/${files.probabilities_file} # Omit to save space
     metrics:
     - ${files.directory}/${files.reports}/train/${files.name}/${files.score_dict_file}
@@ -27,36 +24,18 @@ stages:
     outs:
     - ${files.directory}/${files.attack_dir}/${files.attack_file}${files.attack_type}
     - ${files.directory}/${files.reports}/attack/${files.name}/${files.adv_predictions_file}
-    # - ${files.directory}/${files.reports}/attack/${files.name}/${files.params_file}
     deps:
     - ${files.directory}/${files.data_dir}/${files.data_file}${files.data_type}
-    - ${files.directory}/${files.model_dir}/${files.model_file}${files.model_type}
     metrics:
     - ${files.directory}/${files.reports}/attack/${files.name}/${files.score_dict_file}
   ##############################################################################
-  # models: # This is a loop over the ResNet models
-  #   foreach:
-  #     - ResNet18
-  #     # - ResNet34
-  #     # - ResNet50
-  #     # - ResNet101
-  #     # - ResNet152
-  #   do: # This script configures eazch defence
-  #     cmd: bash models.sh ++model.init.name=torch_example.${item} stage=train ++hydra.sweeper.storage=sqlite:///${files.directory}/${files.reports}/train/${item}.db --config-name cifar100.yaml
-  #     deps:
-  #       - models.sh
-  #       - ${files.directory}/${files.model_dir}/${files.model_file}${files.model_type}
-  #       - ${files.directory}/${files.model_dir}/${files.model_file}.optimizer${files.model_type}
-  #     outs:
-  #       - ${files.directory}/${files.reports}/train/${item}.db: # This outputs a database file for each model
-  #         cache: True
-  #         persist: True
+
 
   attacks:
     foreach: # This is a loop over the ResNet models
-      # - ResNet18
-      # - ResNet34
-      # - ResNet50
+      - ResNet18
+      - ResNet34
+      - ResNet50
       - ResNet101
       - ResNet152
     do:
@@ -82,18 +61,13 @@ stages:
       - ${files.directory}/${files.reports}/${item}/ResNet34.db
      - ${files.directory}/${files.reports}/${item}/ResNet50.db
       - ${files.directory}/${files.reports}/${item}/ResNet101.db
-      # - ${files.directory}/${files.reports}/${item}/ResNet152.db
+      - ${files.directory}/${files.reports}/${item}/ResNet152.db
     outs:
       - ${files.directory}/${files.reports}/${item}.csv
   plot:
     cmd : python -m deckard.layers.plots --path ${files.directory}/plots/ --file ${files.directory}/${files.reports}/attack.csv -o data.csv
     deps:
       - ${files.directory}/${files.reports}/attack.csv
-      - ${files.directory}/${files.reports}/attack/ResNet18.db
-      # - ${files.directory}/${files.reports}/attack/ResNet34.db
-      # - ${files.directory}/${files.reports}/attack/ResNet50.db
-      # - ${files.directory}/${files.reports}/attack/ResNet101.db
-      # - ${files.directory}/${files.reports}/attack/ResNet152.db
     outs:
       - ${files.directory}/plots/data.csv
   afr:

From 2cab18df1576b6d33176aa23dccc624b46b3cea6 Mon Sep 17 00:00:00 2001
From: Charlie Meyers
Date: Wed, 29 Nov 2023 02:55:04 +0000
Subject: [PATCH 2/4] fix timing bug

---
 deckard/base/attack/attack.py | 4 +++-
 deckard/base/model/model.py   | 2 ++
 2 files changed, 5 insertions(+), 1 deletion(-)

diff --git a/deckard/base/attack/attack.py b/deckard/base/attack/attack.py
index 5a02e0ce..45d8e9b0 100644
--- a/deckard/base/attack/attack.py
+++ b/deckard/base/attack/attack.py
@@ -213,11 +213,13 @@ def __call__(
             end = process_time_ns()
         try:
             start = process_time_ns()
+            start_timestamp = time()
             adv_probabilities = model.predict_proba(samples)
             end = process_time_ns()
-
+            end_timestamp = time()
         except AttributeError:
             start = process_time_ns()
+            start_timestamp = time()
             adv_probabilities = model.predict(samples)
             end = process_time_ns()
             end_timestamp = time()
diff --git a/deckard/base/model/model.py b/deckard/base/model/model.py
index 83722b94..3373d0a0 100644
--- a/deckard/base/model/model.py
+++ b/deckard/base/model/model.py
@@ -561,6 +561,8 @@ def predict(self, data=None, model=None, predictions_file=None):
             start = process_time_ns()
             start_timestamp = time()
             predictions = model.predict(data[1])
+            end = process_time_ns() - start
+            end_timestamp = time()
         except NotFittedError as e:  # pragma: no cover
             logger.warning(e)
             logger.warning(f"Model {model} is not fitted. Fitting now.")

From 3eb34d7b553ec3297f7e9ab02da50d7b9fec31dc Mon Sep 17 00:00:00 2001
From: Charlie Meyers
Date: Wed, 29 Nov 2023 02:58:50 +0000
Subject: [PATCH 3/4] linting

---
 deckard/base/attack/attack.py | 2 +-
 deckard/base/model/model.py   | 1 +
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/deckard/base/attack/attack.py b/deckard/base/attack/attack.py
index 45d8e9b0..654aaf4a 100644
--- a/deckard/base/attack/attack.py
+++ b/deckard/base/attack/attack.py
@@ -379,7 +379,7 @@ def __call__(
                 end_timestamp = time()
             else:
                 raise e
-        
+
         time_dict.update(
             {
                 "adv_fit_time_per_sample": (end - start) / (len(samples) * 1e9),
diff --git a/deckard/base/model/model.py b/deckard/base/model/model.py
index 3373d0a0..c9d240b2 100644
--- a/deckard/base/model/model.py
+++ b/deckard/base/model/model.py
@@ -179,6 +179,7 @@ def __call__(self, data: list, model: object, library=None):
             end_timestamp = time()
         elif "should be the same" in str(e).lower():
             import torch
+
             device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
             data[0] = torch.from_numpy(data[0])
             data[1] = torch.from_numpy(data[1])

From cf64d90fe1f0e96831577a4573fdfdf996225701 Mon Sep 17 00:00:00 2001
From: Charlie Meyers
Date: Wed, 29 Nov 2023 12:32:55 +0000
Subject: [PATCH 4/4] fix timing bug

---
 deckard/base/model/model.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/deckard/base/model/model.py b/deckard/base/model/model.py
index c9d240b2..e132405a 100644
--- a/deckard/base/model/model.py
+++ b/deckard/base/model/model.py
@@ -131,8 +131,10 @@ def __call__(self, data: list, model: object, library=None):
             raise NotImplementedError(f"Training library {library} not implemented")
         try:
             start = process_time_ns()
+            start_timestamp = time()
             model.fit(data[0], data[2], **trainer)
             end = process_time_ns() - start
+            end_timestamp = time()
         except np.AxisError:  # pragma: no cover
             from art.utils import to_categorical