Skip to content

Commit

Permalink
Fix timing bug (#145)
Browse files Browse the repository at this point in the history
* fix timing bug
  • Loading branch information
simplymathematics authored Nov 29, 2023
1 parent 39337c1 commit 54b74ee
Show file tree
Hide file tree
Showing 6 changed files with 103 additions and 91 deletions.
63 changes: 42 additions & 21 deletions deckard/base/attack/attack.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
from pathlib import Path
import logging
from copy import deepcopy
from time import process_time_ns
from time import process_time_ns, time
from omegaconf import DictConfig, OmegaConf
from hydra.utils import instantiate
from art.utils import to_categorical, compute_success
Expand Down Expand Up @@ -154,6 +154,7 @@ def __call__(
kwargs.update({"y": data[2][: self.attack_size]})
if "AdversarialPatch" in self.name:
start = process_time_ns()
start_timestamp = time()
patches, _ = atk.generate(ben_samples, **kwargs)
samples = atk.apply_patch(
ben_samples,
Expand All @@ -162,14 +163,16 @@ def __call__(
)
else:
start = process_time_ns()
start_timestamp = time()
samples = atk.generate(ben_samples, **kwargs)
end = process_time_ns()
end_timestamp = time()
time_dict.update(
{
"adv_fit_time_per_sample": (end - start) / (len(samples) * 1e9),
"adv_fit_time": (end - start) / 1e9,
"adv_fit_start_time": start,
"adv_fit_stop_time": end,
"adv_fit_start_time": start_timestamp,
"adv_fit_stop_time": end_timestamp,
},
)
device = str(model.device) if hasattr(model, "device") else "cpu"
Expand Down Expand Up @@ -205,31 +208,34 @@ def __call__(
"predict_proba",
):
start = process_time_ns()
start_timestamp = time()
adv_probabilities = model.model.predict_proba(samples)
end = process_time_ns()
try:
start = process_time_ns()
start_timestamp = time()
adv_probabilities = model.predict_proba(samples)
end = process_time_ns()

end_timestamp = time()
except AttributeError:
start = process_time_ns()
start_timestamp = time()
adv_probabilities = model.predict(samples)
end = process_time_ns()
end_timestamp = time()
time_dict.update(
{
"adv_predict_time_per_sample": (end - start) / (len(samples) * 1e9),
"adv_predict_time": (end - start) / 1e9,
"adv_predict_start_time": start,
"adv_predict_stop_time": end,
"adv_predict_start_time": start_timestamp,
"adv_predict_stop_time": end_timestamp,
},
)
device = str(model.device) if hasattr(model, "device") else "cpu"
time_dict.update({"adv_predict_device": device})
results["adv_probabilities"] = np.array(adv_probabilities)
if adv_probabilities_file is not None:
self.data.save(adv_probabilities, adv_probabilities_file)

if adv_losses_file is not None and Path(adv_losses_file).exists():
adv_loss = self.data.load(adv_losses_file)
results["adv_losses"] = np.array(adv_loss)
Expand Down Expand Up @@ -324,12 +330,15 @@ def __call__(
)
try:
start = process_time_ns()
start_timestamp = time()
samples, _ = atk.poison(
x_trigger=x_trigger,
y_trigger=y_trigger,
x_train=x_train,
y_train=y_train,
)
end = process_time_ns()
end_timestamp = time()
except RuntimeError as e:
if "expected scalar type Long" in str(e):
# if hasattr(y_train, "type"):
Expand Down Expand Up @@ -359,21 +368,24 @@ def __call__(
attack_size=self.attack_size,
)
start = process_time_ns()
start_timestamp = time()
samples, _ = atk.poison(
x_trigger=x_trigger,
y_trigger=y_trigger,
x_train=x_train,
y_train=y_train,
)
end = process_time_ns()
end_timestamp = time()
else:
raise e
end = process_time_ns()

time_dict.update(
{
"adv_fit_time_per_sample": (end - start) / (len(samples) * 1e9),
"adv_fit_time": (end - start) / 1e9,
"adv_fit_start_time": start,
"adv_fit_stop_time": end,
"adv_fit_start_time": start_timestamp,
"adv_fit_stop_time": end_timestamp,
},
)
device = str(model.device) if hasattr(model, "device") else "cpu"
Expand All @@ -397,22 +409,28 @@ def __call__(
"predict_proba",
):
start = process_time_ns()
start_timestamp = time()
adv_probabilities = model.model.predict_proba(samples)
end = process_time_ns()
end_timestamp = time()
try:
start = process_time_ns()
start_timestamp = time()
adv_probabilities = model.predict_proba(samples)
end = process_time_ns()
end_timestamp = time()
except AttributeError:
start = process_time_ns()
start_timestamp = time()
adv_probabilities = model.predict(samples)
end = process_time_ns()
end_timestamp = time()
time_dict.update(
{
"adv_predict_time_per_sample": (end - start) / (len(samples) * 1e9),
"adv_predict_time": (end - start) / 1e9,
"adv_predict_start_time": start,
"adv_predict_stop_time": end,
"adv_predict_start_time": start_timestamp,
"adv_predict_stop_time": end_timestamp,
},
)
device = str(model.device) if hasattr(model, "device") else "cpu"
Expand Down Expand Up @@ -514,25 +532,24 @@ def __call__(
if "MembershipInferenceBlackBox" in self.name:
infer = self.kwargs.pop("infer", {})
fit = self.kwargs.pop("fit", {})
start = process_time_ns()
atk.fit(x=x_train, y=y_train, test_x=x_test, test_y=y_test, **fit)
end = process_time_ns()

x_train = data[0][: self.attack_size]
y_train = data[2][: self.attack_size]
x_test = data[1][: self.attack_size]
y_test = data[3][: self.attack_size]
start = process_time_ns()
start_timestamp = time()
preds = atk.infer(x_test, y_test, **infer)
end = process_time_ns()
end_timestamp = time()
else:
raise NotImplementedError(f"Attack {self.name} not implemented.")
time_dict.update(
{
"adv_fit_time_per_sample": (end - start) / (len(preds) * 1e9),
"adv_fit_time": (end - start) / 1e9,
"adv_fit_start_time": start,
"adv_fit_stop_time": end,
"adv_fit_start_time": start_timestamp,
"adv_fit_stop_time": end_timestamp,
},
)
device = str(model.device) if hasattr(model, "device") else "cpu"
Expand Down Expand Up @@ -606,20 +623,22 @@ def __call__(
attack = deepcopy(self.init)
attack = attack(model=model, attack_size=self.attack_size)
start = process_time_ns()
start_timestamp = time()
attacked_model = attack.extract(
x=data[0][: self.attack_size],
y=data[2][: self.attack_size],
thieved_classifier=thieved_classifier,
**kwargs,
)
end = process_time_ns()
end_timestamp = time()
time_dict.update(
{
"adv_fit_time_per_sample": (end - start)
/ (self.attack_size * 1e9),
"adv_fit_time": (end - start) / 1e9,
"adv_fit_start_time": start,
"adv_fit_stop_time": end,
"adv_fit_start_time": start_timestamp,
"adv_fit_stop_time": end_timestamp,
},
)
device = str(model.device) if hasattr(model, "device") else "cpu"
Expand All @@ -634,14 +653,16 @@ def __call__(
preds = self.data.load(adv_predictions_file)
else:
start = process_time_ns()
start_timestamp = time()
preds = attacked_model.predict(data[1][: self.attack_size])
end = process_time_ns()
end_timestamp = time()
time_dict.update(
{
"adv_predict_time_per_sample": (end - start) / (len(preds) * 1e9),
"adv_predict_time": (end - start) / 1e9,
"adv_predict_start_time": start,
"adv_predict_stop_time": end,
"adv_predict_start_time": start_timestamp,
"adv_predict_stop_time": end_timestamp,
},
)
device = str(model.device) if hasattr(model, "device") else "cpu"
Expand Down
46 changes: 36 additions & 10 deletions deckard/base/model/model.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
import pickle
from dataclasses import dataclass, field, asdict, is_dataclass
from pathlib import Path
from time import process_time_ns
from time import process_time_ns, time
from typing import Union, Dict
from omegaconf import OmegaConf, DictConfig, ListConfig
from copy import deepcopy
Expand Down Expand Up @@ -131,24 +131,30 @@ def __call__(self, data: list, model: object, library=None):
raise NotImplementedError(f"Training library {library} not implemented")
try:
start = process_time_ns()
start_timestamp = time()
model.fit(data[0], data[2], **trainer)
end = process_time_ns() - start
end_timestamp = time()
except np.AxisError: # pragma: no cover
from art.utils import to_categorical

data[2] = to_categorical(data[2])
start = process_time_ns()
start_timestamp = time()
model.fit(data[0], data[2], **trainer)
end = process_time_ns() - start
end_timestamp = time()
except ValueError as e: # pragma: no cover
if "Shape of labels" in str(e):
from art.utils import to_categorical

nb_classes = len(np.unique(data[2]))
data[2] = to_categorical(data[2], nb_classes=nb_classes)
start = process_time_ns()
start_timestamp = time()
model.fit(data[0], data[2], **trainer)
end = process_time_ns() - start
end_timestamp = time()
else:
raise e
except AttributeError as e: # pragma: no cover
Expand All @@ -157,8 +163,10 @@ def __call__(self, data: list, model: object, library=None):
data[0] = np.array(data[0])
data[2] = np.array(data[2])
start = process_time_ns()
start_timestamp = time()
model.fit(data[0], data[2], **trainer)
end = process_time_ns() - start
end_timestamp = time()
except Exception as e:
raise e
except RuntimeError as e: # pragma: no cover
Expand All @@ -167,8 +175,10 @@ def __call__(self, data: list, model: object, library=None):

tf.config.run_functions_eagerly(True)
start = process_time_ns()
start_timestamp = time()
model.fit(data[0], data[2], **trainer)
end = process_time_ns() - start
end_timestamp = time()
elif "should be the same" in str(e).lower():
import torch

Expand All @@ -185,15 +195,17 @@ def __call__(self, data: list, model: object, library=None):
data[2].to(device)
model.model.to(device) if hasattr(model, "model") else model.to(device)
start = process_time_ns()
start_timestamp = time()
model.fit(data[0], data[2], **trainer)
end = process_time_ns() - start
end_timestamp = time()
else:
raise e
time_dict = {
"train_time": (start - end) / 1e9,
"train_time_per_sample": end / (len(data[0]) * 1e9),
"train_time_start": start,
"train_time_end": end,
"train_start_time": start_timestamp,
"train_end_time": end_timestamp,
"train_device": device,
}

Expand Down Expand Up @@ -550,7 +562,10 @@ def predict(self, data=None, model=None, predictions_file=None):
device = str(model.device) if hasattr(model, "device") else "cpu"
try:
start = process_time_ns()
start_timestamp = time()
predictions = model.predict(data[1])
end = process_time_ns() - start
end_timestamp = time()
except NotFittedError as e: # pragma: no cover
logger.warning(e)
logger.warning(f"Model {model} is not fitted. Fitting now.")
Expand All @@ -568,15 +583,16 @@ def predict(self, data=None, model=None, predictions_file=None):
logger.error(e)
raise e
end = process_time_ns() - start
end_timestamp = time()
if predictions_file is not None:
self.data.save(predictions, predictions_file)
return (
predictions,
{
"predict_time": (start - end) / 1e9,
"predict_time_per_sample": start - end / (len(data[0]) * 1e9),
"predict_start_time": start,
"predict_stop_time": end,
"predict_time_per_sample": (start - end) / (len(data[0]) * 1e9),
"predict_start_time": start_timestamp,
"predict_stop_time": end_timestamp,
"predict_device": device,
},
)
Expand Down Expand Up @@ -612,21 +628,25 @@ def predict_proba(self, data=None, model=None, probabilities_file=None):
)
elif hasattr(model, "predict_proba"):
start = process_time_ns()
start_timestamp = time()
predictions = model.predict_proba(data[1])
end = process_time_ns() - start
end_timestamp = time()
else:
start = process_time_ns()
start_timestamp = time()
predictions = model.predict(data[1])
end = process_time_ns() - start
end_timestamp = time()
if probabilities_file is not None:
self.data.save(predictions, probabilities_file)
return (
predictions,
{
"predict_proba_time": (start - end) / 1e9,
"predict_proba_time_per_sample": end / (len(data[0]) * 1e9),
"predict_proba_start_time": start,
"predict_proba_stop_time": end,
"predict_proba_start_time": start_timestamp,
"predict_proba_stop_time": end_timestamp,
"predict_proba_device": device,
},
)
Expand Down Expand Up @@ -661,16 +681,22 @@ def predict_log_loss(self, data, model, losses_file=None):
)
if hasattr(model, "predict_log_proba"):
start = process_time_ns()
start_timestamp = time()
predictions = model.predict_log_proba(data[1])
end = process_time_ns() - start
end_timestamp = time()
elif hasattr(model, "predict_proba"):
start = process_time_ns()
start_timestamp = time()
predictions = model.predict_proba(data[1])
end = process_time_ns() - start
end_timestamp = time()
elif hasattr(model, "predict"):
start = process_time_ns()
start_timestamp = time()
predictions = model.predict(data[1])
end = process_time_ns() - start
end_timestamp = time()
else: # pragma: no cover
raise ValueError(
f"Model {model} does not have a predict_log_proba or predict_proba method.",
Expand All @@ -682,8 +708,8 @@ def predict_log_loss(self, data, model, losses_file=None):
{
"predict_log_proba_time": (start - end) / 1e9,
"predict_log_proba_time_per_sample": end / (len(data[0]) * 1e9),
"predict_log_proba_start_time": start,
"predict_log_proba_stop_time": end,
"predict_log_proba_start_time": start_timestamp,
"predict_log_proba_stop_time": end_timestamp,
"predict_log_device": device,
},
)
Expand Down
Loading

0 comments on commit 54b74ee

Please sign in to comment.