From bb0849437bdac0cc67b06eb7069769efe26adb59 Mon Sep 17 00:00:00 2001
From: Charlie Meyers
Date: Thu, 30 Nov 2023 23:55:54 +0000
Subject: [PATCH] fix scoring bug

---
 deckard/base/model/art_pipeline.py | 10 ++++++----
 deckard/layers/optimise.py         | 32 ++++++++++++++++++++----------
 examples/pytorch/cifar100/dvc.lock | 18 ++++++++---------
 3 files changed, 37 insertions(+), 23 deletions(-)

diff --git a/deckard/base/model/art_pipeline.py b/deckard/base/model/art_pipeline.py
index d73fb0de..2c6b524b 100644
--- a/deckard/base/model/art_pipeline.py
+++ b/deckard/base/model/art_pipeline.py
@@ -6,6 +6,7 @@
 from hydra.utils import instantiate
 from omegaconf import DictConfig, OmegaConf
 import numpy as np
+from random import randint
 from .keras_models import KerasInitializer, keras_dict  # noqa F401
 from .tensorflow_models import (  # noqa F401
     TensorflowV1Initializer,
@@ -64,21 +65,22 @@ def __call__(self):
             tuple(torch_dict.values()),
         ):
             import torch
-
             device_type = "gpu" if torch.cuda.is_available() else "cpu"
             if device_type == "gpu":
                 logger.info("Using GPU")
                 logger.info("Model moved to GPU")
-                device = torch.device("cuda")
-                model.to(device)
+                number_of_devices = torch.cuda.device_count()
+                num = randint(0, number_of_devices - 1)
+                device = torch.device(f"cuda:{num}")
             if isinstance(data[0][0], np.ndarray):
                 data = [torch.from_numpy(d).to(device) for d in data]
             data = [d.to(device) for d in data]
+            model.to(device)
             model = TorchInitializer(
                 data=data,
                 model=model,
                 library=library,
-                device_type=device_type,
+                device_type=device,
                 **kwargs,
             )()
         elif "keras" in str(library) and not isinstance(
diff --git a/deckard/layers/optimise.py b/deckard/layers/optimise.py
index 691b97c8..72a5280c 100644
--- a/deckard/layers/optimise.py
+++ b/deckard/layers/optimise.py
@@ -269,7 +269,7 @@ def optimise(cfg: DictConfig) -> None:
     working_dir = Path(config_path).parent
     direction = cfg.get("direction", "minimize")
     direction = [direction] if not isinstance(direction, list) else direction
-    optimizers = cfg.get("optimizers")
+    optimizers = cfg.get("optimizers", None)
     stage = cfg.pop("stage", None)
     cfg = parse_stage(params=cfg, stage=stage, path=working_dir)
     exp = instantiate(cfg)
@@ -280,9 +280,22 @@ def optimise(cfg: DictConfig) -> None:
     id_ = Path(files["score_dict_file"]).parent.name
     optimizers = [optimizers] if not isinstance(optimizers, list) else optimizers
     try:
-        scores = exp()
-        score = [v for k, v in scores.items() if k in optimizers]
-        logger.info(f"Score is : {score}")
+        score_dict = exp()
+        scores = []
+        i = 0
+        for optimizer in optimizers:
+            if optimizer in score_dict:
+                scores.append(score_dict[optimizer])
+            else:
+                if direction[i] == "minimize":
+                    scores.append(1e10)
+                elif direction[i] == "maximize":
+                    scores.append(-1e10)
+                else:
+                    scores.append(None)
+            i += 1
+        logger.info(f"Optimizers are : {optimizers}")
+        logger.info(f"Score is : {scores}")
     except Exception as e:
         logger.warning(
             f"Exception {e} occurred while running experiment {id_}. Setting score to default for specified direction (e.g. -/+ 1e10).",
@@ -297,14 +310,13 @@ def optimise(cfg: DictConfig) -> None:
             elif direction == "maximize":
                 fake_scores.append(-1e10)
             else:
-                raise ValueError(f"Unknown direction {direction}")
-        score = fake_scores
-        logger.info(f"Score: {score}")
+                fake_scores.append(None)
+        logger.info(f"Optimizers: {optimizers}")
+        logger.info(f"Score: {scores}")
         if raise_exception:
             raise e
-    if len(score) == 1:
-        score = score[0]
-    return score
+        scores = fake_scores
+    return scores
 
 
 if __name__ == "__main__":
diff --git a/examples/pytorch/cifar100/dvc.lock b/examples/pytorch/cifar100/dvc.lock
index fd146949..78753927 100644
--- a/examples/pytorch/cifar100/dvc.lock
+++ b/examples/pytorch/cifar100/dvc.lock
@@ -100,11 +100,11 @@ stages:
       md5: 1070854e6c00fc787bc0fdfc82792fd6
       size: 761280311
     - path: cifar100/reports/train/default/predictions.json
-      md5: bb21d60526f3a7d2f8f4f64df504a1f4
-      size: 24415348
+      md5: 406b5670c6fc9741adf82a3922f5d5f2
+      size: 24401460
     - path: cifar100/reports/train/default/score_dict.json
-      md5: fc2ab064fdf09c12751ed4f72af83bd7
-      size: 901
+      md5: 39cfcc1fc0d5bef2dafef2eb0295d934
+      size: 885
   attack:
     cmd: python -m deckard.layers.experiment attack --config_file cifar100.yaml
     deps:
@@ -326,14 +326,14 @@ stages:
       name: sklearn.metrics.log_loss
     outs:
     - path: cifar100/attacks/attack.pkl
-      md5: 5317760d3c6f266ece07523e98517d46
+      md5: a80777574854b5cefe05d26ca2a8ba7c
       size: 123046
     - path: cifar100/reports/attack/default/adv_predictions.json
-      md5: 321ea49f7061832db3c5142f0ad7b042
-      size: 21395
+      md5: d116bdf64db4874f0681efc5622b007c
+      size: 21393
    - path: cifar100/reports/attack/default/score_dict.json
-      md5: e0afe623a549e81ca7d44a65ce362ee2
-      size: 1356
+      md5: 73c82dda7d68f34ea2004e5bde1abd95
+      size: 1327
   attacks@ResNet18:
     cmd: bash attacks.sh ++attack.attack_size=100 ++model.init.name=torch_example.ResNet18 stage=attack ++hydra.sweeper.storage=sqlite:///cifar100/reports/attack/ResNet18.db
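
Note on the art_pipeline.py hunk: instead of always constructing torch.device("cuda"), the pipeline now draws a device index at random from torch.cuda.device_count(), so concurrent runs spread across the available GPUs. Below is a minimal standalone sketch of that selection logic, assuming only that PyTorch is installed; the helper name pick_device, the toy model and batch, and the explicit CPU fallback are illustrative additions for this sketch and are not part of the patch, which only builds a device inside the GPU branch.

from random import randint

import torch


def pick_device() -> torch.device:
    # Choose a random CUDA device when GPUs are present so parallel runs
    # do not all pile onto cuda:0; fall back to CPU otherwise (sketch-only fallback).
    if torch.cuda.is_available():
        number_of_devices = torch.cuda.device_count()
        return torch.device(f"cuda:{randint(0, number_of_devices - 1)}")
    return torch.device("cpu")


device = pick_device()
model = torch.nn.Linear(8, 2).to(device)   # toy stand-in for the ResNet used in the example
batch = torch.randn(4, 8, device=device)   # toy stand-in for a CIFAR batch
print(device, model(batch).shape)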
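
Note on the optimise.py hunks: rather than filtering the returned score dict against the optimizer names (which yields an empty list when a requested metric is missing), the layer now returns one score per requested optimizer and substitutes a worst-case value of +/- 1e10, chosen by the matching direction, whenever a metric is absent or the experiment fails. A minimal standalone sketch of that fallback rule, assuming one direction per optimizer; the helper name select_scores and the example values are hypothetical and only the rule itself mirrors the patched code.

from typing import Dict, List, Optional


def select_scores(
    score_dict: Dict[str, float],
    optimizers: List[str],
    directions: List[str],
) -> List[Optional[float]]:
    # One entry per requested optimizer; missing metrics become the worst
    # possible value for the given optimisation direction.
    scores: List[Optional[float]] = []
    for optimizer, direction in zip(optimizers, directions):
        if optimizer in score_dict:
            scores.append(score_dict[optimizer])
        elif direction == "minimize":
            scores.append(1e10)   # worst case for a minimised objective
        elif direction == "maximize":
            scores.append(-1e10)  # worst case for a maximised objective
        else:
            scores.append(None)   # unknown direction: no sensible default
    return scores


# Example: "accuracy" is present in the score dict, "adv_accuracy" is not.
print(select_scores({"accuracy": 0.71}, ["accuracy", "adv_accuracy"], ["maximize", "maximize"]))
# -> [0.71, -1e+10]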