diff --git a/deckard/base/attack/attack.py b/deckard/base/attack/attack.py index 5df0748c..15ac8e8a 100644 --- a/deckard/base/attack/attack.py +++ b/deckard/base/attack/attack.py @@ -55,13 +55,13 @@ def __hash__(self): return int(my_hash(self), 16) def __call__(self, model=None, data=None, attack_size=-1): - logger.info(f"Fitting attack {self.name} with id: {self.__hash__()}") + logger.debug(f"Fitting attack {self.name} with id: {self.__hash__()}") name = self.name kwargs = deepcopy(self.kwargs) pop_list = ["extract", "poison", "evade", "reconstruct", "infer"] for thing in pop_list: kwargs.pop(thing, None) - logger.info(f"Initializing attack {name} with parameters {kwargs}") + logger.debug(f"Initializing attack {name} with parameters {kwargs}") if "x_train" in kwargs: assert ( data is not None @@ -91,7 +91,7 @@ def __call__(self, model=None, data=None, attack_size=-1): else: kwargs["y_val"] = y_test try: - logger.info("Attempting black-box attack.") + logger.debug("Attempting black-box attack.") config = {"_target_": name} config.update(**kwargs) attack = instantiate(config, model) @@ -134,7 +134,7 @@ def __init__( self.attack_size = attack_size self.init = AttackInitializer(model, name, **init) self.kwargs = kwargs - logger.info("Instantiating Attack with id: {}".format(self.__hash__())) + logger.debug("Instantiating Attack with id: {}".format(self.__hash__())) def __hash__(self): return int(my_hash(self), 16) @@ -300,7 +300,7 @@ def __init__( self.attack_size = attack_size self.init = AttackInitializer(model, name, **init) self.kwargs = kwargs - logger.info("Instantiating Attack with id: {}".format(self.__hash__())) + logger.debug("Instantiating Attack with id: {}".format(self.__hash__())) def __hash__(self): return int(my_hash(self), 16) @@ -493,7 +493,7 @@ def __init__( self.attack_size = attack_size self.init = AttackInitializer(model, name, **init) self.kwargs = kwargs - logger.info("Instantiating Attack with id: {}".format(self.__hash__())) + 
logger.debug("Instantiating Attack with id: {}".format(self.__hash__())) def __hash__(self): return int(my_hash(self), 16) @@ -618,7 +618,7 @@ def __init__( f"kwargs must be of type DictConfig or dict. Got {type(kwargs)}", ) self.kwargs = kwargs - logger.info("Instantiating Attack with id: {}".format(self.__hash__())) + logger.debug("Instantiating Attack with id: {}".format(self.__hash__())) def __hash__(self): return int(my_hash(self), 16) @@ -813,7 +813,7 @@ def __init__( kwargs.update(**kwargs.pop("kwargs")) self.kwargs = kwargs self.name = name if name is not None else my_hash(self) - logger.info("Instantiating Attack with id: {}".format(self.name)) + logger.debug("Instantiating Attack with id: {}".format(self.name)) def __call__( self, diff --git a/deckard/base/data/data.py b/deckard/base/data/data.py index 7b1859ea..bc7c7ea3 100644 --- a/deckard/base/data/data.py +++ b/deckard/base/data/data.py @@ -148,7 +148,7 @@ def save(self, data, filename): :param filename: str """ if filename is not None: - logger.info(f"Saving data to {filename}") + logger.debug(f"Saving data to {filename}") suffix = Path(filename).suffix Path(filename).parent.mkdir(parents=True, exist_ok=True) if isinstance(data, dict): diff --git a/deckard/base/data/generator.py b/deckard/base/data/generator.py index 5a6f69bb..0dedf98e 100644 --- a/deckard/base/data/generator.py +++ b/deckard/base/data/generator.py @@ -51,7 +51,7 @@ class SklearnDataGenerator: kwargs: dict = field(default_factory=dict) def __init__(self, name, **kwargs): - logger.info( + logger.debug( f"Instantiating {self.__class__.__name__} with name={name} and kwargs={kwargs}", ) self.name = name @@ -91,7 +91,7 @@ class TorchDataGenerator: kwargs: dict = field(default_factory=dict) def __init__(self, name, path=None, **kwargs): - logger.info( + logger.debug( f"Instantiating {self.__class__.__name__} with name={name} and kwargs={kwargs}", ) self.name = name @@ -179,7 +179,7 @@ class KerasDataGenerator: kwargs: dict =
field(default_factory=dict) def __init__(self, name, **kwargs): - logger.info( + logger.debug( f"Instantiating {self.__class__.__name__} with name={name} and kwargs={kwargs}", ) self.name = name diff --git a/deckard/base/data/sampler.py b/deckard/base/data/sampler.py index 9f95c6a1..a13053ba 100644 --- a/deckard/base/data/sampler.py +++ b/deckard/base/data/sampler.py @@ -37,7 +37,7 @@ def __init__( self.time_series = time_series def __call__(self, X, y): - logger.info(f"Calling SklearnDataSampler with params {asdict(self)}") + logger.debug(f"Calling SklearnDataSampler with params {asdict(self)}") params = deepcopy(asdict(self)) stratify = params.pop("stratify", False) if stratify is True: diff --git a/deckard/base/data/sklearn_pipeline.py b/deckard/base/data/sklearn_pipeline.py index 7880a082..88e93a57 100644 --- a/deckard/base/data/sklearn_pipeline.py +++ b/deckard/base/data/sklearn_pipeline.py @@ -63,7 +63,7 @@ def __iter__(self): return iter(self.pipeline) def __call__(self, X_train, X_test, y_train, y_test): - logger.info( + logger.debug( "Calling SklearnDataPipeline with pipeline={}".format(self.pipeline), ) pipeline = deepcopy(self.pipeline) diff --git a/deckard/base/experiment/experiment.py b/deckard/base/experiment/experiment.py index 8384eeb0..b3559e90 100644 --- a/deckard/base/experiment/experiment.py +++ b/deckard/base/experiment/experiment.py @@ -118,7 +118,7 @@ def __call__(self, **kwargs): :type scorer: str :return: The score for the specified scorer or the status of the experiment if scorer=None (default). """ - logger.info("Running experiment with id: {}".format(self.get_name())) + logger.debug("Running experiment with id: {}".format(self.get_name())) # Setup files, data, and model files = deepcopy(self.files).get_filenames() @@ -236,8 +236,8 @@ def __call__(self, **kwargs): self.data.save(score_dict, files["score_dict_file"]) else: # pragma: no cover raise ValueError("Scorer is None. 
Please specify a scorer.") - logger.info(f"Score for id : {self.get_name()}: {score_dict}") - logger.info("Finished running experiment with id: {}".format(self.get_name())) + logger.debug(f"Score for id : {self.get_name()}: {score_dict}") + logger.debug("Finished running experiment with id: {}".format(self.get_name())) return score_dict def _set_name(self): diff --git a/deckard/base/model/art_pipeline.py b/deckard/base/model/art_pipeline.py index 8f5a75d4..4ced8fa8 100644 --- a/deckard/base/model/art_pipeline.py +++ b/deckard/base/model/art_pipeline.py @@ -75,7 +75,7 @@ def __call__(self): data = [torch.from_numpy(d).to(device) for d in data] data = [d.to(device) for d in data] model.to(device) - logger.info(f"Model moved to GPU: {device}") + logger.debug(f"Model moved to GPU: {device}") else: device = torch.device("cpu") model = TorchInitializer( diff --git a/deckard/base/model/model.py b/deckard/base/model/model.py index 06082735..09622957 100644 --- a/deckard/base/model/model.py +++ b/deckard/base/model/model.py @@ -70,7 +70,7 @@ def __init__(self, **kwargs): self.kwargs = kwargs def __call__(self, data: list, model: object, library=None): - logger.info(f"Training model {model} with fit params: {self.kwargs}") + logger.debug(f"Training model {model} with fit params: {self.kwargs}") device = str(model.device) if hasattr(model, "device") else "cpu" trainer = self.kwargs if library in sklearn_dict.keys(): @@ -91,7 +91,7 @@ def __call__(self, data: list, model: object, library=None): try: start = process_time_ns() start_timestamp = time() - logger.info(f"Fitting type(model): {type(model)} with kwargs {trainer}") + logger.debug(f"Fitting type(model): {type(model)} with kwargs {trainer}") model.fit(data[0], data[2], **trainer) end = process_time_ns() end_timestamp = time() diff --git a/deckard/base/model/tensorflow_models.py b/deckard/base/model/tensorflow_models.py index d9796d43..12277b39 100644 --- a/deckard/base/model/tensorflow_models.py +++ 
b/deckard/base/model/tensorflow_models.py @@ -44,7 +44,7 @@ def __call__(self): import tensorflow as tf tf.config.run_functions_eagerly(True) - logger.info(f"Initializing model {self.name} with kwargs {self.kwargs}") + logger.debug(f"Initializing model {self.name} with kwargs {self.kwargs}") if len(self.kwargs) > 0: config = {"class_name": self.name, "config": self.kwargs} else: @@ -159,7 +159,7 @@ def __call__(self): import tensorflow as tf tf.config.run_functions_eagerly(True) - logger.info(f"Initializing model {self.name} with kwargs {self.kwargs}") + logger.debug(f"Initializing model {self.name} with kwargs {self.kwargs}") if "kwargs" in self.kwargs: kwargs = self.kwargs.pop("kwargs", {}) params = self.kwargs @@ -184,7 +184,7 @@ def __call__(self): import tensorflow as tf tf.config.run_functions_eagerly(True) - logger.info(f"Initializing model {self.name} with kwargs {self.kwargs}") + logger.debug(f"Initializing model {self.name} with kwargs {self.kwargs}") if "kwargs" in self.kwargs: kwargs = self.kwargs.pop("kwargs", {}) params = self.kwargs @@ -296,7 +296,7 @@ def __init__(self, name, **kwargs): def __call__(self): import tensorflow as tf - logger.info(f"Initializing model {self.name} with kwargs {self.kwargs}") + logger.debug(f"Initializing model {self.name} with kwargs {self.kwargs}") if "kwargs" in self.kwargs: kwargs = self.kwargs.pop("kwargs", {}) params = self.kwargs diff --git a/deckard/base/model/torch_models.py b/deckard/base/model/torch_models.py index 748c6017..87d4ba7b 100644 --- a/deckard/base/model/torch_models.py +++ b/deckard/base/model/torch_models.py @@ -35,7 +35,7 @@ def __init__(self, name, **kwargs): self.kwargs = kwargs def __call__(self): - logger.info(f"Initializing model {self.name} with kwargs {self.kwargs}") + logger.debug(f"Initializing model {self.name} with kwargs {self.kwargs}") params = self.kwargs name = params.pop("_target_", self.name) dict_ = {"_target_": name} @@ -54,7 +54,7 @@ def __init__(self, name, **kwargs): 
self.kwargs = kwargs def __call__(self, model): - logger.info(f"Initializing model {self.name} with kwargs {self.kwargs}") + logger.debug(f"Initializing model {self.name} with kwargs {self.kwargs}") params = self.kwargs name = params.pop("_target_", self.name) dict_ = {"_target_": name}