diff --git a/project/algorithms/example.py b/project/algorithms/example.py
index 11536ed8..638dbfe5 100644
--- a/project/algorithms/example.py
+++ b/project/algorithms/example.py
@@ -20,8 +20,8 @@
 from torch.optim.lr_scheduler import _LRScheduler
 
 from project.algorithms.callbacks.classification_metrics import ClassificationMetricsCallback
-from project.configs.lr_scheduler import CosineAnnealingLRConfig
-from project.configs.optimizer import AdamConfig
+from project.configs.algorithm.lr_scheduler import CosineAnnealingLRConfig
+from project.configs.algorithm.optimizer import AdamConfig
 from project.datamodules.image_classification import ImageClassificationDataModule
 
 logger = getLogger(__name__)
@@ -133,6 +133,10 @@ def configure_callbacks(self) -> list[Callback]:
             # Log some classification metrics. (This callback adds some metrics on this module).
             ClassificationMetricsCallback.attach_to(self, num_classes=self.datamodule.num_classes)
         )
+        if self.hp.lr_scheduler_frequency != 0:
+            from lightning.pytorch.callbacks import LearningRateMonitor
+
+            callbacks.append(LearningRateMonitor())
         if self.hp.early_stopping_patience != 0:
             # If early stopping is enabled, add a Callback for it:
             callbacks.append(
diff --git a/project/configs/__init__.py b/project/configs/__init__.py
index c8d8fc85..814117fe 100644
--- a/project/configs/__init__.py
+++ b/project/configs/__init__.py
@@ -2,12 +2,10 @@
 
 from hydra.core.config_store import ConfigStore
 
-from project.configs.algorithm import algorithm_store, populate_algorithm_store
+from project.configs.algorithm import register_algorithm_configs
 from project.configs.config import Config
 from project.configs.datamodule import datamodule_store
-from project.configs.lr_scheduler import lr_scheduler_store
 from project.configs.network import network_store
-from project.configs.optimizer import optimizer_store
 from project.utils.env_vars import REPO_ROOTDIR, SLURM_JOB_ID, SLURM_TMPDIR
 
 cs = ConfigStore.instance()
@@ -17,10 +15,7 @@
 def add_configs_to_hydra_store():
     datamodule_store.add_to_hydra_store()
     network_store.add_to_hydra_store()
-    optimizer_store.add_to_hydra_store()
-    lr_scheduler_store.add_to_hydra_store()
-    populate_algorithm_store()
-    algorithm_store.add_to_hydra_store()
+    register_algorithm_configs()
 
 
 # todo: move the algorithm_store.add_to_hydra_store() here?
diff --git a/project/configs/algorithm/__init__.py b/project/configs/algorithm/__init__.py
index b8c08718..de418e63 100644
--- a/project/configs/algorithm/__init__.py
+++ b/project/configs/algorithm/__init__.py
@@ -1,6 +1,9 @@
 from hydra_zen import make_custom_builds_fn, store
 from hydra_zen.third_party.pydantic import pydantic_parser
 
+from .lr_scheduler import lr_scheduler_store
+from .optimizer import optimizer_store
+
 builds_fn = make_custom_builds_fn(
     zen_partial=True, populate_full_signature=True, zen_wrappers=pydantic_parser
 )
@@ -15,10 +18,14 @@
 algorithm_store = store(group="algorithm")
 
 
-def populate_algorithm_store():
+def register_algorithm_configs():
     # Note: import here to avoid circular imports.
     from project.algorithms import ExampleAlgorithm, JaxExample, NoOp
 
-    algorithm_store(builds_fn(ExampleAlgorithm), name="example_algo")
+    algorithm_store(builds_fn(ExampleAlgorithm), name="example")
     algorithm_store(builds_fn(NoOp), name="no_op")
     algorithm_store(builds_fn(JaxExample), name="jax_example")
+
+    optimizer_store.add_to_hydra_store()
+    lr_scheduler_store.add_to_hydra_store()
+    algorithm_store.add_to_hydra_store()
diff --git a/project/configs/algorithm/example_from_config.yaml b/project/configs/algorithm/example_from_config.yaml
index 5557e38b..229103e1 100644
--- a/project/configs/algorithm/example_from_config.yaml
+++ b/project/configs/algorithm/example_from_config.yaml
@@ -1,7 +1,10 @@
 defaults:
-  - optimizer/adam@hp.optimizer
-  - lr_scheduler/step_lr@hp.lr_scheduler
+  # Apply the `algorithm/optimizer/Adam` config at `hp.optimizer` in this config.
+  - optimizer/Adam@hp.optimizer
+  - lr_scheduler/StepLR@hp.lr_scheduler
 _target_: project.algorithms.example.ExampleAlgorithm
 _partial_: true
 hp:
   _target_: project.algorithms.example.ExampleAlgorithm.HParams
+  lr_scheduler:
+    step_size: 1 # Required argument for the StepLR scheduler. (reduce LR every {step_size} epochs)
diff --git a/project/configs/algorithm/lr_scheduler/__init__.py b/project/configs/algorithm/lr_scheduler/__init__.py
new file mode 100644
index 00000000..9ced2b9a
--- /dev/null
+++ b/project/configs/algorithm/lr_scheduler/__init__.py
@@ -0,0 +1,63 @@
+import dataclasses
+import inspect
+from logging import getLogger as get_logger
+
+import torch
+import torch.optim.lr_scheduler
+from hydra_zen import make_custom_builds_fn, store
+
+logger = get_logger(__name__)
+
+builds_fn = make_custom_builds_fn(zen_partial=True, populate_full_signature=True)
+
+# LR Schedulers whose constructors have arguments with missing defaults have to be created manually,
+# because we otherwise get some errors if we try to use them (e.g. T_max doesn't have a default.)
+
+CosineAnnealingLRConfig = builds_fn(torch.optim.lr_scheduler.CosineAnnealingLR, T_max="???")
+StepLRConfig = builds_fn(torch.optim.lr_scheduler.StepLR, step_size="???")
+lr_scheduler_store = store(group="algorithm/lr_scheduler")
+lr_scheduler_store(StepLRConfig, name="StepLR")
+lr_scheduler_store(CosineAnnealingLRConfig, name="CosineAnnealingLR")
+
+
+# IDEA: Could be interesting to generate configs for any member of the torch.optim.lr_scheduler
+# package dynamically (and store it)?
+# def __getattr__(self, name: str):
+#     """"""
+
+_configs_defined_so_far = [k for k, v in locals().items() if dataclasses.is_dataclass(v)]
+for scheduler_name, scheduler_type in [
+    (_name, _obj)
+    for _name, _obj in vars(torch.optim.lr_scheduler).items()
+    if inspect.isclass(_obj)
+    and issubclass(_obj, torch.optim.lr_scheduler.LRScheduler)
+    and _obj is not torch.optim.lr_scheduler.LRScheduler
+]:
+    _config_name = f"{scheduler_name}Config"
+    if _config_name in _configs_defined_so_far:
+        # We already have a hand-made config for this scheduler. Skip it.
+        continue
+
+    _lr_scheduler_config = builds_fn(scheduler_type, zen_dataclass={"cls_name": _config_name})
+    lr_scheduler_store(_lr_scheduler_config, name=scheduler_name)
+    logger.debug(f"Registering config for the {scheduler_type} LR scheduler.")
+
+
+def __getattr__(config_name: str):
+    if not config_name.endswith("Config"):
+        raise AttributeError
+    scheduler_name = config_name.removesuffix("Config")
+    # the keys for the config store are tuples of the form (group, config_name)
+    group = "algorithm/lr_scheduler"
+    store_key = (group, scheduler_name)
+    if store_key in lr_scheduler_store[group]:
+        logger.debug(f"Dynamically retrieving the config for the {scheduler_name} LR scheduler.")
+        return lr_scheduler_store[store_key]
+    available_configs = sorted(
+        config_name for (_group, config_name) in lr_scheduler_store[group].keys()
+    )
+    logger.error(
+        f"Unable to find the config for {scheduler_name=}. Available configs: {available_configs}."
+    )
+
+    raise AttributeError
diff --git a/project/configs/algorithm/optimizer/__init__.py b/project/configs/algorithm/optimizer/__init__.py
new file mode 100644
index 00000000..7c0a8557
--- /dev/null
+++ b/project/configs/algorithm/optimizer/__init__.py
@@ -0,0 +1,45 @@
+import inspect
+from logging import getLogger as get_logger
+
+import torch
+import torch.optim
+from hydra_zen import make_custom_builds_fn, store
+
+logger = get_logger(__name__)
+builds_fn = make_custom_builds_fn(zen_partial=True, populate_full_signature=True)
+
+optimizer_store = store(group="algorithm/optimizer")
+# AdamConfig = builds_fn(torch.optim.Adam)
+# SGDConfig = builds_fn(torch.optim.SGD)
+# optimizer_store(AdamConfig, name="adam")
+# optimizer_store(SGDConfig, name="sgd")
+
+for optimizer_name, optimizer_type in [
+    (k, v)
+    for k, v in vars(torch.optim).items()
+    if inspect.isclass(v)
+    and issubclass(v, torch.optim.Optimizer)
+    and v is not torch.optim.Optimizer
+]:
+    _algo_config = builds_fn(optimizer_type, zen_dataclass={"cls_name": f"{optimizer_name}Config"})
+    optimizer_store(_algo_config, name=optimizer_name)
+    logger.debug(f"Registering config for the {optimizer_type} optimizer.")
+
+
+def __getattr__(config_name: str):
+    if not config_name.endswith("Config"):
+        raise AttributeError
+    optimizer_name = config_name.removesuffix("Config")
+    # the keys for the config store are tuples of the form (group, config_name)
+    store_key = ("algorithm/optimizer", optimizer_name)
+    if store_key in optimizer_store["algorithm/optimizer"]:
+        logger.debug(f"Dynamically retrieving the config for the {optimizer_name} optimizer.")
+        return optimizer_store[store_key]
+    available_optimizers = sorted(
+        optimizer_name for (_, optimizer_name) in optimizer_store["algorithm/optimizer"].keys()
+    )
+    logger.error(
+        f"Unable to find the config for optimizer {optimizer_name}. Available optimizers: {available_optimizers}."
+    )
+
+    raise AttributeError
diff --git a/project/configs/optimizer/adamw.yaml b/project/configs/algorithm/optimizer/adamw.yaml
similarity index 100%
rename from project/configs/optimizer/adamw.yaml
rename to project/configs/algorithm/optimizer/adamw.yaml
diff --git a/project/configs/datamodule/vision.yaml b/project/configs/datamodule/vision.yaml
index e3f10b79..561a36b1 100644
--- a/project/configs/datamodule/vision.yaml
+++ b/project/configs/datamodule/vision.yaml
@@ -1,3 +1,4 @@
+# todo: This config should not show up as an option on the command-line.
 _target_: project.datamodules.VisionDataModule
 data_dir: ${constant:DATA_DIR}
 num_workers: ${constant:NUM_WORKERS}
diff --git a/project/configs/lr_scheduler/__init__.py b/project/configs/lr_scheduler/__init__.py
deleted file mode 100644
index 1e8101c1..00000000
--- a/project/configs/lr_scheduler/__init__.py
+++ /dev/null
@@ -1,20 +0,0 @@
-import torch
-import torch.optim.lr_scheduler
-from hydra_zen import make_custom_builds_fn, store
-from hydra_zen.third_party.pydantic import pydantic_parser
-
-builds_fn = make_custom_builds_fn(
-    zen_partial=True, populate_full_signature=True, zen_wrappers=pydantic_parser
-)
-
-CosineAnnealingLRConfig = builds_fn(torch.optim.lr_scheduler.CosineAnnealingLR, T_max=85)
-StepLRConfig = builds_fn(torch.optim.lr_scheduler.CosineAnnealingLR)
-lr_scheduler_store = store(group="algorithm/lr_scheduler")
-lr_scheduler_store(StepLRConfig, name="step_lr")
-lr_scheduler_store(CosineAnnealingLRConfig, name="cosine_annealing_lr")
-
-
-# IDEA: Could be interesting to generate configs for any member of the torch.optimizer.lr_scheduler
-# package dynamically (and store it)?
-# def __getattr__(self, name: str):
-#     """"""
diff --git a/project/configs/optimizer/__init__.py b/project/configs/optimizer/__init__.py
deleted file mode 100644
index 704b40e5..00000000
--- a/project/configs/optimizer/__init__.py
+++ /dev/null
@@ -1,14 +0,0 @@
-import torch
-import torch.optim
-from hydra_zen import make_custom_builds_fn, store
-from hydra_zen.third_party.pydantic import pydantic_parser
-
-builds_fn = make_custom_builds_fn(
-    zen_partial=True, populate_full_signature=True, zen_wrappers=pydantic_parser
-)
-
-optimizer_store = store(group="algorithm/optimizer")
-AdamConfig = builds_fn(torch.optim.Adam)
-SGDConfig = builds_fn(torch.optim.SGD)
-optimizer_store(AdamConfig, name="adam")
-optimizer_store(SGDConfig, name="sgd")
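
For reference, and not part of the patch: a minimal sketch of how the relocated config modules behave once this change is applied. Hand-written configs (e.g. CosineAnnealingLRConfig) are ordinary module attributes, while the configs generated in the loops above are resolved at import time by the new module-level __getattr__ hooks. The SGDConfig import below is an illustrative assumption; any optimizer registered by the loop would resolve the same way.

    # Hand-written config: a regular attribute of the lr_scheduler config module.
    from project.configs.algorithm.lr_scheduler import CosineAnnealingLRConfig

    # Generated config: not a module global, so the import goes through __getattr__,
    # which looks up ("algorithm/optimizer", "SGD") in the hydra-zen store.
    from project.configs.algorithm.optimizer import SGDConfig

    # Both names refer to zen-partial config dataclasses wrapping the torch classes.
    print(SGDConfig)
    print(CosineAnnealingLRConfig)  # T_max stays "???" (missing) and must be provided.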