diff --git a/mmengine/tune/_report_hook.py b/mmengine/tune/_report_hook.py
index eff4ad4565..a58d9a2431 100644
--- a/mmengine/tune/_report_hook.py
+++ b/mmengine/tune/_report_hook.py
@@ -58,10 +58,10 @@ def _should_stop(self, runner):
             runner (Runner): The runner of the training process.
         """
         if self.tuning_iter is not None:
-            if runner.iter > self.tuning_iter:
+            if runner.iter + 1 >= self.tuning_iter:
                 return True
         elif self.tuning_epoch is not None:
-            if runner.epoch > self.tuning_epoch:
+            if runner.epoch + 1 >= self.tuning_epoch:
                 return True
         else:
             return False
diff --git a/mmengine/tune/searchers/searcher.py b/mmengine/tune/searchers/searcher.py
index 5aa4ef90f4..024f8fca0a 100644
--- a/mmengine/tune/searchers/searcher.py
+++ b/mmengine/tune/searchers/searcher.py
@@ -54,14 +54,18 @@ def _validate_hparam_spec(self, hparam_spec: Dict[str, Dict]):
                 'hparam_spec must have a key "type" and ' \
                 f'its value must be "discrete" or "continuous", but got {v}'
             if v['type'] == 'discrete':
-                assert 'values' in v, \
-                    'if hparam_spec["type"] is "discrete", ' +\
-                    f'hparam_spec must have a key "values", but got {v}'
+                assert 'values' in v and isinstance(v['values'], list) and \
+                    v['values'], 'Expected a non-empty "values" list for ' + \
+                    f'discrete type, but got {v}'
             else:
                 assert 'lower' in v and 'upper' in v, \
-                    'if hparam_spec["type"] is "continuous", ' +\
-                    'hparam_spec must have keys "lower" and "upper", ' +\
-                    f'but got {v}'
+                    'Expected keys "lower" and "upper" for continuous ' + \
+                    f'type, but got {v}'
+                assert isinstance(v['lower'], (int, float)) and \
+                    isinstance(v['upper'], (int, float)), \
+                    f'Expected "lower" and "upper" to be numbers, but got {v}'
+                assert v['lower'] < v['upper'], \
+                    f'Expected "lower" to be less than "upper", but got {v}'
 
     @property
     def hparam_spec(self) -> Dict[str, Dict]:
diff --git a/tests/test_tune/test_report_hook.py b/tests/test_tune/test_report_hook.py
new file mode 100644
index 0000000000..a50fc68d06
--- /dev/null
+++ b/tests/test_tune/test_report_hook.py
@@ -0,0 +1,96 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from mmengine.testing import RunnerTestCase
+from mmengine.tune._report_hook import ReportingHook
+from unittest.mock import MagicMock
+
+class TestReportingHook(RunnerTestCase):
+    def test_append_score(self):
+        hook = ReportingHook(monitor='acc', max_scoreboard_len=3)
+
+        # Adding scores to the scoreboard
+        hook._append_score(0.5)
+        hook._append_score(0.6)
+        hook._append_score(0.7)
+        self.assertEqual(hook.scoreboard, [0.5, 0.6, 0.7])
+
+        # When exceeding max length, it should pop the first item
+        hook._append_score(0.8)
+        self.assertEqual(hook.scoreboard, [0.6, 0.7, 0.8])
+
+    def test_should_stop(self):
+        runner = MagicMock(iter=3, epoch=1)
+
+        # Test with tuning_iter
+        hook1 = ReportingHook(monitor='acc', tuning_iter=5)
+        self.assertFalse(hook1._should_stop(runner))
+        runner.iter = 4
+        self.assertTrue(hook1._should_stop(runner))
+
+        # Test with tuning_epoch
+        hook2 = ReportingHook(monitor='acc', tuning_epoch=3)
+        self.assertFalse(hook2._should_stop(runner))
+        runner.epoch = 2
+        self.assertTrue(hook2._should_stop(runner))
+
+    def test_report_score(self):
+        hook1 = ReportingHook(monitor='acc', report_op='latest')
+        hook1.scoreboard = [0.5, 0.6, 0.7]
+        self.assertEqual(hook1.report_score(), 0.7)
+
+        hook2 = ReportingHook(monitor='acc', report_op='mean')
+        hook2.scoreboard = [0.5, 0.6, 0.7]
+        self.assertEqual(hook2.report_score(), 0.6)
+
+        # Test with an empty scoreboard
+        hook3 = ReportingHook(monitor='acc', report_op='mean')
+        self.assertIsNone(hook3.report_score())
+
+    def test_clear(self):
+        hook = ReportingHook(monitor='acc')
+        hook.scoreboard = [0.5, 0.6, 0.7]
+        hook.clear()
+        self.assertEqual(hook.scoreboard, [])
+
+    def test_after_train_iter(self):
+        runner = MagicMock(iter=3, epoch=1)
+        runner.log_processor.get_log_after_iter = MagicMock(return_value=({'acc': 0.9}, 'log_str'))
+
+        # Check if the monitored score gets appended correctly
+        hook = ReportingHook(monitor='acc')
+        hook.after_train_iter(runner, 0)
+        self.assertEqual(hook.scoreboard[-1], 0.9)
+
+        # Check if no score is appended for a non-existent metric
+        hook2 = ReportingHook(monitor='non_existent')
+        hook2.after_train_iter(runner, 0)
+        self.assertEqual(len(hook2.scoreboard), 0)
+
+        # Check that training stops if tuning_iter is reached
+        runner.iter = 5
+        hook3 = ReportingHook(monitor='acc', tuning_iter=5)
+        hook3.after_train_iter(runner, 0)
+        self.assertTrue(runner.train_loop.stop_training)
+
+    def test_after_val_epoch(self):
+        runner = MagicMock(iter=3, epoch=1)
+
+        # Check if the monitored score gets appended correctly from metrics
+        metrics = {'acc': 0.9}
+        hook = ReportingHook(monitor='acc')
+        hook.after_val_epoch(runner, metrics=metrics)
+        self.assertEqual(hook.scoreboard[-1], 0.9)
+
+        # Check that no score is appended if the metric is missing from metrics
+        metrics = {'loss': 0.1}
+        hook2 = ReportingHook(monitor='acc')
+        hook2.after_val_epoch(runner, metrics=metrics)
+        self.assertEqual(len(hook2.scoreboard), 0)
+
+    def test_with_runner(self):
+        runner = self.build_runner(self.epoch_based_cfg)
+        acc_hook = ReportingHook(monitor='test/acc', tuning_epoch=1)
+        runner.register_hook(acc_hook, priority='VERY_LOW')
+        runner.train()
+        self.assertEqual(runner.epoch, 1)
+        score = acc_hook.report_score()
+        self.assertAlmostEqual(score, 1)
diff --git a/tests/test_tune/test_searchers/test_nevergrad.py b/tests/test_tune/test_searchers/test_nevergrad.py
new file mode 100644
index 0000000000..3251d9c3d5
--- /dev/null
+++ b/tests/test_tune/test_searchers/test_nevergrad.py
@@ -0,0 +1,101 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from unittest import TestCase, skipIf
+import random
+from typing import List
+
+from mmengine.tune.searchers import NevergradSearcher
+
+try:
+    import nevergrad
+    NEVERGRAD_AVAILABLE = True
+except ImportError:
+    NEVERGRAD_AVAILABLE = False
+
+@skipIf(not NEVERGRAD_AVAILABLE, "nevergrad is not installed")
+class TestNevergradSearcher(TestCase):
+    def noisy_sphere_function(self, x: List[float]):
+        """Sphere function with noise: f(x) = sum(x_i^2) + noise"""
+        noise = random.gauss(0, 0.1)  # Gaussian noise with mean 0 and std 0.1
+        return sum([x_i ** 2 for x_i in x]) + noise
+
+    def one_max_function(self, x: List[int]):
+        """OneMax function: f(x) = sum(x_i) for binary x_i"""
+        return sum(x)
+
+    @property
+    def target_solver_types(self):
+        return [
+            'OnePlusOne', 'CMA', 'BO', 'DE', 'PSO', 'NGO'
+        ]
+
+    def test_hash_dict(self):
+        searcher = NevergradSearcher(rule='less', hparam_spec={}, num_trials=100, solver_type='OnePlusOne')
+
+        # Check different dicts yield different hashes
+        d1 = {"x": 1, "y": 2}
+        d2 = {"x": 1, "y": 3}
+        self.assertNotEqual(searcher._hash_dict(d1), searcher._hash_dict(d2))
+
+        # Check same dict yields same hash
+        self.assertEqual(searcher._hash_dict(d1), searcher._hash_dict(d1))
+
+        # Check order doesn't matter
+        d3 = {"y": 2, "x": 1}
+        self.assertEqual(searcher._hash_dict(d1), searcher._hash_dict(d3))
+
+    def test_noisy_sphere_function(self):
+        hparam_continuous_space = {
+            'x1': {
+                'type': 'continuous',
+                'lower': -5.0,
+                'upper': 5.0
+            },
+            'x2': {
+                'type': 'continuous',
+                'lower': -5.0,
+                'upper': 5.0
+            }
+        }
+        for solver_type in self.target_solver_types:
+            searcher = NevergradSearcher(rule='less', hparam_spec=hparam_continuous_space, num_trials=100, solver_type=solver_type)
+            for _ in range(100):
+                hparam = searcher.suggest()
+                score = self.noisy_sphere_function([v for _, v in hparam.items()])
+                searcher.record(hparam, score)
+            # For the noisy sphere function, the optimal should be close to x1=0 and x2=0
+            best_hparam = searcher.suggest()
+            self.assertAlmostEqual(best_hparam['x1'], 0.0, places=1)
+            self.assertAlmostEqual(best_hparam['x2'], 0.0, places=1)
+
+    def test_one_max_function(self):
+        # Define the discrete search space for OneMax
+        hparam_discrete_space = {
+            'x1': {
+                'type': 'discrete',
+                'values': [0, 1]
+            },
+            'x2': {
+                'type': 'discrete',
+                'values': [0, 1]
+            },
+            'x3': {
+                'type': 'discrete',
+                'values': [0, 1]
+            },
+            'x4': {
+                'type': 'discrete',
+                'values': [0, 1]
+            }
+        }
+        for solver_type in self.target_solver_types:
+            searcher = NevergradSearcher(rule='greater', hparam_spec=hparam_discrete_space, num_trials=100, solver_type=solver_type)
+            for _ in range(100):
+                hparam = searcher.suggest()
+                score = self.one_max_function([v for _, v in hparam.items()])
+                searcher.record(hparam, score)
+            # For the OneMax function, the optimal solution is x1=x2=x3=x4=1
+            best_hparam = searcher.suggest()
+            self.assertEqual(best_hparam['x1'], 1)
+            self.assertEqual(best_hparam['x2'], 1)
+            self.assertEqual(best_hparam['x3'], 1)
+            self.assertEqual(best_hparam['x4'], 1)
\ No newline at end of file
diff --git a/tests/test_tune/test_searchers/test_searcher.py b/tests/test_tune/test_searchers/test_searcher.py
new file mode 100644
index 0000000000..ca814cb4ed
--- /dev/null
+++ b/tests/test_tune/test_searchers/test_searcher.py
@@ -0,0 +1,86 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from unittest import TestCase
+
+from mmengine.tune.searchers import Searcher
+
+class TestSearcher(TestCase):
+
+    def test_rule(self):
+        valid_hparam_spec_1 = {
+            'lr': {
+                'type': 'discrete',
+                'values': [0.01, 0.02, 0.03]
+            }
+        }
+        # Invalid cases
+        with self.assertRaises(AssertionError):
+            Searcher(rule='invalid_rule', hparam_spec=valid_hparam_spec_1)
+        Searcher(rule='greater', hparam_spec=valid_hparam_spec_1)
+        Searcher(rule='less', hparam_spec=valid_hparam_spec_1)
+
+    def test_validate_hparam_spec(self):
+        # Unknown hparam spec type
+        invalid_hparam_spec_1 = {
+            'lr': {
+                'type': 'unknown_type',
+                'values': [0.01, 0.02, 0.03]
+            }
+        }
+        with self.assertRaises(AssertionError):
+            Searcher(rule='greater', hparam_spec=invalid_hparam_spec_1)
+
+        # Missing keys in continuous hparam_spec
+        invalid_hparam_spec_2 = {
+            'lr': {
+                'type': 'continuous',
+                'lower': 0.01
+            }
+        }
+        with self.assertRaises(AssertionError):
+            Searcher(rule='less', hparam_spec=invalid_hparam_spec_2)
+
+        # Invalid discrete hparam_spec
+        invalid_hparam_spec_3 = {
+            'lr': {
+                'type': 'discrete',
+                'values': []  # Empty list
+            }
+        }
+        with self.assertRaises(AssertionError):
+            Searcher(rule='greater', hparam_spec=invalid_hparam_spec_3)
+
+        # Invalid continuous hparam_spec
+        invalid_hparam_spec_4 = {
+            'lr': {
+                'type': 'continuous',
+                'lower': 0.1,
+                'upper': 0.01  # lower is greater than upper
+            }
+        }
+        with self.assertRaises(AssertionError):
+            Searcher(rule='less', hparam_spec=invalid_hparam_spec_4)
+
+        # Invalid data type in continuous hparam_spec
+        invalid_hparam_spec_5 = {
+            'lr': {
+                'type': 'continuous',
+                'lower': '0.01',  # String instead of number
+                'upper': 0.1
+            }
+        }
+        with self.assertRaises(AssertionError):
+            Searcher(rule='less', hparam_spec=invalid_hparam_spec_5)
+
+    def test_hparam_spec_property(self):
+        hparam_spec = {
+            'lr': {
+                'type': 'discrete',
+                'values': [0.01, 0.02, 0.03]
+            }
+        }
+        searcher = Searcher(rule='greater', hparam_spec=hparam_spec)
+        self.assertEqual(searcher.hparam_spec, hparam_spec)
+
+    def test_rule_property(self):
+        searcher = Searcher(rule='greater', hparam_spec={})
+        self.assertEqual(searcher.rule, 'greater')
\ No newline at end of file
diff --git a/tests/test_tune/test_tuner.py b/tests/test_tune/test_tuner.py
new file mode 100644
index 0000000000..4b5eb04397
--- /dev/null
+++ b/tests/test_tune/test_tuner.py
@@ -0,0 +1,226 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from typing import Dict
+from mmengine.testing import RunnerTestCase
+from mmengine.tune import Tuner
+from mmengine.tune.searchers import Searcher, HYPER_SEARCHERS
+from unittest import mock
+
+import random
+
+class ToySearcher(Searcher):
+    def suggest(self) -> Dict:
+        hparam = dict()
+        for k, v in self.hparam_spec.items():
+            if v['type'] == 'discrete':
+                hparam[k] = random.choice(v['values'])
+            else:
+                hparam[k] = random.uniform(v['lower'], v['upper'])
+        return hparam
+
+class TestTuner(RunnerTestCase):
+    def setUp(self) -> None:
+        super().setUp()
+        HYPER_SEARCHERS.register_module(ToySearcher)
+        self.hparam_spec = {
+            'optim_wrapper.optimizer.lr': {
+                'type': 'discrete',
+                'values': [0.1, 0.2, 0.3]
+            }
+        }
+
+    def tearDown(self):
+        super().tearDown()
+        HYPER_SEARCHERS.module_dict.pop('ToySearcher', None)
+
+    def test_init(self):
+        with self.assertRaises(ValueError):
+            Tuner(
+                runner_cfg=dict(),
+                hparam_spec=dict(),
+                monitor='loss',
+                rule='invalid_rule',
+                searcher_cfg=dict(type='ToySearcher'))
+
+        # Initializing with correct parameters
+        tuner = Tuner(
+            runner_cfg=self.epoch_based_cfg,
+            hparam_spec=self.hparam_spec,
+            monitor='loss',
+            rule='less',
+            num_trials=2,
+            searcher_cfg=dict(type='ToySearcher')
+        )
+
+        # Verify the properties
+        self.assertEqual(tuner.epoch_based_cfg, self.epoch_based_cfg)
+        self.assertEqual(tuner.hparam_spec, self.hparam_spec)
+        self.assertEqual(tuner.monitor, 'loss')
+        self.assertEqual(tuner.rule, 'less')
+        self.assertEqual(tuner.num_trials, 2)
+
+        # Ensure a searcher of type ToySearcher is used
+        self.assertIsInstance(tuner.searcher, ToySearcher)
+
+
+    def mock_is_main_process(self, return_value=True):
+        return mock.patch('mmengine.dist.is_main_process', return_value=return_value)
+
+    def mock_broadcast(self, side_effect=None):
+        return mock.patch('mmengine.dist.broadcast_object_list', side_effect=side_effect)
+
+    def test_inject_config(self):
+        # Inject into a single level
+        cfg = {'a': 1}
+        Tuner.inject_config(cfg, 'a', 2)
+        self.assertEqual(cfg['a'], 2)
+
+        # Inject into a nested level
+        cfg = {
+            'level1': {
+                'level2': {
+                    'level3': 3
+                }
+            }
+        }
+        Tuner.inject_config(cfg, 'level1.level2.level3', 4)
+        self.assertEqual(cfg['level1']['level2']['level3'], 4)
+
+        # Inject into a non-existent key
+        cfg = {}
+        with self.assertRaises(AssertionError):
+            Tuner.inject_config(cfg, 'a', 1)
+
+        # Inject into a sequence
+        cfg = {
+            'sequence': [1, 2, 3]
+        }
+        Tuner.inject_config(cfg, 'sequence.1', 5)
+        self.assertEqual(cfg['sequence'][1], 5)
+
+    @mock.patch('mmengine.runner.Runner.train')
+    @mock.patch('mmengine.tune._report_hook.ReportingHook.report_score')
+    def test_successful_run(self, mock_report_score, mock_train):
+        tuner = Tuner(
+            runner_cfg=self.epoch_based_cfg,
+            hparam_spec=self.hparam_spec,
+            monitor='loss',
+            rule='less',
+            num_trials=2,
+            searcher_cfg=dict(type='ToySearcher')
+        )
+
+        tuner.searcher.suggest = mock.MagicMock(return_value={'optim_wrapper.optimizer.lr': 0.1})
+        tuner.searcher.record = mock.MagicMock()
+
+        mock_report_score.return_value = 0.05
+
+        with self.mock_is_main_process(), self.mock_broadcast():
+            hparam, score, error = tuner._run_trial()
+
+        self.assertEqual(hparam, {'optim_wrapper.optimizer.lr': 0.1})
+        self.assertEqual(score, 0.05)
+        self.assertIsNone(error)
+        tuner.searcher.record.assert_called_with({'optim_wrapper.optimizer.lr': 0.1}, 0.05)
+
+    @mock.patch('mmengine.runner.Runner.train')
+    @mock.patch('mmengine.tune._report_hook.ReportingHook.report_score')
+    def test_failed_run(self, mock_report_score, mock_train):
+        mock_train.side_effect = Exception("Error during training")
+
+        tuner = Tuner(
+            runner_cfg=self.epoch_based_cfg,
+            hparam_spec=self.hparam_spec,
+            monitor='loss',
+            rule='less',
+            num_trials=2,
+            searcher_cfg=dict(type='ToySearcher')
+        )
+
+        tuner.searcher.suggest = mock.MagicMock(return_value={'optim_wrapper.optimizer.lr': 0.1})
+        tuner.searcher.record = mock.MagicMock()
+
+        with self.mock_is_main_process(), self.mock_broadcast():
+            hparam, score, error = tuner._run_trial()
+
+        self.assertEqual(hparam, {'optim_wrapper.optimizer.lr': 0.1})
+        self.assertEqual(score, float('inf'))
+        self.assertTrue(isinstance(error, Exception))
+        tuner.searcher.record.assert_called_with({'optim_wrapper.optimizer.lr': 0.1}, float('inf'))
+
+    @mock.patch('mmengine.runner.Runner.train')
+    @mock.patch('mmengine.tune._report_hook.ReportingHook.report_score')
+    def test_tune_method(self, mock_report_score, mock_train):
+        mock_scores = [0.05, 0.03, 0.04, 0.06]
+        mock_hparams = [
+            {'optim_wrapper.optimizer.lr': 0.1},
+            {'optim_wrapper.optimizer.lr': 0.05},
+            {'optim_wrapper.optimizer.lr': 0.2},
+            {'optim_wrapper.optimizer.lr': 0.3}
+        ]
+
+        mock_report_score.side_effect = mock_scores
+
+        tuner = Tuner(
+            runner_cfg=self.epoch_based_cfg,
+            hparam_spec=self.hparam_spec,
+            monitor='loss',
+            rule='less',
+            num_trials=4,
+            searcher_cfg=dict(type='ToySearcher')
+        )
+
+        mock_run_trial_return_values = [
+            (mock_hparams[0], mock_scores[0], None),
+            (mock_hparams[1], mock_scores[1], Exception("Error during training")),
+            (mock_hparams[2], mock_scores[2], None),
+            (mock_hparams[3], mock_scores[3], None)
+        ]
+        tuner._run_trial = mock.MagicMock(side_effect=mock_run_trial_return_values)
+
+        with self.mock_is_main_process(), self.mock_broadcast():
+            result = tuner.tune()
+
+        self.assertEqual(tuner._history, [(mock_hparams[0], mock_scores[0]), (mock_hparams[2], mock_scores[2]), (mock_hparams[3], mock_scores[3])])
+
+        self.assertEqual(result, {
+            'hparam': mock_hparams[2],
+            'score': mock_scores[2]
+        })
+
+        tuner.rule = 'greater'
+        with self.mock_is_main_process(), self.mock_broadcast():
+            result = tuner.tune()
+        self.assertEqual(result, {
+            'hparam': mock_hparams[3],
+            'score': mock_scores[3]
+        })
+
+    def test_clear(self):
+        tuner = Tuner(
+            runner_cfg=self.epoch_based_cfg,
+            hparam_spec=self.hparam_spec,
+            monitor='loss',
+            rule='less',
+            num_trials=2,
+            searcher_cfg=dict(type='ToySearcher')
+        )
+
+        tuner.history.append(({'optim_wrapper.optimizer.lr': 0.1}, 0.05))
+        tuner.clear()
+        self.assertEqual(tuner.history, [])
+
+    def test_with_runner(self):
+        tuner = Tuner(
+            runner_cfg=self.epoch_based_cfg,
+            hparam_spec=self.hparam_spec,
+            monitor='test/acc',
+            rule='greater',
+            num_trials=10,
+            searcher_cfg=dict(type='ToySearcher')
+        )
+
+        with self.mock_is_main_process(), self.mock_broadcast():
+            result = tuner.tune()
+
+        self.assertTrue(set(hparam['optim_wrapper.optimizer.lr'] for hparam, _ in tuner.history).issubset(set(self.hparam_spec['optim_wrapper.optimizer.lr']['values'])))
+        self.assertEqual(result['score'], 1)