diff --git a/CHANGELOG.md b/CHANGELOG.md
index e1ee1aa59f..ffdda31c68 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,7 @@
 
 ## Improvements
 - Add logger information on handling of stopIteration error (#960)
+- Replace deprecated ConfigSpace methods (#1139)
 
 # 2.2.0
 
diff --git a/smac/acquisition/maximizer/local_search.py b/smac/acquisition/maximizer/local_search.py
index 297c032a22..ff0d4b3733 100644
--- a/smac/acquisition/maximizer/local_search.py
+++ b/smac/acquisition/maximizer/local_search.py
@@ -153,7 +153,10 @@ def _get_initial_points(
         init_points = []
         n_init_points = n_points
         if len(previous_configs) < n_points:
-            sampled_points = self._configspace.sample_configuration(size=n_points - len(previous_configs))
+            if n_points - len(previous_configs) == 1:
+                sampled_points = [self._configspace.sample_configuration()]
+            else:
+                sampled_points = self._configspace.sample_configuration(size=n_points - len(previous_configs))
             n_init_points = len(previous_configs)
             if not isinstance(sampled_points, list):
                 sampled_points = [sampled_points]
diff --git a/smac/acquisition/maximizer/random_search.py b/smac/acquisition/maximizer/random_search.py
index c7a724cb90..c5f87fda06 100644
--- a/smac/acquisition/maximizer/random_search.py
+++ b/smac/acquisition/maximizer/random_search.py
@@ -41,7 +41,7 @@ def _maximize(
         if n_points > 1:
             rand_configs = self._configspace.sample_configuration(size=n_points)
         else:
-            rand_configs = [self._configspace.sample_configuration(size=1)]
+            rand_configs = [self._configspace.sample_configuration()]
 
         if _sorted:
             for i in range(len(rand_configs)):
diff --git a/smac/facade/blackbox_facade.py b/smac/facade/blackbox_facade.py
index a5e1a18ab5..a36a44f819 100644
--- a/smac/facade/blackbox_facade.py
+++ b/smac/facade/blackbox_facade.py
@@ -112,11 +112,11 @@ def get_kernel(scenario: Scenario) -> kernels.Kernel:
         cont_dims = np.where(np.array(types) == 0)[0]
         cat_dims = np.where(np.array(types) != 0)[0]
 
-        if (len(cont_dims) + len(cat_dims)) != len(scenario.configspace.get_hyperparameters()):
+        if (len(cont_dims) + len(cat_dims)) != len(list(scenario.configspace.values())):
             raise ValueError(
                 "The inferred number of continuous and categorical hyperparameters "
                 "must equal the total number of hyperparameters. Got "
-                f"{(len(cont_dims) + len(cat_dims))} != {len(scenario.configspace.get_hyperparameters())}."
+                f"{(len(cont_dims) + len(cat_dims))} != {len(list(scenario.configspace.values()))}."
             )
 
         # Constant Kernel
diff --git a/smac/initial_design/abstract_initial_design.py b/smac/initial_design/abstract_initial_design.py
index d561a6772f..4b52249338 100644
--- a/smac/initial_design/abstract_initial_design.py
+++ b/smac/initial_design/abstract_initial_design.py
@@ -77,7 +77,7 @@ def __init__(
 
         self._additional_configs = additional_configs
 
-        n_params = len(self._configspace.get_hyperparameters())
+        n_params = len(list(self._configspace.values()))
         if n_configs is not None:
             logger.info("Using `n_configs` and ignoring `n_configs_per_hyperparameter`.")
             self._n_configs = n_configs
@@ -174,10 +174,10 @@ def _transform_continuous_designs(
         configs : list[Configuration]
             Continuous transformed configs.
         """
-        params = configspace.get_hyperparameters()
+        params = list(configspace.values())
         for idx, param in enumerate(params):
             if isinstance(param, IntegerHyperparameter):
-                design[:, idx] = param._inverse_transform(param._transform(design[:, idx]))
+                design[:, idx] = param.to_vector(param.to_value(design[:, idx]))
             elif isinstance(param, NumericalHyperparameter):
                 continue
             elif isinstance(param, Constant):
diff --git a/smac/initial_design/factorial_design.py b/smac/initial_design/factorial_design.py
index 6eb97ef16d..81ea4a71b2 100644
--- a/smac/initial_design/factorial_design.py
+++ b/smac/initial_design/factorial_design.py
@@ -22,7 +22,7 @@ class FactorialInitialDesign(AbstractInitialDesign):
     """Factorial initial design to select corner and middle configurations."""
 
     def _select_configurations(self) -> list[Configuration]:
-        params = self._configspace.get_hyperparameters()
+        params = list(self._configspace.values())
 
         values = []
         mid = []
diff --git a/smac/initial_design/latin_hypercube_design.py b/smac/initial_design/latin_hypercube_design.py
index ce760c9be1..8cd3cf2a9c 100644
--- a/smac/initial_design/latin_hypercube_design.py
+++ b/smac/initial_design/latin_hypercube_design.py
@@ -16,7 +16,7 @@ class LatinHypercubeInitialDesign(AbstractInitialDesign):
     """
 
     def _select_configurations(self) -> list[Configuration]:
-        params = self._configspace.get_hyperparameters()
+        params = list(self._configspace.values())
 
         constants = 0
         for p in params:
diff --git a/smac/initial_design/random_design.py b/smac/initial_design/random_design.py
index d05fb1577e..8a12b515bb 100644
--- a/smac/initial_design/random_design.py
+++ b/smac/initial_design/random_design.py
@@ -12,9 +12,10 @@ class RandomInitialDesign(AbstractInitialDesign):
     """Initial design that evaluates random configurations."""
 
     def _select_configurations(self) -> list[Configuration]:
-        configs = self._configspace.sample_configuration(size=self._n_configs)
         if self._n_configs == 1:
-            configs = [configs]
+            configs = [self._configspace.sample_configuration()]
+        else:
+            configs = self._configspace.sample_configuration(size=self._n_configs)
         for config in configs:
             config.origin = "Initial Design: Random"
         return configs
diff --git a/smac/initial_design/sobol_design.py b/smac/initial_design/sobol_design.py
index b16f20349b..1c3c646f50 100644
--- a/smac/initial_design/sobol_design.py
+++ b/smac/initial_design/sobol_design.py
@@ -22,14 +22,14 @@ class SobolInitialDesign(AbstractInitialDesign):
     def __init__(self, *args: Any, **kwargs: Any) -> None:
         super().__init__(*args, **kwargs)
 
-        if len(self._configspace.get_hyperparameters()) > 21201:
+        if len(list(self._configspace.values())) > 21201:
             raise ValueError(
                 "The default initial design Sobol sequence can only handle up to 21201 dimensions. "
                 "Please use a different initial design, such as the Latin Hypercube design."
             )
 
     def _select_configurations(self) -> list[Configuration]:
-        params = self._configspace.get_hyperparameters()
+        params = list(self._configspace.values())
 
         constants = 0
         for p in params:
diff --git a/smac/intensifier/successive_halving.py b/smac/intensifier/successive_halving.py
index 96f018b892..b089dea51c 100644
--- a/smac/intensifier/successive_halving.py
+++ b/smac/intensifier/successive_halving.py
@@ -210,7 +210,7 @@ def get_state(self) -> dict[str, Any]:  # noqa: D102
             for seed, configs in self._tracker[key]:
                 # We have to make key serializable
                 new_key = f"{key[0]},{key[1]}"
-                tracker[new_key].append((seed, [config.get_dictionary() for config in configs]))
+                tracker[new_key].append((seed, [dict(config) for config in configs]))
 
         return {"tracker": tracker}
 
diff --git a/smac/main/config_selector.py b/smac/main/config_selector.py
index 4e3574d589..42a72f02bb 100644
--- a/smac/main/config_selector.py
+++ b/smac/main/config_selector.py
@@ -177,7 +177,7 @@ def __iter__(self) -> Iterator[Configuration]:
                 # the configspace.
                 logger.debug("No data available to train the model. Sample a random configuration.")
 
-                config = self._scenario.configspace.sample_configuration(1)
+                config = self._scenario.configspace.sample_configuration()
                 self._call_callbacks_on_end(config)
                 yield config
                 self._call_callbacks_on_start()
diff --git a/smac/model/abstract_model.py b/smac/model/abstract_model.py
index 80a7312c44..2b371219ef 100644
--- a/smac/model/abstract_model.py
+++ b/smac/model/abstract_model.py
@@ -68,7 +68,7 @@ def __init__(
                     raise RuntimeError("Instances must have the same number of features.")
 
         self._n_features = n_features
-        self._n_hps = len(self._configspace.get_hyperparameters())
+        self._n_hps = len(list(self._configspace.values()))
 
         self._pca = PCA(n_components=self._pca_components)
         self._scaler = MinMaxScaler()
diff --git a/smac/model/gaussian_process/abstract_gaussian_process.py b/smac/model/gaussian_process/abstract_gaussian_process.py
index 9db465124c..df5f156766 100644
--- a/smac/model/gaussian_process/abstract_gaussian_process.py
+++ b/smac/model/gaussian_process/abstract_gaussian_process.py
@@ -171,7 +171,7 @@ def _get_all_priors(
 
     def _set_has_conditions(self) -> None:
        """Sets `has_conditions` on `current_param`."""
-        has_conditions = len(self._configspace.get_conditions()) > 0
+        has_conditions = len(self._configspace.conditions) > 0
 
         to_visit = []
         to_visit.append(self._kernel)
diff --git a/smac/model/random_forest/abstract_random_forest.py b/smac/model/random_forest/abstract_random_forest.py
index e407331be5..6f0f57b13c 100644
--- a/smac/model/random_forest/abstract_random_forest.py
+++ b/smac/model/random_forest/abstract_random_forest.py
@@ -28,9 +28,9 @@ def __init__(self, *args: Any, **kwargs: Any) -> None:
     def _impute_inactive(self, X: np.ndarray) -> np.ndarray:
         X = X.copy()
 
-        for idx, hp in enumerate(self._configspace.get_hyperparameters()):
+        for idx, hp in enumerate(list(self._configspace.values())):
             if idx not in self._conditional:
-                parents = self._configspace.get_parents_of(hp.name)
+                parents = self._configspace.parents_of[hp.name]
                 if len(parents) == 0:
                     self._conditional[idx] = False
                 else:
diff --git a/smac/runhistory/encoder/abstract_encoder.py b/smac/runhistory/encoder/abstract_encoder.py
index e9de8b14cb..a02bbab771 100644
--- a/smac/runhistory/encoder/abstract_encoder.py
+++ b/smac/runhistory/encoder/abstract_encoder.py
@@ -68,7 +68,7 @@ def __init__(
         self._instances = scenario.instances
         self._instance_features = scenario.instance_features
         self._n_features = scenario.count_instance_features()
-        self._n_params = len(scenario.configspace.get_hyperparameters())
+        self._n_params = len(list(scenario.configspace.values()))
 
         if self._instances is not None and self._n_features == 0:
             logger.warning(
diff --git a/smac/runhistory/runhistory.py b/smac/runhistory/runhistory.py
index 091e3f95b2..ba69792246 100644
--- a/smac/runhistory/runhistory.py
+++ b/smac/runhistory/runhistory.py
@@ -262,7 +262,7 @@ def add(
 
         # Construct keys and values for the data dictionary
         for key, value in (
-            ("config", config.get_dictionary()),
+            ("config", dict(config)),
             ("config_id", config_id),
             ("instance", instance),
             ("seed", seed),
@@ -783,7 +783,7 @@ def save(self, filename: str | Path = "runhistory.json") -> None:
         config_origins = {}
         for id_, config in self._ids_config.items():
             if id_ in config_ids_to_serialize:
-                configs[id_] = config.get_dictionary()
+                configs[id_] = dict(config)
 
             config_origins[id_] = config.origin
 
diff --git a/smac/runner/target_function_script_runner.py b/smac/runner/target_function_script_runner.py
index 5ab558906d..258c545e75 100644
--- a/smac/runner/target_function_script_runner.py
+++ b/smac/runner/target_function_script_runner.py
@@ -132,7 +132,7 @@ def run(
         status = StatusType.SUCCESS
 
         # Add config arguments to the kwargs
-        for k, v in config.get_dictionary().items():
+        for k, v in dict(config).items():
             if k in kwargs:
                 raise RuntimeError(f"The key {k} is already in use. Please use a different one.")
             kwargs[k] = v
diff --git a/smac/utils/configspace.py b/smac/utils/configspace.py
index 8224f3ef90..78201f9235 100644
--- a/smac/utils/configspace.py
+++ b/smac/utils/configspace.py
@@ -53,11 +53,11 @@ def get_types(
     The bounds for the instance features are *not* added in this function.
     """
     # Extract types vector for rf from config space and the bounds
-    types = [0] * len(configspace.get_hyperparameters())
+    types = [0] * len(list(configspace.values()))
     bounds = [(np.nan, np.nan)] * len(types)
 
-    for i, param in enumerate(configspace.get_hyperparameters()):
-        parents = configspace.get_parents_of(param.name)
+    for i, param in enumerate(list(configspace.values())):
+        parents = configspace.parents_of[param.name]
         if len(parents) == 0:
             can_be_inactive = False
         else:
diff --git a/tests/fixtures/config_selector.py b/tests/fixtures/config_selector.py
index 7a1c1037b5..2ad2bece26 100644
--- a/tests/fixtures/config_selector.py
+++ b/tests/fixtures/config_selector.py
@@ -40,7 +40,7 @@ def __iter__(self):
                 yield config
 
         while True:
-            config = self._scenario.configspace.sample_configuration(1)
+            config = self._scenario.configspace.sample_configuration()
             if config not in self._processed_configs:
                 self._processed_configs.append(config)
                 yield config
diff --git a/tests/test_acquisition/test_maximizers.py b/tests/test_acquisition/test_maximizers.py
index d7698e0a29..c1de71617b 100644
--- a/tests/test_acquisition/test_maximizers.py
+++ b/tests/test_acquisition/test_maximizers.py
@@ -205,8 +205,8 @@ def model(configspace: ConfigurationSpace):
     model = RandomForest(configspace)
 
     np.random.seed(0)
-    X = np.random.rand(100, len(configspace.get_hyperparameters()))
-    y = 1 - (np.sum(X, axis=1) / len(configspace.get_hyperparameters()))
+    X = np.random.rand(100, len(list(configspace.values())))
+    y = 1 - (np.sum(X, axis=1) / len(list(configspace.values())))
     model.train(X, y)
 
     return model
diff --git a/tests/test_initial_design/test_factorical_design.py b/tests/test_initial_design/test_factorical_design.py
index b963fa70bd..2903ee6b2f 100644
--- a/tests/test_initial_design/test_factorical_design.py
+++ b/tests/test_initial_design/test_factorical_design.py
@@ -39,7 +39,7 @@ def get_ordinal_param(name: str):
     for i in range(n_dim):
         for j, get_param in enumerate(get_params):
             param_name = f"x{i+1}_{j}"
-            cs.add_hyperparameter(get_param(param_name))
+            cs.add(get_param(param_name))
 
     design = FactorialInitialDesign(
         make_scenario(configspace=cs),
diff --git a/tests/test_initial_design/test_initial_design.py b/tests/test_initial_design/test_initial_design.py
index ed08db0d5b..5e335eed71 100644
--- a/tests/test_initial_design/test_initial_design.py
+++ b/tests/test_initial_design/test_initial_design.py
@@ -45,7 +45,7 @@ def test_config_numbers(make_scenario, configspace_small):
     scenario = make_scenario(configspace_small)
     configs = configspace_small.sample_configuration(n_configs)
 
-    n_hps = len(configspace_small.get_hyperparameters())
+    n_hps = len(list(configspace_small.values()))
 
     dc = AbstractInitialDesign(
         scenario=scenario,
diff --git a/tests/test_intensifier/test_abstract_intensifier.py b/tests/test_intensifier/test_abstract_intensifier.py
index 6d878f9154..3e72161dcc 100644
--- a/tests/test_intensifier/test_abstract_intensifier.py
+++ b/tests/test_intensifier/test_abstract_intensifier.py
@@ -19,7 +19,7 @@ def test_setting_runhistory(make_scenario, configspace_small, make_config_select
     intensifier.config_selector = make_config_selector(scenario, runhistory, n_initial_configs=1)
 
     config = configspace_small.get_default_configuration()
-    config2 = configspace_small.sample_configuration(1)
+    config2 = configspace_small.sample_configuration()
 
     # Add some entries to the runhistory
     runhistory.add(
@@ -55,7 +55,7 @@ def test_incumbent_selection_single_objective(make_scenario, configspace_small,
     intensifier.runhistory = runhistory
 
     config = configspace_small.get_default_configuration()
-    config2 = configspace_small.sample_configuration(1)
+    config2 = configspace_small.sample_configuration()
 
     runhistory.add(config=config, cost=50, time=0.0, instance=scenario.instances[0], seed=999)
     intensifier.update_incumbents(config)
@@ -88,7 +88,7 @@ def test_incumbent_selection_multi_objective(make_scenario, configspace_small, m
     intensifier.runhistory = runhistory
 
     config = configspace_small.get_default_configuration()
-    config2 = configspace_small.sample_configuration(1)
+    config2 = configspace_small.sample_configuration()
 
     runhistory.add(config=config, cost=[50, 10], time=0.0, instance=scenario.instances[0], seed=999)
     intensifier.update_incumbents(config)
@@ -182,8 +182,8 @@ def test_pareto_front1(make_scenario, configspace_small):
     runhistory = RunHistory()
     intensifier = AbstractIntensifier(scenario=scenario, max_config_calls=3, seed=0)
     intensifier.runhistory = runhistory
-    config1 = configspace_small.sample_configuration(1)
-    config2 = configspace_small.sample_configuration(1)
+    config1 = configspace_small.sample_configuration()
+    config2 = configspace_small.sample_configuration()
 
     runhistory.add(
         config=config1,
@@ -211,8 +211,8 @@ def test_pareto_front2(make_scenario, configspace_small):
     runhistory = RunHistory()
    intensifier = AbstractIntensifier(scenario=scenario, max_config_calls=3, seed=0)
     intensifier.runhistory = runhistory
-    config1 = configspace_small.sample_configuration(1)
-    config2 = configspace_small.sample_configuration(1)
+    config1 = configspace_small.sample_configuration()
+    config2 = configspace_small.sample_configuration()
 
     runhistory.add(
         config=config1,
@@ -240,9 +240,9 @@ def test_pareto_front3(make_scenario, configspace_small):
     runhistory = RunHistory()
     intensifier = AbstractIntensifier(scenario=scenario, max_config_calls=3, seed=0)
     intensifier.runhistory = runhistory
-    config1 = configspace_small.sample_configuration(1)
-    config2 = configspace_small.sample_configuration(1)
-    config3 = configspace_small.sample_configuration(1)
+    config1 = configspace_small.sample_configuration()
+    config2 = configspace_small.sample_configuration()
+    config3 = configspace_small.sample_configuration()
 
     runhistory.add(
         config=config1,
diff --git a/tests/test_intensifier/test_intensifier.py b/tests/test_intensifier/test_intensifier.py
index 0492637c27..aebd96faf8 100644
--- a/tests/test_intensifier/test_intensifier.py
+++ b/tests/test_intensifier/test_intensifier.py
@@ -192,7 +192,7 @@ def test_intensifier_with_filled_runhistory(make_scenario, configspace_small, ma
     intensifier.config_selector = make_config_selector(scenario, runhistory, n_initial_configs=1)
     intensifier.runhistory = runhistory
     config = configspace_small.get_default_configuration()
-    config2 = configspace_small.sample_configuration(1)
+    config2 = configspace_small.sample_configuration()
 
     # Add some entries to the runhistory
     runhistory.add(
diff --git a/tests/test_intensifier/test_successive_halving.py b/tests/test_intensifier/test_successive_halving.py
index 163c2e6fdb..859dec95ab 100644
--- a/tests/test_intensifier/test_successive_halving.py
+++ b/tests/test_intensifier/test_successive_halving.py
@@ -131,7 +131,7 @@ def test_incumbents_any_budget(make_scenario, configspace_small, make_config_sel
     intensifier.config_selector = make_config_selector(scenario, runhistory, n_initial_configs=1)
     intensifier.runhistory = runhistory
     config = configspace_small.get_default_configuration()
-    config2 = configspace_small.sample_configuration(1)
+    config2 = configspace_small.sample_configuration()
 
     # Add some entries to the runhistory
     runhistory.add(config=config, cost=0.5, time=0.0, seed=8, budget=1, status=StatusType.SUCCESS)
@@ -176,7 +176,7 @@ def test_incumbents_highest_observed_budget(make_scenario, configspace_small, ma
     intensifier.runhistory = runhistory
     intensifier.__post_init__()
     config = configspace_small.get_default_configuration()
-    config2 = configspace_small.sample_configuration(1)
+    config2 = configspace_small.sample_configuration()
 
     # Add some entries to the runhistory
     runhistory.add(config=config, cost=0.5, time=0.0, seed=8, budget=1, status=StatusType.SUCCESS)
@@ -217,7 +217,7 @@ def test_incumbents_highest_budget(make_scenario, configspace_small, make_config
     intensifier.runhistory = runhistory
     intensifier.__post_init__()
     config = configspace_small.get_default_configuration()
-    config2 = configspace_small.sample_configuration(1)
+    config2 = configspace_small.sample_configuration()
 
     # Add some entries to the runhistory
     runhistory.add(config=config, cost=0.5, time=0.0, seed=8, budget=1, status=StatusType.SUCCESS)
diff --git a/tests/test_main/_test_boing.py b/tests/test_main/_test_boing.py
index 5e3efab6de..d8b536a5d4 100644
--- a/tests/test_main/_test_boing.py
+++ b/tests/test_main/_test_boing.py
@@ -149,8 +149,8 @@ def test_do_switching(make_scenario):
 
 def test_subspace_extraction():
     cs = ConfigurationSpace(0)
-    cs.add_hyperparameter(UniformFloatHyperparameter("x0", 0.0, 1.0))
-    cs.add_hyperparameter(CategoricalHyperparameter("x1", [0, 1, 2, 3, 4, 5]))
+    cs.add(UniformFloatHyperparameter("x0", 0.0, 1.0))
+    cs.add(CategoricalHyperparameter("x1", [0, 1, 2, 3, 4, 5]))
 
     rf = RandomForest(
         cs,
diff --git a/tests/test_model/_test_gp_gpytorch.py b/tests/test_model/_test_gp_gpytorch.py
index d9e90f9b3a..cfc2d34993 100644
--- a/tests/test_model/_test_gp_gpytorch.py
+++ b/tests/test_model/_test_gp_gpytorch.py
@@ -60,7 +60,7 @@ def get_gp(n_dimensions, rs, noise=None, normalize_y=True) -> GPyTorchGaussianPr
 
     configspace = ConfigurationSpace()
     for i in range(n_dimensions):
-        configspace.add_hyperparameter(UniformFloatHyperparameter("x%d" % i, 0, 1))
+        configspace.add(UniformFloatHyperparameter("x%d" % i, 0, 1))
 
     model = GPyTorchGaussianProcess(
         configspace=configspace,
@@ -122,9 +122,9 @@ def get_mixed_gp(cat_dims, cont_dims, rs, normalize_y=True):
 
     cs = ConfigurationSpace()
     for c in cont_dims:
-        cs.add_hyperparameter(UniformFloatHyperparameter("X%d" % c, 0, 1))
+        cs.add(UniformFloatHyperparameter("X%d" % c, 0, 1))
     for c in cat_dims:
-        cs.add_hyperparameter(CategoricalHyperparameter("X%d" % c, [0, 1, 2, 3]))
+        cs.add(CategoricalHyperparameter("X%d" % c, [0, 1, 2, 3]))
 
     model = GPyTorchGaussianProcess(
         configspace=cs,
diff --git a/tests/test_model/_test_lgpga.py b/tests/test_model/_test_lgpga.py
index fead691f50..5ba95f3d93 100644
--- a/tests/test_model/_test_lgpga.py
+++ b/tests/test_model/_test_lgpga.py
@@ -78,7 +78,7 @@ def setUp(self) -> None:
         self.gp_model, self.cs = generate_lgpga(self.kernel, n_dimensions=num_dims, rs=rs)
 
     def test_init(self):
-        np.testing.assert_equal(self.gp_model.cont_dims, np.arange(len(self.cs.get_hyperparameters())))
+        np.testing.assert_equal(self.gp_model.cont_dims, np.arange(len(list(self.cs.values()))))
         np.testing.assert_equal(self.gp_model.cat_dims, np.array([]))
 
     def test_update_attribute(self):
diff --git a/tests/test_model/test_gp.py b/tests/test_model/test_gp.py
index 9e21925bc1..69912bf010 100644
--- a/tests/test_model/test_gp.py
+++ b/tests/test_model/test_gp.py
@@ -49,7 +49,7 @@ def get_gp(n_dimensions, seed, noise=1e-3, normalize_y=True) -> GaussianProcess:
 
     configspace = ConfigurationSpace()
     for i in range(n_dimensions):
-        configspace.add_hyperparameter(UniformFloatHyperparameter("x%d" % i, 0, 1))
+        configspace.add(UniformFloatHyperparameter("x%d" % i, 0, 1))
 
     rs = np.random.RandomState(seed)
 
@@ -122,9 +122,9 @@ def get_mixed_gp(cat_dims, cont_dims, seed, noise=1e-3, normalize_y=True):
 
     cs = ConfigurationSpace()
     for c in cont_dims:
-        cs.add_hyperparameter(UniformFloatHyperparameter("X%d" % c, 0, 1))
+        cs.add(UniformFloatHyperparameter("X%d" % c, 0, 1))
     for c in cat_dims:
-        cs.add_hyperparameter(CategoricalHyperparameter("X%d" % c, [0, 1, 2, 3]))
+        cs.add(CategoricalHyperparameter("X%d" % c, [0, 1, 2, 3]))
 
     rs = np.random.RandomState(seed)
 
@@ -416,11 +416,11 @@ def test_normalization():
     """
 
 def test_impute_inactive_hyperparameters():
     cs = ConfigurationSpace()
-    a = cs.add_hyperparameter(CategoricalHyperparameter("a", [0, 1]))
-    b = cs.add_hyperparameter(CategoricalHyperparameter("b", [0, 1]))
-    c = cs.add_hyperparameter(UniformFloatHyperparameter("c", 0, 1))
-    cs.add_condition(EqualsCondition(b, a, 1))
-    cs.add_condition(EqualsCondition(c, a, 0))
+    a = CategoricalHyperparameter("a", [0, 1])
+    b = CategoricalHyperparameter("b", [0, 1])
+    c = UniformFloatHyperparameter("c", 0, 1)
+    cs.add([a, b, c])
+    cs.add([EqualsCondition(b, a, 1), EqualsCondition(c, a, 0)])
     cs.seed(1)
     configs = cs.sample_configuration(size=100)
diff --git a/tests/test_model/test_gp_mcmc.py b/tests/test_model/test_gp_mcmc.py
index da5b947ed5..368320ac0c 100644
--- a/tests/test_model/test_gp_mcmc.py
+++ b/tests/test_model/test_gp_mcmc.py
@@ -47,7 +47,7 @@ def get_gp(n_dimensions, seed, noise=1e-3, normalize_y=True, average_samples=Fal
     configspace = ConfigurationSpace()
     for i in range(n_dimensions):
-        configspace.add_hyperparameter(UniformFloatHyperparameter("x%d" % i, 0, 1))
+        configspace.add(UniformFloatHyperparameter("x%d" % i, 0, 1))
 
     rs = np.random.RandomState(seed)
 
     model = MCMCGaussianProcess(
diff --git a/tests/test_model/test_mo.py b/tests/test_model/test_mo.py
index fd93d59ba1..9cbe178a10 100644
--- a/tests/test_model/test_mo.py
+++ b/tests/test_model/test_mo.py
@@ -14,7 +14,7 @@ def _get_cs(n_dimensions):
 
     configspace = ConfigurationSpace()
     for i in range(n_dimensions):
-        configspace.add_hyperparameter(UniformFloatHyperparameter("x%d" % i, 0, 1))
+        configspace.add(UniformFloatHyperparameter("x%d" % i, 0, 1))
 
     return configspace
 
diff --git a/tests/test_model/test_rf.py b/tests/test_model/test_rf.py
index c81549a16e..7b3add5491 100644
--- a/tests/test_model/test_rf.py
+++ b/tests/test_model/test_rf.py
@@ -19,7 +19,7 @@ def _get_cs(n_dimensions):
 
     configspace = ConfigurationSpace(seed=0)
     for i in range(n_dimensions):
-        configspace.add_hyperparameter(UniformFloatHyperparameter("x%d" % i, 0, 1))
+        configspace.add(UniformFloatHyperparameter("x%d" % i, 0, 1))
 
     return configspace
 
@@ -178,10 +178,10 @@ def test_predict_with_actual_values():
 
 def test_with_ordinal():
     cs = ConfigurationSpace(seed=0)
-    _ = cs.add_hyperparameter(CategoricalHyperparameter("a", [0, 1], default_value=0))
-    _ = cs.add_hyperparameter(OrdinalHyperparameter("b", [0, 1], default_value=1))
-    _ = cs.add_hyperparameter(UniformFloatHyperparameter("c", lower=0.0, upper=1.0, default_value=1))
-    _ = cs.add_hyperparameter(UniformIntegerHyperparameter("d", lower=0, upper=10, default_value=1))
+    cs.add(CategoricalHyperparameter("a", [0, 1], default_value=0))
+    cs.add(OrdinalHyperparameter("b", [0, 1], default_value=1))
+    cs.add(UniformFloatHyperparameter("c", lower=0.0, upper=1.0, default_value=1))
+    cs.add(UniformIntegerHyperparameter("d", lower=0, upper=10, default_value=1))
 
     F = {}
     for i in range(1):
@@ -256,13 +256,12 @@ def test_with_ordinal():
 
 def test_impute_inactive_hyperparameters():
     cs = ConfigurationSpace(seed=0)
-    a = cs.add_hyperparameter(CategoricalHyperparameter("a", [0, 1, 2]))
-    b = cs.add_hyperparameter(CategoricalHyperparameter("b", [0, 1]))
-    c = cs.add_hyperparameter(UniformFloatHyperparameter("c", 0, 1))
-    d = cs.add_hyperparameter(OrdinalHyperparameter("d", [0, 1, 2]))
-    cs.add_condition(EqualsCondition(b, a, 1))
-    cs.add_condition(EqualsCondition(c, a, 0))
-    cs.add_condition(EqualsCondition(d, a, 2))
+    a = CategoricalHyperparameter("a", [0, 1, 2])
+    b = CategoricalHyperparameter("b", [0, 1])
+    c = UniformFloatHyperparameter("c", 0, 1)
+    d = OrdinalHyperparameter("d", [0, 1, 2])
+    cs.add([a, b, c, d])
+    cs.add([EqualsCondition(b, a, 1), EqualsCondition(c, a, 0), EqualsCondition(d, a, 2)])
 
     configs = cs.sample_configuration(size=100)
     config_array = convert_configurations_to_array(configs)
diff --git a/tests/test_multi_objective/test_combined_function.py b/tests/test_multi_objective/test_combined_function.py
index 5b1a8527e0..5db3c85fa4 100644
--- a/tests/test_multi_objective/test_combined_function.py
+++ b/tests/test_multi_objective/test_combined_function.py
@@ -49,7 +49,7 @@ def tae(cfg, seed=0):
 @pytest.fixture
 def configspace():
     cs = ConfigurationSpace()
-    cs.add_hyperparameter(Float("x", (MIN_X, MAX_X), default=MIN_X))
+    cs.add(Float("x", (MIN_X, MAX_X), default=MIN_X))
 
     return cs
 
diff --git a/tests/test_runhistory/test_runhistory_encoder.py b/tests/test_runhistory/test_runhistory_encoder.py
index ac9824534c..638c7fdbf4 100644
--- a/tests/test_runhistory/test_runhistory_encoder.py
+++ b/tests/test_runhistory/test_runhistory_encoder.py
@@ -52,12 +52,12 @@ def test_transform(runhistory, make_scenario, configspace_small, configs):
     # upperbounded by 1.
     upper_bounds = {
         hp.name: (hp.size - 1) if isinstance(hp, CategoricalHyperparameter) else 1.0
-        for hp in configspace_small.get_hyperparameters()
+        for hp in list(configspace_small.values())
     }
     # Need to ensure they match the order in the Configuration vectorized form
     sorted_by_indices = sorted(
         upper_bounds.items(),
-        key=lambda x: configspace_small._hyperparameter_idx[x[0]],
+        key=lambda x: configspace_small.index_of[x[0]],
     )
     upper = np.array([upper_bound for _, upper_bound in sorted_by_indices])
     lower = 0.0