diff --git a/CHANGELOG.md b/CHANGELOG.md
index e360903..8a3ab40 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,18 @@
 # Changelog
 
+## [Untracked Changes]
+### Added
+- Additional optional console outputs at higher verbose settings
+- Parameters in ParamSpace can also be indexed by name
+- ParamSpace now has a search_space property, used to narrow the optimizer search space within the full parameter space
+- Continuous parameters have search_min/search_max; discrete parameters have search_categories
+
+### Modified
+- Optimizer and Campaign X_space attributes are now assigned using a setter
+
+### Removed
+- Torch device references and options (GPU compatibility may be re-added)
+
 ## [0.8.4]
 ### Added
 - Campaign X_best method
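For context on the entries above, a minimal sketch of how the new search-space controls compose. The class names and import path are assumed from the files touched below; the parameter names and values are hypothetical:

```python
from obsidian.parameters import ParamSpace, Param_Continuous, Param_Categorical

# Hypothetical mixed parameter space
X_space = ParamSpace([
    Param_Continuous('Temperature', 0, 100),
    Param_Categorical('Solvent', ['MeOH', 'EtOH', 'THF']),
])

X_space['Temperature'].set_search(20, 80)        # index by name; narrow continuous bounds
X_space['Solvent'].set_search(['MeOH', 'EtOH'])  # narrow discrete categories
X_space.open_search()                            # restore the full parameter space
```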
diff --git a/obsidian/campaign/campaign.py b/obsidian/campaign/campaign.py
index 3c8b8bd..8ed10c6 100644
--- a/obsidian/campaign/campaign.py
+++ b/obsidian/campaign/campaign.py
@@ -47,7 +47,7 @@ def __init__(self,
                  objective: Objective | None = None,
                  seed: int | None = None):
 
-        self.X_space = X_space
+        self.set_X_space(X_space)
         self.data = pd.DataFrame()
 
         optimizer = BayesianOptimizer(X_space, seed=seed) if optimizer is None else optimizer
@@ -101,6 +101,15 @@ def clear_data(self):
         self.data = pd.DataFrame()
         self.iter = 0
 
+    @property
+    def X_space(self) -> ParamSpace:
+        """Campaign ParamSpace"""
+        return self._X_space
+
+    def set_X_space(self, X_space: ParamSpace):
+        """Sets the campaign ParamSpace"""
+        self._X_space = X_space
+
     @property
     def optimizer(self) -> Optimizer:
         """Campaign Optimizer"""
@@ -342,6 +351,8 @@ def suggest(self, **optim_kwargs):
         """
         if self.optimizer.is_fit:
             try:
+                # In case X_space has changed, re-set the optimizer X_space
+                self.optimizer.set_X_space(self.X_space)
                 X, eval = self.optimizer.suggest(objective=self.objective, **optim_kwargs)
                 return (X, eval)
             except Exception:
@@ -371,11 +382,11 @@ def _profile_hv(self):
         for i in iters:
             iter_index = self.data.query(f'Iteration <= {i}').index
             out_iter = self.out.loc[iter_index, :]
-            out_iter = torch.tensor(out_iter.values).to(self.optimizer.device)
+            out_iter = torch.tensor(out_iter.values)
             hv[i] = self.optimizer.hypervolume(out_iter)
         self.data['Hypervolume (iter)'] = self.data.apply(lambda x: hv[x['Iteration']], axis=1)
-        self.data['Pareto Front'] = self.optimizer.pareto(torch.tensor(self.out.values).to(self.optimizer.device))
+        self.data['Pareto Front'] = self.optimizer.pareto(torch.tensor(self.out.values))
 
         return
diff --git a/obsidian/experiment/design.py b/obsidian/experiment/design.py
index dae3758..eaa95f0 100644
--- a/obsidian/experiment/design.py
+++ b/obsidian/experiment/design.py
@@ -90,8 +90,7 @@ def initialize(self,
 
     if seed is not None:
         torch.manual_seed(seed)
-        if not torch.cuda.is_available():
-            torch.use_deterministic_algorithms(True)
+        torch.use_deterministic_algorithms(True)
 
     if sample_custom is not None:
         if sample_custom.shape[1] != d:
diff --git a/obsidian/optimizer/base.py b/obsidian/optimizer/base.py
index f4b5d73..9fbc6af 100644
--- a/obsidian/optimizer/base.py
+++ b/obsidian/optimizer/base.py
@@ -44,15 +44,25 @@ def __init__(self,
         self.seed = seed
         if self.seed is not None:
             torch.manual_seed(self.seed)
-            if not torch.cuda.is_available():
-                torch.use_deterministic_algorithms(True)
+            torch.use_deterministic_algorithms(True)
             np.random.seed(self.seed)
             random.seed(self.seed)
 
         # Store the parameter space which contains useful reference properties
         if not isinstance(X_space, ParamSpace):
             raise TypeError('X_space must be an obsidian ParamSpace object')
-        self.X_space = X_space
+        self.set_X_space(X_space)
+
+    @property
+    def X_space(self):
+        """
+        ParamSpace: The parameter space defining the search space for the optimization.
+        """
+        return self._X_space
+
+    def set_X_space(self, X_space: ParamSpace):
+        """Sets the optimizer ParamSpace"""
+        self._X_space = X_space
+        return
 
     def _fixed_features(self,
                         fixed_var: dict | None = None) -> list:
@@ -101,7 +111,7 @@
 
         # First, get the cartesian product of all of the categorical/ordinal combos
         for x in self.X_space.X_discrete:
             if x.name not in fixed_var.keys():  # Fixed_var should take precedence and lock out other combinations
-                df_i = pd.DataFrame({x.name: x.categories})
+                df_i = pd.DataFrame({x.name: x.search_categories})
                 df_list.append(df_i)
 
         # Merge by cross
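The `_fixed_features` change above is where discrete search narrowing takes effect: the candidate grid is built from each parameter's `search_categories` rather than its full category list. A standalone illustration of that cross-merge (parameter names are hypothetical):

```python
import pandas as pd

# One single-column frame per discrete parameter, as in the loop above,
# but containing only the allowed search categories
df_list = [
    pd.DataFrame({'Solvent': ['MeOH', 'EtOH']}),
    pd.DataFrame({'Base': ['KOH', 'NaOH', 'CsOH']}),
]

# "Merge by cross": the cartesian product of allowed category combinations
combos = df_list[0]
for df_i in df_list[1:]:
    combos = combos.merge(df_i, how='cross')

print(combos)  # 6 rows, one per (Solvent, Base) combination
```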
diff --git a/obsidian/optimizer/bayesian.py b/obsidian/optimizer/bayesian.py
index ad5d42e..f3a3e55 100644
--- a/obsidian/optimizer/bayesian.py
+++ b/obsidian/optimizer/bayesian.py
@@ -64,7 +64,6 @@ class BayesianOptimizer(Optimizer):
     Attributes:
         surrogate_type (list[str]): The shorthand name of each surrogate model.
        surrogate_hps (list[dict]): The hyperparameters for each surrogate model.
-        device (str): The device to use for computations ('cuda' if available, 'cpu' otherwise).
        is_fit (bool): Indicates whether the surrogate model has been fit to data.
 
     Raises:
@@ -122,8 +121,6 @@ def _load_surrogate_dict(surrogate_dict):
             if surrogate_str not in model_class_dict.keys():
                 raise KeyError(f'Surrogate model must be selected from one of: {model_class_dict.keys()}')
 
-        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
-
         return
 
     @property
@@ -230,7 +227,7 @@ def fit(self,
         for i in range(self.n_response):
             self.surrogate.append(
                 SurrogateBoTorch(model_type=self.surrogate_type[i], seed=self.seed,
-                                 verbose=self.verbose, hps=self.surrogate_hps[i]))
+                                 verbose=self.verbose >= 2, hps=self.surrogate_hps[i]))
 
             # Handle response NaN values on a response-by-response basis
             f_train_i = self.f_train.iloc[:, i]
@@ -246,9 +243,11 @@
             self.surrogate[i].fit(X_t_train_valid, f_train_i_valid,
                                   cat_dims=self.X_space.X_t_cat_idx, task_feature=self.X_space.X_t_task_idx)
 
-            if self.verbose > 0:
-                print(f'{self.surrogate_type[i]} model has been fit \
-                      to data with a train-score of: {self.surrogate[i].r2_score:.3g} for response: {self.y_names[i]}')
+            if self.verbose >= 1:
+                print(f'{self.surrogate_type[i]} model has been fit to data'
+                      + f' with an R2-train-score of: {self.surrogate[i].r2_score:.3g}'
+                      + (f' and a training-loss of: {self.surrogate[i].loss:.3g}' if self.verbose >= 2 else '')
+                      + f' for response: {self.y_names[i]}')
 
         return
 
     def save_state(self) -> dict:
@@ -381,6 +380,9 @@ def predict(self,
             raise NameError('X for prediction does not contain all of the \
                             required predictors from the training set')
 
+        if self.verbose >= 3:
+            print(f'Predicting {X.shape[0]} experiments [...]')
+
         X_names = list(self.X_space.X_names)
         X_pred = X[X_names].dropna(subset=X_names)  # Reinforce order and non-nan before proceeding
         nan_indices = np.where(pd.isnull(X[X_names]).any(axis=1))[0].tolist()
@@ -516,7 +518,9 @@ def _parse_aq_kwargs(self,
 
         # If using an objective, want to calculate EI/PI from here
         o = f_t if not objective else objective(f_t.unsqueeze(0), X_baseline).squeeze(0)
-
+        if objective:
+            aq_kwargs['objective'] = objective
+
         # Improvement aqs based on inflation or deflation of best point
         if aq in ['EI', 'PI']:
             o_max = o.max(dim=0).values * (1+hps['inflate'])
@@ -552,9 +556,12 @@
             aq_kwargs['partitioning'] = NondominatedPartitioning(aq_kwargs['ref_point'], Y=o)
 
         if aq == 'NIPV':
-            X_bounds = torch.tensor([[0.0, 1.0]]*self.X_space.n_tdim, dtype=TORCH_DTYPE).T.to(self.device)
+            X_bounds = torch.tensor([[0.0, 1.0]]*self.X_space.n_tdim, dtype=TORCH_DTYPE).T
             qmc_samples = draw_sobol_samples(bounds=X_bounds, n=128, q=m_batch)
             aq_kwargs['mc_points'] = qmc_samples.squeeze(-2)
+            aq_kwargs['sampler'] = None
+            if objective:
+                raise UnsupportedError('NIPV does not support objectives')
 
         if aq == 'NParEGO':
             w = hps['scalarization_weights']
@@ -660,10 +667,15 @@ def suggest(self,
 
         if not self.is_fit:
             raise UnfitError('Surrogate model must be fit before suggesting new experiments')
-
+
+        if self.verbose >= 2:
+            print(f'Optimizing {m_batch} experiments [...]')
+
         # Use indexing to handle if suggestions are made for a subset of fit targets/surrogates
         target = self._validate_target(target)
         target_locs = [self.y_names.index(t.name) for t in target]
+
+        # Select the model(s) to use for optimization
         model_list = [one_surrogate.torch_model for i, one_surrogate in enumerate(self.surrogate) if i in target_locs]
         if all(isinstance(m, GPyTorchModel) for m in model_list):
             model = ModelListGP(*model_list)
@@ -685,10 +697,11 @@
 
         optim_type = 'single' if o_dim == 1 else 'multi'
 
-        # Default to noisy expected improvement if no aq method is provided
+        # Default if no aq method is provided
         if not acquisition:
             acquisition = [aq_defaults[optim_type]]
 
+        # Type check for acquisition
         if not isinstance(acquisition, list):
             raise TypeError('acquisition must be a list of strings or dictionaries')
         if not all(isinstance(item, (str, dict)) for item in acquisition):
@@ -702,14 +715,16 @@
             samplers = []
             for m in model.models:
                 if isinstance(m, DNN):
-                    sampler_i = IndexSampler(sample_shape=torch.Size([optim_samples]), seed=self.seed).to(self.device)
+                    sampler_i = IndexSampler(sample_shape=torch.Size([optim_samples]), seed=self.seed)
                 else:
-                    sampler_i = SobolQMCNormalSampler(sample_shape=torch.Size([optim_samples]), seed=self.seed).to(self.device)
+                    sampler_i = SobolQMCNormalSampler(sample_shape=torch.Size([optim_samples]), seed=self.seed)
                 samplers.append(sampler_i)
             sampler = ListSampler(*samplers)
         else:
-            sampler = SobolQMCNormalSampler(sample_shape=torch.Size([optim_samples]), seed=self.seed).to(self.device)
-        X_bounds = torch.tensor([[0.0, 1.0]]*self.X_space.n_tdim, dtype=TORCH_DTYPE).T.to(self.device)
+            sampler = SobolQMCNormalSampler(sample_shape=torch.Size([optim_samples]), seed=self.seed)
+
+        # Calculate search bounds for optimization
+        X_bounds = torch.tensor(self.X_space.search_space.values, dtype=TORCH_DTYPE)
 
         # Set up master lists to hold the candidates from multi-acquisition results
         candidates_all = []
@@ -744,11 +759,8 @@
 
             # Use aq_kwargs so that extra unnecessary ones in hps get removed for certain aq funcs
             aq_kwargs = {'model': model, 'sampler': sampler, 'X_pending': X_t_pending}
-            if aq_str != 'NIPV':
-                aq_kwargs['objective'] = objective
-            else:
-                aq_kwargs['sampler'] = None
-
+            aq_kwargs.update(self._parse_aq_kwargs(aq_str, aq_hps, m_batch, target_locs, X_t_pending, objective))
+            # Type check for constraints
             for constraint_type in eq_constraints, ineq_constraints, nleq_constraints, out_constraints:
                 if constraint_type:
@@ -776,8 +788,6 @@
             if fixed_features_list:
                 raise UnsupportedError('Nonlinear constraints are not supported with discrete features.')
 
-            aq_kwargs.update(self._parse_aq_kwargs(aq_str, aq_hps, m_batch, target_locs, X_t_pending, objective))
-
             # Hypervolume aqs fail with X_t_pending when optim_sequential=True
             if aq_str in ['NEHVI', 'EHVI']:
                 optim_sequential = False
@@ -804,6 +814,9 @@
                                          options=optim_options, **optim_kwargs)
 
+            if self.verbose >= 2:
+                print(f'Optimized {aq_str} acquisition function successfully')
+
             candidates_i = self.X_space.decode(
                 pd.DataFrame(candidates.detach().cpu().numpy(),
                              columns=[col for col in self.X_t_train.columns if col not in self.X_space.X_task]))
@@ -857,7 +870,7 @@ def evaluate(self,
         """
         if not self.is_fit:
-            raise UnfitError('Surrogate model must be fit before suggesting new experiments')
+            raise UnfitError('Surrogate model must be fit before evaluating new experiments')
 
         # Use indexing to handle if suggestions are made for a subset of fit targets/surrogates
         target = self._validate_target(target)
@@ -865,8 +878,8 @@
         # Begin evaluation with y_predict with pred interval
         eval_suggest = self.predict(X_suggest)
 
-        X_t = torch.tensor(self.X_space.encode(X_suggest).values, dtype=TORCH_DTYPE).to(self.device)
-        X_t_train = torch.tensor(self.X_space.encode(self.X_train).values, dtype=TORCH_DTYPE).to(self.device)
+        X_t = torch.tensor(self.X_space.encode(X_suggest).values, dtype=TORCH_DTYPE)
+        X_t_train = torch.tensor(self.X_space.encode(self.X_train).values, dtype=TORCH_DTYPE)
 
         # Evaluate f_predict on new and pending points
         f_all = []
@@ -924,27 +937,25 @@
         optim_type = 'single' if o_dim == 1 else 'multi'
 
         if eval_aq:
-            # Default to noisy expected improvement if no aq method is provided
+            # Default if no aq method is provided
             if not acquisition:
                 acquisition = [aq_defaults[optim_type]]
 
             if not isinstance(acquisition, (str, dict)):
                 raise TypeError('Acquisition must be either a string or a dictionary')
 
-            # Extract acq function names and custom hyperparameters from the 'acquisition' list in config
-            aq_str, aq_hps = self._validate_hypers(o_dim, acquisition)
-
             model_list = [one_surrogate.torch_model for i, one_surrogate in enumerate(self.surrogate) if i in target_locs]
             if all(isinstance(m, GPyTorchModel) for m in model_list):
                 model = ModelListGP(*model_list)
             else:
                 model = ModelList(*model_list)
+
+            # Extract acq function names and custom hyperparameters from the 'acquisition' list in config
+            aq_str, aq_hps = self._validate_hypers(o_dim, acquisition)
 
             # Use aq_kwargs so that extra unnecessary ones in hps get removed for certain aq funcs
-            aq_kwargs = {'model': model, 'X_pending': X_t_pending}
-            if aq_str != 'NIPV':
-                aq_kwargs['objective'] = objective
-
+            aq_kwargs = {'model': model, 'sampler': None, 'X_pending': X_t_pending}
+            aq_kwargs.update(self._parse_aq_kwargs(aq_str, aq_hps, X_suggest.shape[0], target_locs, X_t_pending, objective))
 
             # If it's random search, no need to evaluate aq
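For reference, the calling convention these `suggest` changes exercise, with acquisition entries given as plain strings or `{name: hyperparameters}` dictionaries. This is a sketch: `optimizer` is assumed to be an already-fit `BayesianOptimizer`, and the argument values are hypothetical:

```python
X_suggest, eval_suggest = optimizer.suggest(
    m_batch=4,                                       # number of experiments to propose
    acquisition=['NEI', {'EI': {'inflate': 0.05}}],  # str or {name: hps} entries
    fixed_var={'Solvent': 'MeOH'},                   # lock a discrete parameter
)
```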
diff --git a/obsidian/parameters/base.py b/obsidian/parameters/base.py
index 95d02c1..25d1222 100644
--- a/obsidian/parameters/base.py
+++ b/obsidian/parameters/base.py
@@ -22,6 +22,11 @@ def _validate_value(self,
         """Validate data inputs"""
         pass  # pragma: no cover
 
+    @abstractmethod
+    def set_search(self):
+        """Set the search space for the parameter"""
+        pass  # pragma: no cover
+
     @abstractmethod
     def encode(X):
         """Encode parameter to a format that can be used for training"""
diff --git a/obsidian/parameters/continuous.py b/obsidian/parameters/continuous.py
index f8b4b56..877e0d1 100644
--- a/obsidian/parameters/continuous.py
+++ b/obsidian/parameters/continuous.py
@@ -40,6 +40,26 @@ def range(self):
         """The range of the parameter (max - min)"""
         return self.max-self.min
 
+    def set_search(self,
+                   search_min: int | float,
+                   search_max: int | float):
+        """
+        Set the search space for the parameter
+
+        Args:
+            search_min (int or float): The minimum value of the search space.
+            search_max (int or float): The maximum value of the search space.
+        """
+        for val in [search_min, search_max]:
+            self._validate_value(val)
+
+        self.search_min = search_min
+        self.search_max = search_max
+
+    def open_search(self):
+        """Set the search space to the full parameter space"""
+        self.set_search(self.min, self.max)
+
     def __repr__(self):
         """String representation of object"""
         return f"{self.__class__.__name__}(name={self.name}, min={self.min}, max={self.max})"
@@ -63,7 +83,9 @@ def _validate_value(self,
                         value: int | float):
 
     def __init__(self,
                  name: str,
                  min: int | float,
-                 max: int | float):
+                 max: int | float,
+                 search_min: int | float | None = None,
+                 search_max: int | float | None = None):
         super().__init__(name=name)
         if max < min:
             warnings.warn(f'Minimum value {min} is greater than maximum value {max}. Auto-swapping values.', UserWarning)
         self.min = min
         self.max = max
         for val in [min, max]:
             self._validate_value(val)
+
+        # Set the search space to the parameter space by default
+        if search_min is None:
+            search_min = self.min
+        if search_max is None:
+            search_max = self.max
+        self.set_search(search_min, search_max)
 
 
 class Param_Observational(Param_Continuous):
@@ -89,6 +118,8 @@
     def __init__(self,
                  name: str,
                  min: int | float,
                  max: int | float,
+                 search_min: int | float | None = None,
+                 search_max: int | float | None = None,
                  design_point: int | float | None = None):
-        super().__init__(name=name, min=min, max=max)
+        super().__init__(name=name, min=min, max=max, search_min=search_min, search_max=search_max)
         self.design_point = design_point = design_point if design_point is not None else max
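One behavioral note worth calling out: omitted search bounds default to the full parameter range, so existing call sites are unchanged. A brief sketch (import path assumed, values hypothetical):

```python
from obsidian.parameters import Param_Continuous

temp = Param_Continuous('Temperature', 0, 100)         # search space defaults to [0, 100]
temp = Param_Continuous('Temperature', 0, 100,
                        search_min=20, search_max=80)  # constructed pre-narrowed
temp.set_search(40, 60)   # tighten later; values pass the same validation as min/max
temp.open_search()        # reset the search window to the full range
```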
diff --git a/obsidian/parameters/discrete.py b/obsidian/parameters/discrete.py
index 4eea17d..f9e68bc 100644
--- a/obsidian/parameters/discrete.py
+++ b/obsidian/parameters/discrete.py
@@ -71,6 +71,23 @@ def max(self):
         """Maximum parameter value (nc-1)"""
         return self.nc-1
 
+    def set_search(self,
+                   search_categories: list[str]):
+        """
+        Set the search space for the parameter
+
+        Args:
+            search_categories (list[str]): The search space for the parameter.
+        """
+        for c in search_categories:
+            self._validate_value(c)
+
+        self.search_categories = search_categories
+
+    def open_search(self):
+        """Set the search space to the full parameter space"""
+        self.set_search(self.categories)
+
     def _validate_value(self,
                         value: str):
         """
@@ -91,7 +108,8 @@
     def __init__(self,
                  name: str,
-                 categories: str | list[str]):
+                 categories: str | list[str],
+                 search_categories: str | list[str] | None = None):
         super().__init__(name=name)
         if isinstance(categories, str):
             self.categories = categories.split(',')
@@ -100,6 +118,15 @@
             self.categories = categories
         for c in self.categories:
             self._validate_value(c)
+
+        # Set the search space to the parameter space by default
+        if search_categories is None:
+            search_categories = self.categories
+        else:
+            if isinstance(search_categories, str):
+                search_categories = search_categories.split(',')
+            search_categories = [c.strip() for c in search_categories]
+        self.set_search(search_categories)
 
     def __repr__(self):
         """String representation of object"""
diff --git a/obsidian/parameters/param_space.py b/obsidian/parameters/param_space.py
index b4fe07a..bc8a2b5 100644
--- a/obsidian/parameters/param_space.py
+++ b/obsidian/parameters/param_space.py
@@ -40,6 +40,7 @@ class ParamSpace(ABC):
         X_t_discrete_idx (list[int]): A list of the indices of the transformed discrete parameters.
         X_t_cat_idx (list[int]): A list of the indices of the transformed categorical parameters.
         X_t_task_idx (int): The index of the transformed task parameter.
+        search_space (pd.DataFrame): The allowable search space for future optimization.
 
     Raises:
         ValueError: If the X_names are not unique.
@@ -118,8 +119,10 @@ def __repr__(self):
         """String representation of object"""
         return f"{self.__class__.__name__}(params={[p.name for p in self]})"
 
-    def __getitem__(self, index: int) -> Parameter:
-        """Retrieve a parameter by index"""
+    def __getitem__(self, index: int | str) -> Parameter:
+        """Retrieve a parameter by index or name"""
+        if isinstance(index, str):
+            index = self.X_names.index(index)
         return self.params[index]
 
     def map_transform(self) -> dict:
@@ -217,6 +220,38 @@ def decode(self, X):
         """Decode parameter from transformed space"""
         return self._transform(X, type='decode')
 
+    @property
+    def search_space(self) -> pd.DataFrame:
+        """
+        Returns the search space for the parameter space.
+
+        Returns:
+            pd.DataFrame: A dataframe containing the search space for the parameter space.
+        """
+        # Establish the boundaries in the transformed (encoded) space
+        X_search_t = pd.DataFrame()
+        for param in self:
+            # For continuous, encode the continuous bounds
+            if isinstance(param, Param_Continuous):
+                cont_bounds = pd.DataFrame(param.encode([param.search_min, param.search_max]), columns=[param.name])
+                X_search_t = pd.concat([X_search_t, cont_bounds], axis=1)
+
+            # For discrete, encode the available categories, then log the min-max of encoded columns
+            elif isinstance(param, Param_Discrete):
+                # Discrete search categories aren't actually enforced here; they are handled in optimizer._fixed_features()
+                cat_e = param.encode(param.search_categories)
+                disc_bounds = pd.DataFrame(np.vstack([[0]*cat_e.shape[-1], cat_e.max().values]),
+                                           columns=cat_e.columns)
+                X_search_t = pd.concat([X_search_t, disc_bounds], axis=1)
+
+        return X_search_t
+
+    def open_search(self):
+        """Set the search space to the full parameter space"""
+        for param in self:
+            param.open_search()
+
     def save_state(self) -> dict:
         """
         Saves the state of the ParamSpace object.
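The `search_space` property is what hands the optimizer its bounds in encoded space: continuous columns carry the encoded `search_min`/`search_max`, while discrete columns span the encoded categories (the actual category restriction happens in `optimizer._fixed_features`, per the comment above). A usage sketch, continuing the hypothetical space from earlier:

```python
X_space['Temperature'].set_search(20, 80)
bounds = X_space.search_space  # pd.DataFrame; row 0 = lower, row 1 = upper bounds
# BayesianOptimizer.suggest() consumes this as:
#   X_bounds = torch.tensor(X_space.search_space.values, dtype=TORCH_DTYPE)
X_space.open_search()          # reopen the full space afterwards
```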
diff --git a/obsidian/surrogates/base.py b/obsidian/surrogates/base.py
index b827874..64bde0e 100644
--- a/obsidian/surrogates/base.py
+++ b/obsidian/surrogates/base.py
@@ -48,8 +48,7 @@ def __init__(self,
         self.seed = seed
         if self.seed is not None:
             torch.manual_seed(self.seed)
-            if not torch.cuda.is_available():
-                torch.use_deterministic_algorithms(True)
+            torch.use_deterministic_algorithms(True)
             np.random.seed(self.seed)
             random.seed(self.seed)
 
@@ -93,9 +92,9 @@ def _prepare(self,
             tuple: A tuple containing the converted input data (X_torch) and target data (Y_torch) as torch Tensors.
         """
         self._validate_data(X, y)
-        X_torch = torch.tensor(X.to_numpy(), dtype=TORCH_DTYPE).to(self.device)
+        X_torch = torch.tensor(X.to_numpy(), dtype=TORCH_DTYPE)
         if y is not None:
-            y_torch = torch.tensor(y.to_numpy(), dtype=TORCH_DTYPE).to(self.device).unsqueeze(-1)
+            y_torch = torch.tensor(y.to_numpy(), dtype=TORCH_DTYPE).unsqueeze(-1)
             return (X_torch, y_torch)
         else:
             return X_torch
diff --git a/obsidian/surrogates/botorch.py b/obsidian/surrogates/botorch.py
index 109954b..25d668e 100644
--- a/obsidian/surrogates/botorch.py
+++ b/obsidian/surrogates/botorch.py
@@ -41,7 +41,6 @@ class SurrogateBoTorch(SurrogateModel):
             - ``'DNN'``: Dropout neural network. Uses MC sampling to mask neurons during training and to estimate uncertainty.
 
-        device (str): The device on which the model is run.
         hps (dict): Optional surrogate function hyperparameters.
         mll (ExactMarginalLogLikelihood): The marginal log likelihood of the model.
         torch_model (torch.nn.Module): The torch model for the surrogate.
@@ -51,16 +50,11 @@
     def __init__(self,
                  model_type: str = 'GP',
                  seed: int | None = None,
-                 verbose: int | None = False,
+                 verbose: bool = False,
                  hps: dict = {}):
 
         super().__init__(model_type=model_type, seed=seed, verbose=verbose)
 
-        if torch.cuda.is_available():
-            self.device = 'cuda'
-        else:
-            self.device = 'cpu'
-
         # Optional surrogate function hyperparameters
         self.hps = hps
 
@@ -102,16 +96,16 @@ def init_model(self,
         if issubclass(model_class_dict[self.model_type], GPyTorchModel):
             if self.model_type == 'GP' and cat_dims:  # If cat_dims is not an empty list, returns True
-                self.torch_model = model_class_dict['MixedGP'](train_X=X_p, train_Y=y_p, cat_dims=cat_dims).to(self.device)
+                self.torch_model = model_class_dict['MixedGP'](train_X=X_p, train_Y=y_p, cat_dims=cat_dims)
             else:
                 if self.model_type == 'MTGP':
                     self.torch_model = model_class_dict[self.model_type](
-                        train_X=X_p, train_Y=y_p, task_feature=task_feature, **self.hps).to(self.device)
+                        train_X=X_p, train_Y=y_p, task_feature=task_feature, **self.hps)
                 else:
                     # Note: Doesn't matter if input empty dictionary as self.hps for model without those additional args
-                    self.torch_model = model_class_dict[self.model_type](train_X=X_p, train_Y=y_p, **self.hps).to(self.device)
+                    self.torch_model = model_class_dict[self.model_type](train_X=X_p, train_Y=y_p, **self.hps)
         else:
-            self.torch_model = model_class_dict[self.model_type](train_X=X_p, train_Y=y_p, **self.hps).to(self.device).to(TORCH_DTYPE)
+            self.torch_model = model_class_dict[self.model_type](train_X=X_p, train_Y=y_p, **self.hps).to(TORCH_DTYPE)
 
         return
 
@@ -143,6 +137,9 @@ def fit(self,
         self.task_feature = task_feature
 
         # Train
+        if self.verbose:
+            print('Fitting surrogate model [...]')
+
         if isinstance(self.torch_model, GPyTorchModel):
             self.loss_fcn = ExactMarginalLogLikelihood(self.torch_model.likelihood, self.torch_model)
             if self.model_type == 'DKL':
@@ -168,6 +165,9 @@
                 loss = self.loss_fcn(output, y_p)
                 loss.backward()
                 self.optimizer.step()
+
+                if self.verbose and epoch % 50 == 0:
+                    print(f'Epoch {epoch}: Loss {loss.item()}')
 
         self.torch_model.eval()
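Read together with the `verbose=self.verbose >= 2` hand-off in `BayesianOptimizer.fit`, the verbosity tiers implied by this change are roughly: 0 is silent; 1 adds fit summaries; 2 adds optimization progress, surrogate-fitting messages, and an epoch loss every 50 epochs for epoch-trained (non-GP) surrogates; 3 adds per-prediction messages. For example, using the constructor call pattern from the tests below:

```python
# verbose=2: fit summaries with R2 and loss, 'Optimizing ... experiments',
# surrogate fitting progress, and periodic epoch losses for the DNN surrogate
optimizer = BayesianOptimizer(X_space, surrogate='DNN', seed=0, verbose=2)
```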
diff --git a/obsidian/tests/test_optimizer_MOO.py b/obsidian/tests/test_optimizer_MOO.py
index ba35068..5740c32 100644
--- a/obsidian/tests/test_optimizer_MOO.py
+++ b/obsidian/tests/test_optimizer_MOO.py
@@ -40,7 +40,7 @@ def Z0(X_space):
                          pytest.param('DKL', marks=pytest.mark.slow),
                          'DNN'])
 def test_optimizer_fit(X_space, surrogate, Z0, serial_test=True):
-    optimizer = BayesianOptimizer(X_space, surrogate=surrogate, seed=0, verbose=0)
+    optimizer = BayesianOptimizer(X_space, surrogate=surrogate, seed=0, verbose=3)
 
     tol = 1e-2 if surrogate == 'DNN' else 1e-5
 
@@ -94,6 +94,15 @@ def test_optimizer_suggest(m_batch, fixed_var):
     df_suggest = pd.concat([X_suggest, eval_suggest], axis=1)
 
 
+def test_suggest_searchspace():
+    optimizer.X_space[0].set_search(2, 8)
+
+    X_suggest, eval_suggest = optimizer.suggest(m_batch=2, **test_config)
+    df_suggest = pd.concat([X_suggest, eval_suggest], axis=1)
+
+    optimizer.X_space.open_search()
+
+
 test_aqs = ['NEHVI',
             {'NEHVI': {'ref_point': [0.1, 0.1]}},
             'EHVI',
diff --git a/obsidian/tests/test_optimizer_SOO.py b/obsidian/tests/test_optimizer_SOO.py
index 69afa27..8477709 100644
--- a/obsidian/tests/test_optimizer_SOO.py
+++ b/obsidian/tests/test_optimizer_SOO.py
@@ -63,7 +63,7 @@ def test_f_transform(X_space, Z0, f_transform):
                          pytest.param('DKL', marks=pytest.mark.slow),
                          'DNN'])
 def test_optimizer_fit(X_space, surrogate, Z0, serial_test=True):
-    optimizer = BayesianOptimizer(X_space, surrogate=surrogate, seed=0, verbose=0)
+    optimizer = BayesianOptimizer(X_space, surrogate=surrogate, seed=0, verbose=3)
 
     if surrogate == 'GPflat' and not X_space.X_cont:
         # GPflat will fail with a purely categorical space because the design matrix is not p.d.
@@ -121,6 +121,16 @@ def test_optimizer_suggest(m_batch, fixed_var):
     df_suggest = pd.concat([X_suggest, eval_suggest], axis=1)
 
 
+def test_suggest_searchspace():
+    optimizer.X_space[0].set_search(2, 8)
+    optimizer.X_space[3].set_search(['A', 'C'])
+
+    X_suggest, eval_suggest = optimizer.suggest(m_batch=2, **test_config)
+    df_suggest = pd.concat([X_suggest, eval_suggest], axis=1)
+
+    optimizer.X_space.open_search()
+
+
 test_aqs = ['NEI',
             'EI',
             {'EI': {'inflate': 0.05}},
diff --git a/obsidian/tests/test_parameters.py b/obsidian/tests/test_parameters.py
index f38af40..05ae94a 100644
--- a/obsidian/tests/test_parameters.py
+++ b/obsidian/tests/test_parameters.py
@@ -141,6 +141,13 @@ def test_param_encoding_types(param, type_i):
 string_list = ['A', 'B', 'C', 'D']
 
 
+@pytest.mark.fast
+def test_param_indexing():
+    X_space = ParamSpace(test_params)
+    p0 = X_space[0]
+    assert X_space['Parameter 1'] is p0
+
+
 @pytest.mark.fast
 def test_numeric_param_validation():
     # Strings for numeric