Major improvements to testing design and coverage
kstone40 committed Aug 14, 2024
1 parent 00f3d2b commit 5618593
Showing 12 changed files with 376 additions and 211 deletions.
7 changes: 5 additions & 2 deletions .flake8
@@ -10,8 +10,11 @@ per-file-ignores =
__init__.py: F401, F403
obsidian/dash/*: F401, F403

# Often creating variables but not accessing them in testing
obsidian/tests/*: F841
# Often importing and creating unaccessed objects during testing
obsidian/tests/*: F401, F841

# No good way around comparing types for recursive state-dict comparison
obsidian/tests/utils.py: E721

exclude =
projects/
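
The E721 exemption above exists because the shared test utilities compare serialized state dicts recursively and need explicit type checks. The utils.py implementation is not part of this diff; a minimal sketch of what such a helper (named equal_state_dicts, as imported in test_campaign.py below) might look like, assuming the states come from json.load as in the tests:

# Hypothetical sketch only; the actual obsidian/tests/utils.py is not shown in this diff
def equal_state_dicts(d1, d2) -> bool:
    """Recursively compare two serialized state dicts."""
    if type(d1) != type(d2):  # noqa: E721 - explicit type comparison is the point
        return False
    if isinstance(d1, dict):
        # Same keys, and every value compares equal recursively
        return d1.keys() == d2.keys() and all(
            equal_state_dicts(d1[k], d2[k]) for k in d1
        )
    if isinstance(d1, (list, tuple)):
        return len(d1) == len(d2) and all(
            equal_state_dicts(a, b) for a, b in zip(d1, d2)
        )
    return d1 == d2
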
25 changes: 16 additions & 9 deletions obsidian/tests/param_configs.py
@@ -1,6 +1,9 @@
"""Preset parameter configurations for unit testing"""

from obsidian.parameters import Param_Continuous, Param_Ordinal, Param_Categorical, \
Param_Observational, Param_Discrete_Numeric, ParamSpace

# Set up a master list of parameter spaces for testing
params = [
Param_Continuous('Parameter 1', 0, 10),
Param_Continuous('Parameter 2', -20, 0),
@@ -15,14 +18,25 @@
Param_Ordinal('Parameter 11', ['N'])
]

# Subset some default selections
default = [params[i] for i in [0, 1, 2, 6]] # 2 continuous, 1 static, 1 categorical
X_sp_default = ParamSpace(params=default)

# Numeric
cont_small = [params[i] for i in [0, 1, 2]] # continuous including edge cases
numeric = [params[i] for i in [0, 1, 2, 3, 4, 5]] # numeric including edge cases
X_sp_cont_small = ParamSpace(params=cont_small)
X_sp_numeric = ParamSpace(params=numeric)

# Nominal
cat_small = [params[i] for i in [6, 7, 8]] # categorical including edge cases
disc_small = [params[i] for i in [6, 9]] # 1 categorical, 1 ordinal
disc_large = [params[i] for i in [6, 7, 8, 9, 10]] # discrete including edge cases
X_sp_cat_small = ParamSpace(params=cat_small)
X_sp_disc_small = ParamSpace(params=disc_small)
X_sp_disc_large = ParamSpace(params=disc_large)

# Set up a range of continuous parameters
params_cont_large = [
Param_Continuous('Parameter 1', 0, 10),
Param_Continuous('Parameter 2', 0, 10),
@@ -37,15 +51,8 @@
Param_Continuous('Parameter 11', 0, 10),
Param_Continuous('Parameter 12', 0, 10),
]

X_sp_default = ParamSpace(params=default)
X_sp_cont_small = ParamSpace(params=cont_small)
X_sp_cont_large = ParamSpace(params=params_cont_large)
X_sp_numeric = ParamSpace(params=numeric)
X_sp_cat_small = ParamSpace(params=cat_small)
X_sp_disc_small = ParamSpace(params=disc_small)
X_sp_disc_large = ParamSpace(params=disc_large)
X_sp_cont_ndims = [ParamSpace(params_cont_large[:i]) for i in range(len(params_cont_large))]

# Wrap everything for iteration during testing
test_X_space = [X_sp_default, X_sp_cont_small, X_sp_numeric, X_sp_cat_small, X_sp_disc_small, X_sp_disc_large]

X_sp_cont_ndims = [ParamSpace(params_cont_large[:i]) for i in range(len(params_cont_large))]
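
The test_X_space list above is the hook that other test modules parametrize over (test_experiment.py imports it below). A minimal illustration of that pattern; the test body here is hypothetical and only a smoke test:

# Illustrative only: the decorator pattern matches how test_X_space is consumed
# in test_experiment.py; this particular test does not exist in the repo.
import pytest
from obsidian.tests.param_configs import test_X_space

@pytest.mark.parametrize('X_space', test_X_space)
def test_X_space_repr(X_space):
    # Each preset ParamSpace becomes one test case
    assert isinstance(repr(X_space), str)
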
99 changes: 54 additions & 45 deletions obsidian/tests/test_campaign.py
@@ -1,21 +1,21 @@
"""PyTests for obsidian.campaign"""

from obsidian.tests.param_configs import X_sp_cont_ndims, X_sp_default
from obsidian.parameters import Target
from obsidian.experiment import Simulator
from obsidian.experiment.benchmark import two_leaves, shifted_parab
from obsidian.campaign import Campaign, Explainer, calc_ofat_ranges
from obsidian.objectives import Identity_Objective, Scalar_WeightedNorm, Feature_Objective, \
Objective_Sequence, Utopian_Distance, Index_Objective, Bounded_Target
from obsidian.objectives import Identity_Objective
from obsidian.plotting import plot_interactions, plot_ofat_ranges
from obsidian.exceptions import IncompatibleObjectiveError, UnfitError


from obsidian.tests.utils import DEFAULT_MOO_PATH
import json
from obsidian.tests.param_configs import X_sp_cont_ndims, X_sp_default
from obsidian.tests.utils import DEFAULT_MOO_PATH, equal_state_dicts

import pandas as pd
import pytest

import json

# Avoid using TkAgg which causes Tcl issues during testing
import matplotlib
matplotlib.use('inline')

@@ -29,79 +29,70 @@
@pytest.mark.parametrize('X_space, sim_fcn, target',
[(X_sp_cont_ndims[2], two_leaves, target_test[0]),
(X_sp_default, shifted_parab, target_test[1])])
def test_campaign(X_space, sim_fcn, target):
def test_campaign_basics(X_space, sim_fcn, target):
# Standard usage
campaign = Campaign(X_space, target)
simulator = Simulator(X_space, sim_fcn, eps=0.05)
X0 = campaign.suggest()
y0 = simulator.simulate(X0)
Z0 = pd.concat([X0, y0], axis=1)
campaign.m_exp

# Test some conditional usage
# Set an objective, suggest, clear
campaign.set_objective(Identity_Objective(mo=len(campaign.target) > 1))
campaign.suggest()
campaign.clear_objective()

# Add, fit, clear, examine
campaign.add_data(Z0)
campaign.fit()
campaign.clear_data()
campaign.y
campaign.__repr__()

# Add with iteration, examine, fit, analyze
Z0['Iteration'] = 5
campaign.add_data(Z0)
campaign.y
campaign.fit()
campaign.response_max

# Serialize, deserialize, re-serialize
obj_dict = campaign.save_state()
campaign2 = Campaign.load_state(obj_dict)
campaign2.__repr__()

campaign2.set_objective(Identity_Objective(mo=len(campaign.target) > 1))
campaign2.suggest()
obj_dict2 = campaign2.save_state()
assert equal_state_dicts(obj_dict, obj_dict2), 'Error during serialization'


# Load default
with open(DEFAULT_MOO_PATH) as json_file:
obj_dict = json.load(json_file)

campaign = Campaign.load_state(obj_dict)
X_space = campaign.X_space
target = campaign.target

test_objs = [Identity_Objective(mo=True),
Scalar_WeightedNorm(weights=[1, 1]),
Feature_Objective(X_space, indices=[0], coeff=[1]),
Objective_Sequence([Utopian_Distance([1], target[0]), Index_Objective()]),
Bounded_Target(bounds=[(0, 1), (0, 1)], targets=target),
None]


@pytest.mark.parametrize('obj', test_objs)
def test_campaign_objectives(obj):
campaign.set_objective(obj)
if campaign.objective:
campaign.objective.__repr__()
campaign.o

obj_dict = campaign.save_state()
campaign2 = Campaign.load_state(obj_dict)
campaign2.save_state()
campaign2.__repr__()
campaign2.clear_objective()


def test_explain():
# Standard usage
exp = Explainer(campaign.optimizer)
exp.__repr__
exp.shap_explain(n=50)
exp.__repr__

# Test SHAP plots
exp.shap_summary()
fig = exp.shap_summary_bar()
exp.shap_summary_bar()

# Test PDP-ICE, with options
exp.shap_pdp_ice(ind=0, ice_color_var=None, npoints=10)
exp.shap_pdp_ice(ind=0, npoints=10)
exp.shap_pdp_ice(ind=(0, 1), npoints=5)

# Test pairwise SHAP analysis, with options
X_new = campaign.X.iloc[0, :]
X_ref = campaign.X.loc[1, :]
df_shap_value_new, fig_bar, fig_line = exp.shap_single_point(X_new)
df_shap_value_new, fig_bar, fig_line = exp.shap_single_point(X_new, X_ref=X_ref)

# Test sensitivity analysis, with options
df_sens = exp.sensitivity()
df_sens = exp.sensitivity(X_ref=X_ref)

@@ -112,71 +103,89 @@ def test_explain():

@pytest.mark.parametrize('X_ref', X_ref_test)
def test_analysis(X_ref):
# OFAT ranges with/out interactions and with/out X_ref
ofat_ranges, _ = calc_ofat_ranges(campaign.optimizer, threshold=0.5, X_ref=X_ref, calc_interacts=False)
ofat_ranges, cor = calc_ofat_ranges(campaign.optimizer, threshold=0.5, X_ref=X_ref)
plot_interactions(campaign.optimizer, cor)
plot_ofat_ranges(campaign.optimizer, ofat_ranges)

# OFAT ranges where all results should be NaN
ofat_ranges, cor = calc_ofat_ranges(campaign.optimizer, threshold=9999, X_ref=X_ref)
plot_interactions(campaign.optimizer, cor)
plot_ofat_ranges(campaign.optimizer, ofat_ranges)


# VALIDATION TESTS - Force errors to be raised in object usage

@pytest.mark.fast
def test_campaign_validation():

# Missing X names
random_data = pd.DataFrame(data={'A': [1, 2, 3], 'B': [4, 5, 6]})
with pytest.raises(KeyError):
campaign.add_data(random_data)


# Missing Y names
with pytest.raises(KeyError):
campaign.add_data(campaign.X)

with pytest.raises(IncompatibleObjectiveError):
campaign.set_objective(Identity_Objective(mo=False))

# Missing data
with pytest.raises(ValueError):
campaign2 = Campaign(X_space, target)
campaign2.fit()

# Bad objective
with pytest.raises(IncompatibleObjectiveError):
campaign.set_objective(Identity_Objective(mo=False))


@pytest.mark.fast
def test_explainer_validation():

# Unfit optimizer
campaign2 = Campaign(X_space, target)
with pytest.raises(UnfitError):
exp = Explainer(campaign2.optimizer)


# Unfit SHAP
exp = Explainer(campaign.optimizer)
with pytest.raises(UnfitError):
exp.shap_summary()


# Unfit SHAP
with pytest.raises(UnfitError):
exp.shap_summary_bar()


# Unfit SHAP
with pytest.raises(UnfitError):
exp.shap_single_point(X_new=campaign.X_space.mean())

random_data = pd.DataFrame(data={'A': [1], 'B': [4]})
long_data = pd.DataFrame(data={'Parameter 1': [1, 2], 'Parameter 2': [1, 2]})

# Missing X names
with pytest.raises(ValueError):
exp.shap_explain(n=50, X_ref=random_data)

# X_ref > 1 row
with pytest.raises(ValueError):
exp.shap_explain(n=50, X_ref=long_data)

exp.shap_explain(n=50)

# Missing X names
with pytest.raises(ValueError):
exp.shap_single_point(X_new=random_data)

# Missing X names
with pytest.raises(ValueError):
exp.shap_single_point(X_new=campaign.X_space.mean(), X_ref=random_data)

# Missing X names
with pytest.raises(ValueError):
exp.sensitivity(X_ref=random_data)

# X_ref > 1 row
with pytest.raises(ValueError):
exp.sensitivity(X_ref=long_data)

64 changes: 64 additions & 0 deletions obsidian/tests/test_constraints.py
@@ -0,0 +1,64 @@
"""PyTests for obsidian.constraints"""

from obsidian.campaign import Campaign
from obsidian.constraints import (
OutConstraint_Blank,
InConstraint_Generic,
InConstraint_ConstantDim,
OutConstraint_L1
)
from obsidian.tests.utils import DEFAULT_MOO_PATH

import pandas as pd
import pytest
import json

# Load defaults
with open(DEFAULT_MOO_PATH) as json_file:
obj_dict = json.load(json_file)
campaign = Campaign.load_state(obj_dict)

optimizer = campaign.optimizer
X_space = campaign.X_space
target = campaign.target

test_ineq = [[InConstraint_Generic(X_space, indices=[0, 1], coeff=[1, 1], rhs=5)]]
test_nleq = [[InConstraint_ConstantDim(X_space, dim=0, tol=0.1)]]
test_out = [[OutConstraint_Blank(target)], [OutConstraint_L1(target, offset=1)]]

# Run very short optimizations for testing
test_config = {'optim_samples': 2, 'optim_restarts': 2}


@pytest.mark.parametrize('ineq_constraints', test_ineq)
def test_ineq_constraints(ineq_constraints):
X_suggest, eval_suggest = optimizer.suggest(ineq_constraints=ineq_constraints,
**test_config)
df_suggest = pd.concat([X_suggest, eval_suggest], axis=1)


@pytest.mark.parametrize('nleq_constraints', test_nleq)
def test_nleq_constraints(nleq_constraints):
X_suggest, eval_suggest = optimizer.suggest(nleq_constraints=nleq_constraints,
**test_config)
df_suggest = pd.concat([X_suggest, eval_suggest], axis=1)


@pytest.mark.parametrize('out_constraints', test_out)
def test_out_constraints(out_constraints):
X_suggest, eval_suggest = optimizer.suggest(out_constraints=out_constraints,
**test_config)
df_suggest = pd.concat([X_suggest, eval_suggest], axis=1)


@pytest.mark.slow
def test_combo_constraints():
X_suggest, eval_suggest = optimizer.suggest(ineq_constraints=test_ineq[0],
nleq_constraints=test_nleq[0],
out_constraints=test_out[0],
**test_config)
df_suggest = pd.concat([X_suggest, eval_suggest], axis=1)


if __name__ == '__main__':
pytest.main([__file__, '-m', 'not slow'])
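
The constraint lists and the deliberately tiny test_config above keep each suggest() call cheap during testing; outside of testing, the same keyword arguments accept larger budgets. A hedged sketch reusing the objects defined in this file (the larger sample/restart values are illustrative, not documented defaults):

# Illustrative only: combines the constraints defined above with a larger
# optimization budget than test_config; values are assumptions, not defaults.
X_suggest, eval_suggest = optimizer.suggest(
    ineq_constraints=[InConstraint_Generic(X_space, indices=[0, 1], coeff=[1, 1], rhs=5)],
    out_constraints=[OutConstraint_L1(target, offset=1)],
    optim_samples=512,
    optim_restarts=10,
)
df_suggest = pd.concat([X_suggest, eval_suggest], axis=1)
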
2 changes: 2 additions & 0 deletions obsidian/tests/test_experiment.py
@@ -1,3 +1,5 @@
"""PyTests for obsidian.experiment"""

from obsidian.tests.param_configs import test_X_space

from obsidian.experiment import ExpDesigner, Simulator