Commit

faster tests
cahity committed Nov 28, 2024
1 parent ebe7c95 commit 10bf30d
Showing 11 changed files with 34 additions and 38 deletions.
4 changes: 2 additions & 2 deletions test/algorithms/test_auer.py
@@ -23,7 +23,7 @@ def setUp(self):
self.order = ComponentwiseOrder(2)
self.noise_var = 0.00001
self.dataset_cardinality = get_dataset_instance(self.dataset_name)._cardinality
-self.conf_contraction = 4
+self.conf_contraction = 32
self.algorithm = Auer(
epsilon=self.epsilon,
delta=self.delta,
@@ -72,7 +72,7 @@ def test_whole_class(self):
list(pareto_indices),
self.epsilon,
)
-self.assertTrue(eps_f1 > 0.9)
+self.assertGreaterEqual(eps_f1, 0.9)

def test_run_one_step(self):
"""Test the run_one_step method."""
8 changes: 4 additions & 4 deletions test/algorithms/test_decoupled.py
@@ -16,14 +16,14 @@ def setUp(self):
"""A basic setup for the model."""
set_seed(SEED)

-self.epsilon = 0.1
+self.epsilon = 0.2
self.delta = 0.1
self.dataset_name = "Test"
self.order = ComponentwiseOrder(2)
self.dataset_cardinality = get_dataset_instance(self.dataset_name)._cardinality
self.noise_var = 0.00001
self.costs = [1.0, 1.5]
-self.cost_budget = 64
+self.cost_budget = 24
self.algorithm = DecoupledGP(
dataset_name=self.dataset_name,
order=self.order,
@@ -56,14 +56,14 @@ def test_whole_class(self):
list(pareto_indices),
self.epsilon,
)
-self.assertTrue(eps_f1 > 0.9) # Even though algorithm is not using epsilon.
+self.assertGreaterEqual(eps_f1, 0.9) # Even though algorithm is not using epsilon.
self.assertLess(self.algorithm.total_cost, self.cost_budget + max(self.costs))
self.assertLessEqual(self.algorithm.total_cost, self.algorithm.round * max(self.costs))
self.assertGreaterEqual(self.algorithm.total_cost, self.algorithm.round * min(self.costs))

def test_run_one_step(self):
"""Test the run_one_step method."""
-num_rounds = 10
+num_rounds = 5
alg_done = False
for i in range(num_rounds): # Run for 10 rounds, it should be enough.
if not alg_done and i <= 3: # Save the state at round 3 at the latest.
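A minimal sketch of the cost accounting behind the three budget assertions in test_whole_class above, assuming the algorithm pays at most max(self.costs) per round and stops once the budget is exhausted (illustration only, not vopy code; the values mirror the reduced budget in this commit):

    costs = [1.0, 1.5]
    cost_budget = 24  # the reduced budget in this commit

    total_cost, rounds = 0.0, 0
    while total_cost < cost_budget:
        total_cost += max(costs)  # worst case: the most expensive objective every round
        rounds += 1

    # The final round can overshoot the budget by at most one worst-case evaluation,
    # and the total is sandwiched by the cheapest / most expensive per-round cost.
    assert total_cost < cost_budget + max(costs)
    assert rounds * min(costs) <= total_cost <= rounds * max(costs)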
6 changes: 3 additions & 3 deletions test/algorithms/test_epal.py
@@ -17,13 +17,13 @@ def setUp(self):
# Set random seed for reproducibility
set_seed(SEED)

-self.epsilon = 0.1
+self.epsilon = 0.2
self.delta = 0.1
self.noise_var = 0.00001
self.dataset_name = "Test"
-self.conf_contraction = 9
+self.conf_contraction = 128

-self.iter_count = 10
+self.iter_count = 5
self.output_dim = 2
self.order = ComponentwiseOrder(self.output_dim)
self.dataset_cardinality = get_dataset_instance(self.dataset_name)._cardinality
2 changes: 1 addition & 1 deletion test/algorithms/test_naive.py
@@ -48,7 +48,7 @@ def test_whole_class(self):
list(pareto_indices),
self.epsilon,
)
-self.assertTrue(eps_f1 > 0.9)
+self.assertGreaterEqual(eps_f1, 0.9)

def test_run_one_step(self):
"""Test the run_one_step method."""
12 changes: 5 additions & 7 deletions test/algorithms/test_paveba.py
@@ -17,13 +17,13 @@ def setUp(self):
"""A basic setup for the model."""
set_seed(SEED)

-self.epsilon = 0.1
+self.epsilon = 0.2
self.delta = 0.1
self.dataset_name = "Test"
self.order = ComponentwiseOrder(2)
self.noise_var = 0.00001
self.dataset_cardinality = get_dataset_instance(self.dataset_name)._cardinality
-self.conf_contraction = 1
+self.conf_contraction = 1024
self.algo = PaVeBa(
epsilon=self.epsilon,
delta=self.delta,
@@ -57,13 +57,11 @@ def test_whole_class(self):
list(pareto_indices),
self.epsilon,
)
-self.assertTrue(eps_f1 > 0.9)
+self.assertGreaterEqual(eps_f1, 0.9)

def test_run_one_step(self):
"""Test the run_one_step method."""
-self.algo.conf_contraction = 32
-
-num_rounds = 10
+num_rounds = 5
alg_done = False
for i in range(num_rounds): # Run for 10 rounds, it should be enough.
if not alg_done and i <= 3: # Save the state at round 3 at the latest.
@@ -83,7 +81,7 @@ def test_compute_radius(self):
self.algo.run_one_step()
t1 = 8 * self.noise_var
t2 = np.log((np.pi**2 * (3) * self.dataset_cardinality) / (6 * 0.1))
-r1 = np.sqrt(t1 * t2)
+r1 = np.sqrt(t1 * t2) / self.conf_contraction
r2 = self.algo.compute_radius()
self.assertTrue(r1 == r2)

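The expected radius in test_compute_radius now includes the confidence contraction. A standalone reproduction of that arithmetic, with constants copied from the test after a single run_one_step call; the dataset cardinality is a placeholder here (the test reads it from the "Test" dataset), and this mirrors the test's expectation rather than PaVeBa's internal implementation:

    import numpy as np

    noise_var = 0.00001
    delta = 0.1
    conf_contraction = 1024    # value set in setUp after this commit
    dataset_cardinality = 100  # placeholder; the test reads it from the "Test" dataset

    t1 = 8 * noise_var
    t2 = np.log((np.pi**2 * 3 * dataset_cardinality) / (6 * delta))
    expected_radius = np.sqrt(t1 * t2) / conf_contraction  # matches the updated r1
    print(expected_radius)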
10 changes: 5 additions & 5 deletions test/algorithms/test_paveba_gp.py
@@ -17,13 +17,13 @@ def setUp(self):
# A basic setup for the model.
set_seed(SEED)

-self.epsilon = 0.1
+self.epsilon = 0.2
self.delta = 0.1
self.dataset_name = "Test"
self.order = ComponentwiseOrder(2)
self.noise_var = 0.00001
self.dataset_cardinality = get_dataset_instance(self.dataset_name)._cardinality
-self.conf_contraction = 4
+self.conf_contraction = 256
self.algo = PaVeBaGP(
epsilon=self.epsilon,
delta=self.delta,
@@ -56,11 +56,11 @@ def test_whole_class(self):
list(pareto_indices),
self.epsilon,
)
-self.assertTrue(eps_f1 > 0.9)
+self.assertGreaterEqual(eps_f1, 0.9)

def test_run_one_step_with_hyperrectangle(self):
"""Test the run_one_step method."""
-num_rounds = 10
+num_rounds = 5
alg_done = False
for i in range(num_rounds): # Run for 10 rounds, it should be enough.
if not alg_done and i <= 3: # Save the state at round 3 at the latest.
@@ -88,7 +88,7 @@ def test_run_one_step_with_ellipsoid(self):
type="DE",
)

-num_rounds = 5
+num_rounds = 3
alg_done = False
for i in range(num_rounds): # Run for 10 rounds, it should be enough.
if not alg_done and i <= 2: # Save the state at round 3 at the latest.
10 changes: 5 additions & 5 deletions test/algorithms/test_paveba_partial_gp.py
@@ -17,15 +17,15 @@ def setUp(self):
"""A basic setup for the model."""
set_seed(SEED)

-self.epsilon = 0.1
+self.epsilon = 0.2
self.delta = 0.1
self.dataset_name = "Test"
self.order = ComponentwiseOrder(2)
self.dataset_cardinality = get_dataset_instance(self.dataset_name)._cardinality
self.noise_var = 0.00001
-self.conf_contraction = 4
+self.conf_contraction = 32
self.costs = [1.0, 1.5]
-self.cost_budget = 64
+self.cost_budget = 24
self.algo = PaVeBaPartialGP(
epsilon=self.epsilon,
delta=self.delta,
@@ -61,14 +61,14 @@ def test_whole_class(self):
list(pareto_indices),
self.epsilon,
)
-self.assertTrue(eps_f1 > 0.9)
+self.assertGreaterEqual(eps_f1, 0.9)
self.assertLess(self.algo.total_cost, self.cost_budget + max(self.costs))
self.assertLessEqual(self.algo.total_cost, self.algo.round * max(self.costs))
self.assertGreaterEqual(self.algo.total_cost, self.algo.round * min(self.costs))

def test_run_one_step(self):
"""Test the run_one_step method."""
-num_rounds = 10
+num_rounds = 5
alg_done = False
for i in range(num_rounds): # Run for 10 rounds, it should be enough.
if not alg_done and i <= 3: # Save the state at round 3 at the latest.
6 changes: 3 additions & 3 deletions test/algorithms/test_vogp.py
@@ -24,14 +24,14 @@ def setUp(self):
set_seed(SEED)

# Parameters for VOGP instance
-self.epsilon = 0.1
+self.epsilon = 0.2
self.delta = 0.1
self.noise_var = 0.00001
-self.conf_contraction = 4
+self.conf_contraction = 64
self.dataset_name = "Test"
self.order = ComponentwiseOrder(2)

-self.iter_count = 10
+self.iter_count = 5

# Create the VOGP instance
self.algorithm = VOGP(
8 changes: 3 additions & 5 deletions test/algorithms/test_vogp_ad.py
@@ -21,15 +21,13 @@ def setUp(self):
set_seed(SEED)

# Parameters for VOGP instance
-self.epsilon = 0.1
+self.epsilon = 0.2
self.delta = 0.1
self.noise_var = 0.00001
self.problem_name = "BraninCurrin"
self.problem: ContinuousProblem = get_continuous_problem(self.problem_name, self.noise_var)
self.order = ComponentwiseOrder(2)
-self.conf_contraction = 64
-
-self.iter_count = 1
+self.conf_contraction = 128

# Create the VOGP instance
self.algorithm = VOGP_AD(
@@ -78,5 +76,5 @@ def test_whole_class(self):
)

self.assertLessEqual(
-log_hv_discrepancy, -3.5, "Log. hypervolume discrepancy should be reasonably low."
+log_hv_discrepancy, -2.5, "Log. hypervolume discrepancy should be reasonably low."
)
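The final assertion loosens the log hypervolume discrepancy threshold from -3.5 to -2.5. As a rough illustration of the quantity being bounded, assuming it is the natural log of the gap between a reference hypervolume and the hypervolume achieved by the returned designs (vopy's exact definition may differ, and the numbers below are hypothetical):

    import numpy as np

    hv_reference = 120.00  # hypervolume of the true Pareto front (hypothetical)
    hv_achieved = 119.95   # hypervolume of the designs returned by VOGP_AD (hypothetical)

    log_hv_discrepancy = np.log(hv_reference - hv_achieved)  # ~ -3.0
    assert log_hv_discrepancy <= -2.5                         # the relaxed threshold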
2 changes: 1 addition & 1 deletion test/models/test_gpytorch.py
@@ -292,7 +292,7 @@ def setUp(self):

def test_get_model_without_initial_data(self):
"""Test model creation when X and Y are not provided."""
-self.problem.evaluate.return_value = np.random.randn(1024, self.output_dim)
+self.problem.evaluate.return_value = np.random.randn(512, self.output_dim)

model = get_gpytorch_modellist_w_known_hyperparams(
problem=self.problem,
4 changes: 2 additions & 2 deletions vopy/models/gpytorch.py
@@ -439,7 +439,7 @@ def get_gpytorch_model_w_known_hyperparams(
:rtype: GPyTorchMultioutputExactModel
"""
if X is None:
-X = generate_sobol_samples(problem.in_dim, 1024) # TODO: magic number
+X = generate_sobol_samples(problem.in_dim, 512) # TODO: magic number
if Y is None:
Y = problem.evaluate(X)

@@ -795,7 +795,7 @@ def get_gpytorch_modellist_w_known_hyperparams(
:rtype: GPyTorchModelListExactModel
"""
if X is None:
-X = generate_sobol_samples(problem.in_dim, 1024) # TODO: magic number
+X = generate_sobol_samples(problem.in_dim, 512) # TODO: magic number
if Y is None:
Y = problem.evaluate(X)

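Both helpers now fall back to a 512-point Sobol design when no inputs are given (the TODO still flags the hard-coded count). For orientation, a rough sketch of what such a sampler can look like using torch's SobolEngine; vopy's actual generate_sobol_samples may be implemented differently:

    import torch

    def sobol_samples_sketch(dim: int, num_samples: int) -> torch.Tensor:
        """Quasi-random points in the unit hypercube [0, 1]^dim (illustration only)."""
        engine = torch.quasirandom.SobolEngine(dimension=dim, scramble=True)
        return engine.draw(num_samples)

    # The smaller fallback design used when X is None:
    X = sobol_samples_sketch(dim=2, num_samples=512)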
