polishing some tests
cahity committed Nov 14, 2024
1 parent 74bd83d commit 28ece41
Showing 5 changed files with 189 additions and 65 deletions.
124 changes: 124 additions & 0 deletions test/test_maximization_problem.py
@@ -0,0 +1,124 @@
import importlib
from unittest import TestCase, mock

import numpy as np

from vectoptal.utils import set_seed
from vectoptal.utils.seed import SEED
from vectoptal.datasets import Dataset
from vectoptal.maximization_problem import (
ProblemFromDataset,
ContinuousProblem,
get_continuous_problem,
DecoupledEvaluationProblem,
)


class TestProblemFromDataset(TestCase):
"""Test the ProblemFromDataset class."""

def test_evaluate(self):
set_seed(SEED)

dataset = mock.Mock(spec=Dataset)

dataset.in_dim = 2
dataset.out_dim = 1
dataset.in_data = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
dataset.out_data = np.array([[0], [1], [2], [3]])

problem = ProblemFromDataset(dataset, noise_var=0.1)

x = np.array([[0.1, 0.1], [0.1, 0.8]])
y = problem.evaluate(x, noisy=False)
np.testing.assert_array_equal(y, np.array([[0], [1]]))

y = problem.evaluate(x, noisy=True)
self.assertNotEqual(np.prod(y), 0)


class TestDecoupledEvaluationProblem(TestCase):
"""Test the DecoupledEvaluationProblem class."""

def test_evaluate(self):
set_seed(SEED)

x = np.array([[0.1, 0.1], [0.1, 0.8]])
y = np.array([[0, 1], [2, 3]])

dataset = mock.Mock(spec=Dataset)

dataset.in_dim = 2
dataset.out_dim = 2
dataset.in_data = x
dataset.out_data = y

problem = ProblemFromDataset(dataset, noise_var=0.1)

decoupled_problem = DecoupledEvaluationProblem(problem)

# Test kwargs
y_pred = decoupled_problem.evaluate(x, None, noisy=False)
np.testing.assert_array_equal(y_pred, y)
y_pred = decoupled_problem.evaluate(x, None, noisy=True)
self.assertNotEqual(np.prod(y_pred), 0)

# Test evaluation index
y_pred = decoupled_problem.evaluate(x, 0, noisy=False)
np.testing.assert_array_equal(y_pred, np.array([0, 2]))
y_pred = decoupled_problem.evaluate(x, [0, 1], noisy=False)
np.testing.assert_array_equal(y_pred, np.array([0, 3]))


class TestContinuousProblem(TestCase):
"""Test the ContinuousProblem class."""

def setUp(self):
set_seed(SEED)

module = importlib.import_module(name="vectoptal.maximization_problem")
module_globals = module.__dict__

self.problem_names = [
obj.__name__
for obj in module_globals.values()
if isinstance(obj, type)
and issubclass(obj, ContinuousProblem)
and obj is not ContinuousProblem
]

self.noise_var = 0.1

def test_get_continuous_problem(self):
for name in self.problem_names:
with self.subTest(name=name):
problem = get_continuous_problem(name, self.noise_var)
self.assertIsInstance(problem, ContinuousProblem)

with self.assertRaises(ValueError):
get_continuous_problem("weird_problem_name", self.noise_var)

def test_attributes(self):
for name in self.problem_names:
with self.subTest(name=name):
problem = get_continuous_problem(name, self.noise_var)
self.assertTrue(hasattr(problem, "out_dim"))

def test_evaluation(self):
class MockProblem(ContinuousProblem):
out_dim = 2

def __init__(self):
super().__init__(0.1)

def evaluate_true(self, x):
return x

problem = MockProblem()

x = np.array([0, 1])
y_pred = problem.evaluate(x, noisy=False)
np.testing.assert_array_equal(y_pred, x.reshape(-1, problem.out_dim))

y_pred = problem.evaluate(x, noisy=True)
self.assertNotEqual(np.prod(y_pred), 0)
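
The new TestDecoupledEvaluationProblem above pins down how evaluation_index selects objectives. Below is a minimal usage sketch (not part of this commit) that mirrors the test's mock dataset and expected values; only the evaluate(x, evaluation_index, **kwargs) signature is taken from the code changed further down.

import numpy as np
from unittest import mock

from vectoptal.datasets import Dataset
from vectoptal.maximization_problem import DecoupledEvaluationProblem, ProblemFromDataset

# Mock dataset identical to the one in the test above.
dataset = mock.Mock(spec=Dataset)
dataset.in_dim, dataset.out_dim = 2, 2
dataset.in_data = np.array([[0.1, 0.1], [0.1, 0.8]])
dataset.out_data = np.array([[0, 1], [2, 3]])

problem = DecoupledEvaluationProblem(ProblemFromDataset(dataset, noise_var=0.1))
x = dataset.in_data

problem.evaluate(x, None, noisy=False)    # full outputs: [[0, 1], [2, 3]]
problem.evaluate(x, 0, noisy=False)       # objective 0 for every point: [0, 2]
problem.evaluate(x, [0, 1], noisy=False)  # objective i for point i: [0, 3]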
27 changes: 19 additions & 8 deletions test/utils/test_utils.py
@@ -87,29 +87,40 @@ def test_get_alpha_vec(self):
class TestGetClosestIndicesFromPoints(TestCase):
"""Test closest indices computation."""

def setUp(self):
self.points = torch.tensor([[0, 0], [1, 1]])
self.queries = torch.tensor([[0.1, 0.1], [0.5, 0.9], [1.2, 1.2]])

def test_does_return_first_match(self):
"""Test if the get_closest_indices_from_points returns the first between equals."""
result = get_closest_indices_from_points(
torch.tensor([[0.5, 0.5]]), self.points, return_distances=False, squared=False
)
self.assertListEqual(result.tolist(), [0])

def test_get_closest_indices_from_points(self):
"""Test the get_closest_indices_from_points function."""
points = torch.tensor([[0, 0], [1, 1]])
queries = torch.tensor([[0.1, 0.1], [0.5, 0.9], [1.2, 1.2]])

self.assertListEqual(
get_closest_indices_from_points(queries, [], return_distances=False, squared=False), []
get_closest_indices_from_points(
self.queries, [], return_distances=False, squared=False
),
[],
)

result_sq = get_closest_indices_from_points(
queries, points, return_distances=False, squared=True
self.queries, self.points, return_distances=False, squared=True
)
result = get_closest_indices_from_points(
queries, points, return_distances=False, squared=False
self.queries, self.points, return_distances=False, squared=False
)
self.assertListEqual(result_sq.tolist(), result.tolist())
self.assertListEqual(result.tolist(), [0, 1, 1])

result_sq, dists_sq = get_closest_indices_from_points(
queries, points, return_distances=True, squared=True
self.queries, self.points, return_distances=True, squared=True
)
result, dists = get_closest_indices_from_points(
queries, points, return_distances=True, squared=False
self.queries, self.points, return_distances=True, squared=False
)

self.assertListEqual(result_sq.tolist(), result.tolist())
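
The refactored test above also asserts that squared and unsquared distances pick the same closest points. A standalone illustration (not part of this commit) of why that holds: squaring is monotonic on nonnegative distances, so the argmin is unchanged. torch.cdist stands in for whatever distance computation get_closest_indices_from_points performs internally, which is not shown in this diff.

import torch

points = torch.tensor([[0.0, 0.0], [1.0, 1.0]])
queries = torch.tensor([[0.1, 0.1], [0.5, 0.9], [1.2, 1.2]])

dists = torch.cdist(queries, points)      # Euclidean distances, shape (3, 2)
closest = dists.argmin(dim=1)             # -> tensor([0, 1, 1]), as the test expects
assert torch.equal(closest, (dists ** 2).argmin(dim=1))  # squaring never changes the argmin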
21 changes: 5 additions & 16 deletions vectoptal/datasets/dataset.py
Expand Up @@ -20,7 +20,7 @@
"""

import os
from abc import ABC, abstractmethod
from abc import ABC

import numpy as np
from sklearn.preprocessing import StandardScaler, MinMaxScaler
@@ -29,27 +29,16 @@
class Dataset(ABC):
"""
Abstract base class for datasets that handles min-max scaling of input and standardization of
output. Any class inheriting from this class should implement the following properties:
output. Any class inheriting from this class should implement the following attributes:
- :obj:`_in_dim`: :type:`int`
- :obj:`_out_dim`: :type:`int`
- :obj:`_cardinality`: :type:`int`
"""

@property
@abstractmethod
def _in_dim(self) -> int:
pass

@property
@abstractmethod
def _out_dim(self) -> int:
pass

@property
@abstractmethod
def _cardinality(self) -> int:
pass
_in_dim: int
_out_dim: int
_cardinality: int

def __init__(self):
if (
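
The dataset.py change above (and the design_space.py and maximization_problem.py changes below) swaps abstract properties for plain class-level annotations. The sketch below illustrates the practical difference with hypothetical classes that are not part of the library: with annotations only, ABC no longer blocks instantiation of incomplete subclasses, so any required validation has to happen at runtime, e.g. in the constructor.

from abc import ABC, abstractmethod

class OldStyle(ABC):
    @property
    @abstractmethod
    def _in_dim(self) -> int: ...   # ABC refuses to instantiate until this is overridden

class NewStyle(ABC):
    _in_dim: int                    # annotation only: documents the contract, not enforced by ABC

class CompleteDataset(NewStyle):
    _in_dim = 2                     # subclasses now simply assign class attributes

class IncompleteDataset(NewStyle):
    pass

# An OldStyle subclass missing _in_dim fails at construction time with TypeError;
# IncompleteDataset() constructs fine and only fails when _in_dim is first read.
IncompleteDataset()
getattr(IncompleteDataset(), "_in_dim", "missing")   # -> "missing"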
13 changes: 3 additions & 10 deletions vectoptal/design_space.py
@@ -44,21 +44,14 @@ class DiscreteDesignSpace(DesignSpace):
represents a design space where the points are discrete. The class also maintains a list of
confidence regions associated with the design points.
A derived class must define the following properties:
A derived class must define the following attributes:
- :obj:`points`: :type:`np.ndarray`
- :obj:`confidence_regions`: :type:`list[ConfidenceRegion]`
"""

@property
@abstractmethod
def points(self) -> np.ndarray:
pass

@property
@abstractmethod
def confidence_regions(self) -> list[ConfidenceRegion]:
pass
points: np.ndarray
confidence_regions: list[ConfidenceRegion]

def __init__(self):
super().__init__()
69 changes: 38 additions & 31 deletions vectoptal/maximization_problem.py
@@ -87,18 +87,15 @@ def evaluate(self, x: np.ndarray, noisy: bool = True) -> np.ndarray:
class ContinuousProblem(Problem):
"""
Abstract base class for continuous optimization problems. It includes noise handling for
outputs based on a specified noise variance. It should have the following property defined:
outputs based on a specified noise variance. It should have the following attribute defined:
- :obj:`out_dim`: :type:`int`
:param noise_var: The variance of the noise to be added to the outputs.
:type noise_var: float
"""

@property
@abstractmethod
def out_dim(self) -> int:
pass
out_dim: int

def __init__(self, noise_var: float) -> None:
super().__init__()
@@ -108,6 +105,34 @@ def __init__(self, noise_var: float) -> None:
noise_covar = np.eye(self.out_dim) * noise_var
self.noise_cholesky = np.linalg.cholesky(noise_covar)

@abstractmethod
def evaluate_true(self, x: np.ndarray) -> np.ndarray:
pass

def evaluate(self, x: np.ndarray, noisy: bool = True) -> np.ndarray:
"""
Evaluates the problem at given points with optional Gaussian noise.
        :param x: Input points to evaluate, given as an array of shape (N, in_dim).
:type x: np.ndarray
:param noisy: If `True`, adds Gaussian noise to the output based on the specified
noise variance. Defaults to `True`.
:type noisy: bool
        :return: A 2D array of shape (N, out_dim) with the evaluated objective values for each
            input, with optional noise.
:rtype: np.ndarray
"""
if x.ndim == 1:
x = x.reshape(1, -1)

f = self.evaluate_true(x)

if not noisy:
return f

y = get_noisy_evaluations_chol(f, self.noise_cholesky)
return y


def get_continuous_problem(name: str, noise_var: float) -> ContinuousProblem:
"""
@@ -208,30 +233,6 @@ def evaluate_true(self, x: np.ndarray) -> np.ndarray:
Y = np.stack([-branin, -currin], axis=1)
return Y

def evaluate(self, x: np.ndarray, noisy: bool = True) -> np.ndarray:
"""
Evaluates the problem at given points with optional Gaussian noise.
:param x: Input points to evaluate, given as an array of shape (N, 2).
:type x: np.ndarray
:param noisy: If `True`, adds Gaussian noise to the output based on the specified
noise variance. Defaults to `True`.
:type noisy: bool
:return: A 2D array with evaluated Branin and Currin values for each input,
with optional noise.
:rtype: np.ndarray
"""
if x.ndim == 1:
x = x.reshape(1, -1)

f = self.evaluate_true(x)

if not noisy:
return f

y = get_noisy_evaluations_chol(f, self.noise_cholesky)
return y


class DecoupledEvaluationProblem(Problem):
"""
Expand All @@ -248,7 +249,10 @@ def __init__(self, problem: Problem) -> None:
self.problem = problem

def evaluate(
self, x: np.ndarray, evaluation_index: Optional[Union[int, List[int]]] = None
self,
x: np.ndarray,
evaluation_index: Optional[Union[int, List[int]]] = None,
**evaluate_kwargs: dict,
) -> np.ndarray:
"""
Evaluates the underlying problem at the given points and returns either the full
@@ -261,6 +265,9 @@ def evaluate(
- an `int` to return a specific objective across all points,
- a list of indices to return specific objectives for each point.
:type evaluation_index: Optional[Union[int, List[int]]]
:param evaluate_kwargs: Additional keyword arguments to pass to the evaluation function of
the underlying problem.
:type evaluate_kwargs: dict
:return: An array of evaluated values, either the full output or specific objectives.
:rtype: np.ndarray
:raises ValueError: If :obj:`evaluation_index` has an invalid format or length.
@@ -274,7 +281,7 @@
"evaluation_index must; be None, have type int or have the same length as x."
)

values = self.problem.evaluate(x)
values = self.problem.evaluate(x, **evaluate_kwargs)

if evaluation_index is None:
return values
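
ContinuousProblem.evaluate, now hoisted to the base class above, adds noise through get_noisy_evaluations_chol using the Cholesky factor built in __init__. The sketch below (not part of this commit) shows the assumed noise model; the helper's actual implementation is not visible in this diff, and the assumption is that it draws standard normal samples and maps them through the Cholesky factor.

import numpy as np

def noisy_evaluations_chol_sketch(f: np.ndarray, chol: np.ndarray) -> np.ndarray:
    # f: (N, out_dim) noiseless outputs; chol: (out_dim, out_dim) with chol @ chol.T = noise covariance
    z = np.random.standard_normal(size=f.shape)
    return f + z @ chol.T

out_dim, noise_var = 2, 0.1
chol = np.linalg.cholesky(np.eye(out_dim) * noise_var)  # as in ContinuousProblem.__init__
f = np.zeros((10_000, out_dim))
y = noisy_evaluations_chol_sketch(f, chol)
print(y.var(axis=0))   # each output dimension has empirical variance close to noise_var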
