diff --git a/doc/changelog.rst b/doc/changelog.rst
index f31588013..97bf919d3 100644
--- a/doc/changelog.rst
+++ b/doc/changelog.rst
@@ -5,6 +5,16 @@ Changelog
 =========
 
+2.2.0.rc1
+---------
+
+New features
+~~~~~~~~~~~~
+- Cost functions in iminuit.cost are now additive, creating a new cost function with the
+  union of parameters that returns the sum of the results of the individual cost functions.
+- iminuit.cost.NormalConstraint was added as a means to add soft constraints on a
+  parameter; it can also be used to set up a covariance matrix between several parameters.
+
 2.1.0 (December 18, 2020)
 -------------------------
 
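The additive behavior announced in the changelog can be sketched as follows. This is a minimal usage example, not part of the diff; it mirrors `test_addable_cost_1` in the test changes below, and the models and data points are invented for illustration:

```python
from iminuit import Minuit
from iminuit.cost import LeastSquares

def model1(x, a):
    return a + x

def model2(x, b, a):
    return a + b * x

lsq1 = LeastSquares(1, 2, 3, model1)  # parameters: (a,)
lsq2 = LeastSquares(1, 3, 4, model2)  # parameters: (b, a)

# Adding two cost functions yields a CostSum over the union of parameters.
cost = lsq1 + lsq2
assert cost.func_code.co_varnames == ("a", "b")

# Each constituent receives its own arguments, mapped from the union.
assert cost(1, 2) == lsq1(1) + lsq2(2, 1)

m = Minuit(cost, a=0, b=0)
m.migrad()
```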
""" - mask = None - verbose = 0 + __slots__ = "_items", "_maps" + + def __init__(self, *items): + self._items = [] + for item in items: + if isinstance(item, CostSum): + self._items += item._items + else: + self._items.append(item) + args = self._update() + super().__init__(args, max(c.verbose for c in self._items)) + + def _call(self, args): + r = 0.0 + for c, m in zip(self._items, self._maps): + a = tuple(args[mi] for mi in m) + r += c._call(a) + return r + + def _update(self): + out_args = [] + in_args = tuple(c._func_code.co_varnames for c in self._items) + for args in in_args: + for arg in args: + if arg not in out_args: + out_args.append(arg) + self._maps = [] + for args in in_args: + pos = tuple(out_args.index(arg) for arg in args) + self._maps.append(pos) + return tuple(out_args) + + def __len__(self): + return self._items.__len__() + + def __getitem__(self, key): + return self._items.__getitem__(key) + + +class UnbinnedCost(MaskedCost): + """Base class for unbinned cost functions.""" + + __slots__ = "_data", "_model" @property - def errordef(self): - return self._errordef + def data(self): + """Unbinned samples.""" + return self._data - def __init__(self, args, verbose, errordef): - self.func_code = make_func_code(args) - self.verbose = verbose - self._errordef = errordef + @data.setter + def data(self, value): + self._data[:] = value + def __init__(self, data, model, verbose): + self._data = _norm(data) + self._model = model + super().__init__(describe(model)[1:], verbose) -class UnbinnedNLL(Cost): + +class UnbinnedNLL(UnbinnedCost): """Unbinned negative log-likelihood. Use this if only the shape of the fitted PDF is of interest and the original unbinned data is available. """ + __slots__ = () + + @property + def pdf(self): + """PDF that describes the data.""" + return self._model + def __init__(self, data, pdf, verbose=0): """ **Parameters** @@ -106,30 +232,32 @@ def __init__(self, data, pdf, verbose=0): where `data` is the data sample and par0, ... parN are model parameters. verbose: int, optional - Verbosity level + Verbosity level. - 0: is no output (default) - 1: print current args and negative log-likelihood value """ - self.data = np.atleast_1d(data) - self.pdf = pdf - Cost.__init__(self, describe(self.pdf)[1:], verbose, 0.5) + super().__init__(data, pdf, verbose) - def __call__(self, *args): - data = self.data if self.mask is None else self.data[self.mask] - r = -_sum_log_x(self.pdf(data, *args)) - if self.verbose >= 1: - print(args, "->", r) - return r + def _call(self, args): + data = self._masked + return -2.0 * _sum_log_x(self._model(data, *args)) -class ExtendedUnbinnedNLL(Cost): +class ExtendedUnbinnedNLL(UnbinnedCost): """Unbinned extended negative log-likelihood. Use this if shape and normalization of the fitted PDF are of interest and the original unbinned data is available. """ + __slots__ = () + + @property + def scaled_pdf(self): + """Density function that describes the data.""" + return self._model + def __init__(self, data, scaled_pdf, verbose=0): """ **Parameters** @@ -138,10 +266,9 @@ def __init__(self, data, scaled_pdf, verbose=0): Sample of observations. scaled_pdf: callable - Scaled probability density function of the form f(data, par0, par1, ..., - parN), where `data` is the data sample and par0, ... parN are model - parameters. Must return a tuple (, - ). + Density function of the form f(data, par0, par1, ..., parN), where `data` is + the data sample and par0, ... parN are model parameters. Must return a tuple + (, ). 
 
         verbose: int, optional
            Verbosity level
 
            - 0: no output (default)
            - 1: print current args and negative log-likelihood value
         """
-        self.data = np.atleast_1d(data)
-        self.scaled_pdf = scaled_pdf
-        Cost.__init__(self, describe(self.scaled_pdf)[1:], verbose, 0.5)
+        super().__init__(data, scaled_pdf, verbose)
 
-    def __call__(self, *args):
-        data = self.data if self.mask is None else self.data[self.mask]
-        ns, s = self.scaled_pdf(data, *args)
-        r = ns - _sum_log_x(s)
-        if self.verbose >= 1:
-            print(args, "->", r)
-        return r
+    def _call(self, args):
+        data = self._masked
+        ns, s = self._model(data, *args)
+        return 2.0 * (ns - _sum_log_x(s))
+
+
+class BinnedCost(MaskedCost):
+    """Base class for binned cost functions."""
+
+    __slots__ = "_n", "_xe", "_model"
+
+    @property
+    def n(self):
+        """Counts per bin."""
+        return self._n
+
+    @n.setter
+    def n(self, value):
+        self._n[:] = value
+
+    @property
+    def xe(self):
+        """Bin edges."""
+        return self._xe
+
+    @xe.setter
+    def xe(self, value):
+        self._xe[:] = value
 
+    def __init__(self, n, xe, model, verbose):
+        self._n = _norm(n)
+        self._xe = _norm(xe)
+        self._model = model
 
-class BinnedNLL(Cost):
+        if np.any((np.array(self._n.shape) + 1) != self._xe.shape):
+            raise ValueError("n and xe have incompatible shapes")
+
+        super().__init__(describe(model)[1:], verbose)
+
+
+class BinnedNLL(BinnedCost):
     """Binned negative log-likelihood.
 
     Use this if only the shape of the fitted PDF is of interest and the data is binned.
     """
 
+    __slots__ = ()
+
+    @property
+    def cdf(self):
+        """Cumulative density function."""
+        return self._model
+
     def __init__(self, n, xe, cdf, verbose=0):
         """
         **Parameters**
@@ -180,7 +343,8 @@ def __init__(self, n, xe, cdf, verbose=0):
 
         cdf: callable
             Cumulative density function of the form f(xe, par0, par1, ..., parN),
-            where `xe` is a bin edge and par0, ... parN are model parameters.
+            where `xe` is a bin edge and par0, ... parN are model parameters. Must be
+            normalized to unity over the range (xe[0], xe[-1]).
 
         verbose: int, optional
             Verbosity level
@@ -188,40 +352,35 @@ def __init__(self, n, xe, cdf, verbose=0):
            - 0: no output (default)
            - 1: print current args and negative log-likelihood value
         """
-        n = np.atleast_1d(n)
-        xe = np.atleast_1d(xe)
-
-        if np.any((np.array(n.shape) + 1) != xe.shape):
-            raise ValueError("n and xe have incompatible shapes")
+        super().__init__(n, xe, cdf, verbose)
 
-        self.n = n
-        self.xe = xe
-        self.cdf = cdf
-        Cost.__init__(self, describe(self.cdf)[1:], verbose, 0.5)
-
-    def __call__(self, *args):
-        prob = np.diff(self.cdf(self.xe, *args))
-        ma = self.mask
-        if ma is None:
-            n = self.n
-        else:
-            n = self.n[ma]
+    def _call(self, args):
+        prob = np.diff(self._model(self._xe, *args))
+        n = self._masked
+        ma = self._mask
+        if ma is not None:
             prob = prob[ma]
         mu = np.sum(n) * prob
         # + np.sum(mu) can be skipped, it is effectively constant
-        r = _neg_sum_n_log_mu(n, mu)
-        if self.verbose >= 1:
-            print(args, "->", r)
-        return r
+        return 2.0 * _neg_sum_n_log_mu(n, mu)
+
+    def _make_masked(self):
+        return self._n if self._mask is None else self._n[self._mask]
 
 
-class ExtendedBinnedNLL(Cost):
+class ExtendedBinnedNLL(BinnedCost):
     """Binned extended negative log-likelihood.
 
     Use this if shape and normalization of the fitted PDF are of interest and the data
     is binned.
""" + __slots__ = () + + @property + def scaled_cdf(self): + return self._model + def __init__(self, n, xe, scaled_cdf, verbose=0): """ **Parameters** @@ -242,39 +401,76 @@ def __init__(self, n, xe, scaled_cdf, verbose=0): - 0: is no output (default) - 1: print current args and negative log-likelihood value """ - n = np.atleast_1d(n) - xe = np.atleast_1d(xe) - - if np.any((np.array(n.shape) + 1) != xe.shape): - raise ValueError("n and xe have incompatible shapes") + super().__init__(n, xe, scaled_cdf, verbose) - self.n = n - self.xe = xe - self.scaled_cdf = scaled_cdf - Cost.__init__(self, describe(self.scaled_cdf)[1:], verbose, 0.5) - - def __call__(self, *args): - mu = np.diff(self.scaled_cdf(self.xe, *args)) - ma = self.mask - if ma is None: - n = self.n - else: - n = self.n[ma] + def _call(self, args): + mu = np.diff(self._model(self._xe, *args)) + ma = self._mask + n = self._masked + if ma is not None: mu = mu[ma] - r = _sum_log_poisson(n, mu) - if self.verbose >= 1: - print(args, "->", r) - return r + return 2.0 * _sum_log_poisson(n, mu) + def _make_masked(self): + return self._n if self._mask is None else self._n[self._mask] -class LeastSquares(Cost): + +class LeastSquares(MaskedCost): """Least-squares cost function (aka chisquare function). Use this if you have data of the form (x, y +/- yerror). """ - _loss = None - _cost = None + __slots__ = "_loss", "_cost", "_x", "_y", "_yerror", "_model" + + @property + def x(self): + """Explanatory variable.""" + return self._x + + @x.setter + def x(self, value): + self._x[:] = value + + @property + def y(self): + """Sample.""" + return self._y + + @y.setter + def y(self, value): + self._y[:] = value + + @property + def yerror(self): + """Expected uncertainty of sample.""" + return self._yerror + + @yerror.setter + def yerror(self, value): + self._yerror[:] = value + + @property + def model(self): + """Model of the form y = f(x, par0, [par1, ...]).""" + return self._model + + @property + def loss(self): + """Loss function. 
+
+    @property
+    def loss(self):
+        """Loss function.
+
+        See LeastSquares.__init__ for details.
+        """
+        return self._loss
+
+    @loss.setter
+    def loss(self, loss):
+        self._loss = loss
+        if hasattr(loss, "__call__"):
+            self._cost = lambda y, ye, ym: np.sum(loss(_z_squared(y, ye, ym)))
+        elif loss == "linear":
+            self._cost = _sum_z_squared
+        elif loss == "soft_l1":
+            self._cost = _sum_z_squared_soft_l1
+        else:
+            raise ValueError("unknown loss type: " + loss)
 
     def __init__(self, x, y, yerror, model, loss="linear", verbose=0):
         """
@@ -310,54 +506,79 @@ def __init__(self, x, y, yerror, model, loss="linear", verbose=0):
 
            - 0: no output (default)
            - 1: print current args and negative log-likelihood value
         """
-        x = np.atleast_1d(x)
-        y = np.atleast_1d(y)
+        x = _norm(x)
+        y = _norm(y)
+        yerror = np.asarray(yerror, dtype=float)
 
         if len(x) != len(y):
             raise ValueError("x and y must have same length")
 
-        if np.ndim(yerror) == 0:
+        if yerror.ndim == 0:
             yerror = yerror * np.ones_like(y)
-        else:
-            yerror = np.asarray(yerror)
-            if yerror.shape != y.shape:
-                raise ValueError("y and yerror must have same shape")
-
-        self.x = x
-        self.y = y
-        self.yerror = yerror
-        self.model = model
-        self.loss = loss
-        Cost.__init__(self, describe(self.model)[1:], verbose, 1.0)
+        elif yerror.shape != y.shape:
+            raise ValueError("y and yerror must have same shape")
 
-    @property
-    def loss(self):
-        return self._loss
+        self._x = x
+        self._y = y
+        self._yerror = yerror
+        self._model = model
+        self.loss = loss
+        super().__init__(describe(self._model)[1:], verbose)
 
-    @loss.setter
-    def loss(self, loss):
-        self._loss = loss
-        if hasattr(loss, "__call__"):
-            self._cost = lambda y, ye, ym: np.sum(loss(_z_squared(y, ye, ym)))
-        elif loss == "linear":
-            self._cost = _sum_z_squared
-        elif loss == "soft_l1":
-            self._cost = _sum_z_squared_soft_l1
-        else:
-            raise ValueError("unknown loss type: " + loss)
+    def _call(self, args):
+        x, y, yerror = self._masked
+        ym = self._model(x, *args)
+        return self._cost(y, yerror, ym)
 
-    def __call__(self, *args):
-        ma = self.mask
+    def _make_masked(self):
+        ma = self._mask
         if ma is None:
-            x = self.x
-            y = self.y
-            yerror = self.yerror
+            return self._x, self._y, self._yerror
         else:
-            x = self.x[ma]
-            y = self.y[ma]
-            yerror = self.yerror[ma]
-        ym = self.model(x, *args)
-        r = self._cost(y, yerror, ym)
-        if self.verbose >= 1:
-            print(args, "->", r)
-        return r
+            return self._x[ma], self._y[ma], self._yerror[ma]
+
+
+class NormalConstraint(Cost):
+    """Gaussian soft constraint on one or several parameters.
+
+    Add this to another cost function to constrain parameters to expected values
+    with uncertainties; a 2D `error` is interpreted as a covariance matrix between
+    the parameters.
+    """
+
+    __slots__ = "_value", "_cov", "_covinv"
+
+    def __init__(self, args, value, error):
+        if isinstance(args, str):
+            args = [args]
+        self._value = _norm(value)
+        self._cov = _norm(error)
+        if self._cov.ndim < 2:
+            self._cov **= 2
+        self._covinv = _covinv(self._cov)
+        super().__init__(args, 0)
+
+    @property
+    def covariance(self):
+        """Covariance of the constraint (variances if 1D)."""
+        return self._cov
+
+    @covariance.setter
+    def covariance(self, value):
+        self._cov[:] = value
+        self._covinv = _covinv(self._cov)
+
+    @property
+    def value(self):
+        """Expected value(s) of the constrained parameter(s)."""
+        return self._value
+
+    @value.setter
+    def value(self, value):
+        self._value[:] = value
+
+    def _call(self, args):
+        delta = self._value - args
+        if self._covinv.ndim < 2:
+            return np.sum(delta ** 2 * self._covinv)
+        return np.einsum("i,ij,j", delta, self._covinv, delta)
+
+
+def _norm(value):
+    return np.atleast_1d(np.asarray(value, dtype=float))
+
+
+def _covinv(array):
+    return np.linalg.inv(array) if array.ndim == 2 else 1.0 / array
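Since NormalConstraint is new in this change set, here is a sketch of the correlated form, mirroring `test_NormalConstraint_2` in the test diff below; the numbers are invented for illustration:

```python
import numpy as np
from iminuit import Minuit
from iminuit.cost import NormalConstraint

sa, sb, rho = 0.1, 0.02, 0.5
cov = ((sa ** 2, rho * sa * sb), (rho * sa * sb, sb ** 2))

# A 2D `error` is interpreted as a covariance matrix between the parameters;
# the cost is then delta^T C^-1 delta with delta = value - (a, b).
nc = NormalConstraint(("a", "b"), (1.0, 2.0), cov)

m = Minuit(nc, a=0, b=0)
m.migrad()
assert np.allclose(m.values, (1, 2), atol=1e-3)
# The fitted covariance reproduces the constraint covariance.
assert np.allclose(m.covariance, cov, rtol=1e-2)
```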
diff --git a/src/iminuit/util.py b/src/iminuit/util.py
index d557b0664..5fa500366 100644
--- a/src/iminuit/util.py
+++ b/src/iminuit/util.py
@@ -699,10 +699,11 @@ def describe(callable):
     1. Using ``obj.func_code``. If an object has a ``func_code`` attribute, it is used
        to detect the parameters. Examples::
 
-           def f(x, y):
+           def f(*args):  # no signature
+               x, y = args
                return (x - 2) ** 2 + (y - 3) ** 2
 
-           f = lambda x, y: (x - 2) ** 2 + (y - 3) ** 2
+           f.func_code = make_func_code(("x", "y"))
 
     Users are encouraged to use this mechanism to provide signatures for objects that
     otherwise would not have a detectable signature. The function
diff --git a/src/iminuit/version.py b/src/iminuit/version.py
index 9aa3f9036..08f3e2ddf 100644
--- a/src/iminuit/version.py
+++ b/src/iminuit/version.py
@@ -1 +1 @@
-__version__ = "2.1.0"
+__version__ = "2.2.0.rc1"
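The ``func_code`` mechanism documented in the describe docstring above can be exercised like this; a short sketch using only `describe` and `make_func_code` from `iminuit.util`, with an invented function `f`:

```python
from iminuit.util import describe, make_func_code

def f(*args):  # no detectable signature
    x, y = args
    return (x - 2) ** 2 + (y - 3) ** 2

# Attach a synthetic signature so describe (and Minuit) can find the parameters.
f.func_code = make_func_code(("x", "y"))
assert describe(f) == ["x", "y"]
```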
diff --git a/tests/test_cost.py b/tests/test_cost.py
index 093c9bc22..f0d5d771a 100644
--- a/tests/test_cost.py
+++ b/tests/test_cost.py
@@ -1,14 +1,17 @@
 import pytest
 import numpy as np
-from numpy.testing import assert_allclose
+from numpy.testing import assert_allclose, assert_equal
 from iminuit import Minuit
 from iminuit.cost import (
+    CostSum,
     UnbinnedNLL,
     BinnedNLL,
     ExtendedUnbinnedNLL,
     ExtendedBinnedNLL,
     LeastSquares,
+    NormalConstraint,
 )
+from collections.abc import Sequence
 
 stats = pytest.importorskip("scipy.stats")
 norm = stats.norm
@@ -20,8 +23,8 @@ def expon_cdf(x, a):
 
 @pytest.fixture
 def unbinned():
-    np.random.seed(1)
-    x = np.random.randn(1000)
+    rng = np.random.default_rng(1)
+    x = rng.normal(size=1000)
     mle = (len(x), np.mean(x), np.std(x, ddof=1))
     return mle, x
 
@@ -144,12 +147,32 @@ def test_LeastSquares_bad_input():
 
 def test_UnbinnedNLL_mask():
     c = UnbinnedNLL([1, np.nan, 2], lambda x, a: x + a)
+    assert c.mask is None
 
     assert np.isnan(c(0)) == True
     c.mask = np.arange(3) != 1
+    assert_equal(c.mask, (True, False, True))
     assert np.isnan(c(0)) == False
 
 
+def test_UnbinnedNLL_properties():
+    def pdf(x, a, b):
+        return 0
+
+    c = UnbinnedNLL([1, 2], pdf)
+    assert c.pdf is pdf
+    with pytest.raises(AttributeError):
+        c.pdf = None
+    assert_equal(c.data, [1, 2])
+    c.data = [2, 3]
+    assert_equal(c.data, [2, 3])
+    with pytest.raises(ValueError):
+        c.data = [1, 2, 3]
+    assert c.verbose == 0
+    c.verbose = 1
+    assert c.verbose == 1
+
+
 def test_ExtendedUnbinnedNLL_mask():
     c = ExtendedUnbinnedNLL([1, np.nan, 2], lambda x, a: (1, x + a))
 
@@ -158,6 +181,16 @@ def test_ExtendedUnbinnedNLL_mask():
     assert np.isnan(c(0)) == False
 
 
+def test_ExtendedUnbinnedNLL_properties():
+    def pdf(x, a, b):
+        return 0
+
+    c = ExtendedUnbinnedNLL([1, 2], pdf)
+    assert c.scaled_pdf is pdf
+    with pytest.raises(AttributeError):
+        c.scaled_pdf = None
+
+
 def test_BinnedNLL_mask():
     c = BinnedNLL([5, 1000, 1], [0, 1, 2, 3], expon_cdf)
 
@@ -167,6 +200,26 @@ def test_BinnedNLL_mask():
     c.mask = np.arange(3) != 1
     assert c(1) < c_unmasked
 
 
+def test_BinnedNLL_properties():
+    def cdf(x, a, b):
+        return 0
+
+    c = BinnedNLL([1], [1, 2], cdf)
+    assert c.cdf is cdf
+    with pytest.raises(AttributeError):
+        c.cdf = None
+    assert_equal(c.n, [1])
+    assert_equal(c.xe, [1, 2])
+    c.n = [2]
+    c.xe = [2, 3]
+    assert_equal(c.n, [2])
+    assert_equal(c.xe, [2, 3])
+    with pytest.raises(ValueError):
+        c.n = [1, 2]
+    with pytest.raises(ValueError):
+        c.xe = [1, 2, 3]
+
+
 def test_ExtendedBinnedNLL_mask():
     c = ExtendedBinnedNLL([1, 1000, 2], [0, 1, 2, 3], expon_cdf)
 
@@ -175,8 +228,160 @@ def test_ExtendedBinnedNLL_mask():
     assert c(2) < c_unmasked
 
 
+def test_ExtendedBinnedNLL_properties():
+    def cdf(x, a):
+        return 0
+
+    c = ExtendedBinnedNLL([1], [1, 2], cdf)
+    assert c.scaled_cdf is cdf
+
+
 def test_LeastSquares_mask():
     c = LeastSquares([1, 2, 3], [3, np.nan, 4], [1, 1, 1], lambda x, a: x + a)
     assert np.isnan(c(0)) == True
     c.mask = np.arange(3) != 1
     assert np.isnan(c(0)) == False
+
+
+def test_LeastSquares_properties():
+    def model(x, a):
+        return a
+
+    c = LeastSquares(1, 2, 3, model)
+    assert_equal(c.x, [1])
+    assert_equal(c.y, [2])
+    assert_equal(c.yerror, [3])
+    assert c.model is model
+    with pytest.raises(AttributeError):
+        c.model = model
+    with pytest.raises(ValueError):
+        c.x = [1, 2]
+    with pytest.raises(ValueError):
+        c.y = [1, 2]
+    with pytest.raises(ValueError):
+        c.yerror = [1, 2]
+
+
+def test_addable_cost_1():
+    def model1(x, a):
+        return a + x
+
+    def model2(x, b, a):
+        return a + b * x
+
+    def model3(x, c):
+        return c
+
+    lsq1 = LeastSquares(1, 2, 3, model1)
+    assert lsq1.func_code.co_varnames == ("a",)
+
+    lsq2 = LeastSquares(1, 3, 4, model2)
+    assert lsq2.func_code.co_varnames == ("b", "a")
+
+    lsq3 = LeastSquares(1, 1, 1, model3)
+    assert lsq3.func_code.co_varnames == ("c",)
+
+    lsq12 = lsq1 + lsq2
+    assert lsq12._items == [lsq1, lsq2]
+    assert isinstance(lsq12, CostSum)
+    assert isinstance(lsq1, LeastSquares)
+    assert isinstance(lsq2, LeastSquares)
+    assert lsq12.func_code.co_varnames == ("a", "b")
+
+    assert lsq12(1, 2) == lsq1(1) + lsq2(2, 1)
+
+    m = Minuit(lsq12, a=0, b=0)
+    m.migrad()
+    assert m.parameters == ("a", "b")
+    assert_allclose(m.values, (1, 2))
+    assert_allclose(m.errors, (3, 5))
+    assert_allclose(m.covariance, ((9, -9), (-9, 25)), atol=1e-10)
+
+    lsq121 = lsq12 + lsq1
+    assert lsq121._items == [lsq1, lsq2, lsq1]
+    assert lsq121.func_code.co_varnames == ("a", "b")
+
+    lsq312 = lsq3 + lsq12
+    assert lsq312._items == [lsq3, lsq1, lsq2]
+    assert lsq312.func_code.co_varnames == ("c", "a", "b")
+
+    lsq31212 = lsq312 + lsq12
+    assert lsq31212._items == [lsq3, lsq1, lsq2, lsq1, lsq2]
+    assert lsq31212.func_code.co_varnames == ("c", "a", "b")
+
+    lsq31212 += lsq1
+    assert lsq31212._items == [lsq3, lsq1, lsq2, lsq1, lsq2, lsq1]
+    assert lsq31212.func_code.co_varnames == ("c", "a", "b")
+
+
+def test_addable_cost_2():
+    ref = NormalConstraint("a", 1, 2), NormalConstraint(("b", "a"), (1, 1), (2, 2))
+    cs = ref[0] + ref[1]
+    assert isinstance(cs, Sequence)
+    assert len(cs) == 2
+    assert cs[0] is ref[0]
+    assert cs[1] is ref[1]
+    for c, r in zip(cs, ref):
+        assert c is r
+    assert cs.index(ref[0]) == 0
+    assert cs.index(ref[1]) == 1
+    assert cs.count(ref[0]) == 1
+
+
+def test_NormalConstraint_1():
+    def model(x, a):
+        return a
+
+    lsq1 = LeastSquares(0, 1, 1, model)
+    lsq2 = lsq1 + NormalConstraint("a", 1, 0.1)
+    assert lsq1.func_code.co_varnames == ("a",)
+    assert lsq2.func_code.co_varnames == ("a",)
+
+    m = Minuit(lsq1, 0)
+    m.migrad()
+    assert_allclose(m.values, (1,), atol=1e-2)
+    assert_allclose(m.errors, (1,), rtol=1e-2)
+
+    m = Minuit(lsq2, 0)
+    m.migrad()
+    assert_allclose(m.values, (1,), atol=1e-2)
+    assert_allclose(m.errors, (0.1,), rtol=1e-2)
+
+
+def test_NormalConstraint_2():
+    lsq1 = NormalConstraint(("a", "b"), (1, 2), (2, 2))
+    lsq2 = lsq1 + NormalConstraint("b", 2, 0.1) + NormalConstraint("a", 1, 0.01)
+    sa = 0.1
+    sb = 0.02
+    rho = 0.5
+    cov = ((sa ** 2, rho * sa * sb), (rho * sa * sb, sb ** 2))
+    lsq3 = lsq1 + NormalConstraint(("a", "b"), (1, 2), cov)
+    assert lsq1.func_code.co_varnames == ("a", "b")
+    assert lsq2.func_code.co_varnames == ("a", "b")
+    assert lsq3.func_code.co_varnames == ("a", "b")
+
+    m = Minuit(lsq1, 0, 0)
+    m.migrad()
+    assert_allclose(m.values, (1, 2), atol=1e-3)
+    assert_allclose(m.errors, (2, 2), rtol=1e-3)
+
+    m = Minuit(lsq2, 0, 0)
+    m.migrad()
+    assert_allclose(m.values, (1, 2), atol=1e-3)
+    assert_allclose(m.errors, (0.01, 0.1), rtol=1e-2)
+
+    m = Minuit(lsq3, 0, 0)
+    m.migrad()
+    assert_allclose(m.values, (1, 2), atol=1e-3)
+    assert_allclose(m.errors, (sa, sb), rtol=1e-2)
+    assert_allclose(m.covariance, cov, rtol=1e-2)
+
+
+def test_NormalConstraint_properties():
+    nc = NormalConstraint(("a", "b"), (1, 2), (3, 4))
+    assert_equal(nc.value, (1, 2))
+    assert_equal(nc.covariance, (9, 16))
+    nc.value = (2, 3)
+    nc.covariance = (1, 2)
+    assert_equal(nc.value, (2, 3))
+    assert_equal(nc.covariance, (1, 2))
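Finally, a sketch of the mask behavior that several of these tests exercise, mirroring `test_LeastSquares_mask` above; the data points are invented:

```python
import numpy as np
from iminuit.cost import LeastSquares

c = LeastSquares([1, 2, 3], [3, np.nan, 4], 1, lambda x, a: x + a)
assert np.isnan(c(0))       # the NaN point spoils the cost
c.mask = np.arange(3) != 1  # exclude the second point
assert not np.isnan(c(0))   # the cost is finite again
```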