diff --git a/modepy/matrices.py b/modepy/matrices.py
index 307697c..05ea349 100644
--- a/modepy/matrices.py
+++ b/modepy/matrices.py
@@ -131,6 +131,11 @@ def multi_vandermonde(
     A sequence of matrices is returned--i.e. this function works directly on
     :func:`modepy.Basis.gradients` and returns a tuple of matrices.
+
+    .. note::
+
+        If only one of the matrices is needed, it may be more convenient to call
+        :func:`vandermonde` with the result of :meth:`Basis.derivatives`.
     """
 
     nnodes = nodes.shape[-1]
diff --git a/modepy/modes.py b/modepy/modes.py
index 0ef0ec3..05a3134 100644
--- a/modepy/modes.py
+++ b/modepy/modes.py
@@ -27,10 +27,12 @@
 import math
 from abc import ABC, abstractmethod
 from collections.abc import Callable, Hashable, Iterable, Sequence
+from dataclasses import dataclass, field
 from functools import partial, singledispatch
 from typing import (
     TYPE_CHECKING,
     TypeVar,
+    cast,
 )
 
 import numpy as np
@@ -114,6 +116,7 @@
 
 .. autofunction:: monomial
 .. autofunction:: grad_monomial
+.. autofunction:: diff_monomial
 
 """
@@ -486,7 +489,38 @@ def monomial(order: tuple[int, ...], rst: np.ndarray) -> np.ndarray:
     return product(rst[i] ** order[i] for i in range(dim))
 
 
-def grad_monomial(order: tuple[int, ...], rst: np.ndarray) -> tuple[RealValueT, ...]:
+def _diff_monomial_1d(r: np.ndarray, o: int) -> np.ndarray:
+    if o == 0:
+        return 0*r
+    elif o == 1:
+        return 1+0*r
+    else:
+        return o * r**(o-1)
+
+
+def diff_monomial(
+        order: tuple[int, ...],
+        diff_axis: int,
+        rst: np.ndarray
+        ) -> np.ndarray:
+    """Evaluate the derivative of the monomial of order *order*
+    with respect to the axis *diff_axis* at the points *rst*.
+
+    :arg order: A tuple *(i, j,...)* representing the order of the polynomial.
+    :arg diff_axis: The index of the axis along which to differentiate.
+    :arg rst: ``rst[0], rst[1]`` are arrays of :math:`(r, s, ...)` coordinates.
+        (See :ref:`tri-coords`)
+    """
+    dim = len(order)
+    assert dim == rst.shape[0]
+
+    from pytools import product
+    return product(
+        _diff_monomial_1d(rst[i], order[i]) if diff_axis == i else rst[i] ** order[i]
+        for i in range(dim))
+
+
+def grad_monomial(order: tuple[int, ...], rst: np.ndarray) -> tuple[np.ndarray, ...]:
     """Evaluate the derivative of the monomial of order *order*
     at the points *rst*.
 
     :arg order: A tuple *(i, j,...)* representing the order of the polynomial.
@@ -500,19 +534,11 @@ def grad_monomial(order: tuple[int, ...], rst: np.ndarray) -> tuple[RealValueT,
     dim = len(order)
     assert dim == rst.shape[0]
 
-    def diff_monomial(r, o):
-        if o == 0:
-            return 0*r
-        elif o == 1:
-            return 1+0*r
-        else:
-            return o * r**(o-1)
-
     from pytools import product
     return tuple(
         product(
             (
-                diff_monomial(rst[i], order[i])
+                _diff_monomial_1d(rst[i], order[i])
                 if j == i else rst[i] ** order[i])
             for i in range(dim)
@@ -524,43 +550,45 @@ def diff_monomial(r, o):
 
 # {{{ tensor product basis helpers
 
+@dataclass(frozen=True)
 class _TensorProductBasisFunction:
     r"""
-    .. attribute:: multi_index
-
-       A :class:`tuple` used to identify each function in :attr:`functions`
-       that is mainly meant for debugging and not used internally.
-
-    .. attribute:: functions
-
-       A :class:`tuple` of callables that can be evaluated on the tensor
-       product space :math:`\mathbb{R}^{d_1} \times \cdots \times \mathbb{R}^{d_n}`,
-       i.e. one function :math:`f_i` for each :math:`\mathbb{R}^{d_i}` component
+    .. autoattribute:: multi_index
+    .. autoattribute:: functions
+    .. autoattribute:: dims_per_function
+    """
 
-    .. math::
+    multi_index: tuple[Hashable, ...]
+ """ + A :class:`tuple` used to identify each function in :attr:`functions` + that is mainly meant for debugging and not used internally. + """ - f(x_1, \dots, x_d) = - f_1(x_1, \dots, x_{d_1}) \times \cdots \times - f_n(x_{d_{n - 1}}, \dots, x_{d_n}) + functions: tuple[Callable[[np.ndarray], np.ndarray], ...] + r"""A :class:`tuple` of callables that can be evaluated on the tensor + product space :math:`\mathbb{R}^{d_1} \times \cdots \times \mathbb{R}^{d_n}`, + i.e. one function :math:`f_i` for each :math:`\mathbb{R}^{d_i}` component - .. attribute:: dims_per_function + .. math:: - A :class:`tuple` containing the dimensions :math:`(d_1, \dots, d_n)`. + f(x_1, \dots, x_d) = + f_1(x_1, \dots, x_{d_1}) \times \cdots \times + f_n(x_{d_{n - 1}}, \dots, x_{d_n}) """ - def __init__(self, - multi_index: tuple[Hashable, ...], - functions: tuple[Callable[[np.ndarray], np.ndarray], ...], *, - dims_per_function: tuple[int, ...]) -> None: - assert len(dims_per_function) == len(functions) + dims_per_function: tuple[int, ...] = field(kw_only=True) + r""" + A :class:`tuple` containing the dimensions :math:`(d_1, \dots, d_n)`. + """ - self.multi_index = multi_index - self.functions = functions + def __post_init__(self) -> None: + assert len(self.dims_per_function) == len(self.functions) - self.dims_per_function = dims_per_function - self.ndim = sum(self.dims_per_function) + @property + def ndim(self) -> int: + return sum(self.dims_per_function) - def __call__(self, x): + def __call__(self, x: np.ndarray) -> np.ndarray: assert x.shape[0] == self.ndim n = 0 @@ -573,7 +600,7 @@ def __call__(self, x): result = np.ones(x.shape[1:], dtype=(x+np.float32(1)).dtype) else: # Likely we're evaluating symbolically. - result = 1 + result = 1 # type: ignore[assignment] for d, function in zip(self.dims_per_function, self.functions, strict=True): result *= function(x[n:n + d]) @@ -586,69 +613,67 @@ def __repr__(self): f"dims={self.dims_per_function}, functions={self.functions})") +@dataclass(frozen=True) class _TensorProductGradientBasisFunction: r""" - .. attribute:: multi_index - - A :class:`tuple` used to identify each function in :attr:`functions` - that is mainly meant for debugging and not used internally. - - .. attribute:: derivatives - - A :class:`tuple` of :class:`tuple`\ s of callables ``df[i][j]`` that - evaluate the derivatives of the tensor product. Each ``df[i]`` tuple - is equivalent to a :class:`_TensorProductBasisFunction` and is used - to evaluate the derivatives of a single basis function of the tensor - product. To be specific, a basis function in the tensor product is - given by + .. autoattribute:: multi_index + .. autoattribute:: derivatives + .. autoattribute:: dims_per_function + """ - .. math:: + multi_index: tuple[int, ...] + """A :class:`tuple` used to identify each function in :attr:`functions` + that is mainly meant for debugging and not used internally. + """ - f(x_1, \dots, x_d) = - f_1(x_1, \dots, x_{d_1}) \times \cdots \times - f_n(x_{d_{n - 1}}, \dots, x_{d_n}) + derivatives: tuple[tuple[ + Callable[[np.ndarray], np.ndarray | tuple[np.ndarray, ...]], + ...], ...] + r"""A :class:`tuple` of :class:`tuple`\ s of callables ``df[i][j]`` that + evaluate the derivatives of the tensor product. Each ``df[i]`` tuple + is equivalent to a :class:`_TensorProductBasisFunction` and is used + to evaluate the derivatives of a single basis function of the tensor + product. 
+    given by
 
-       and its derivative with respect to :math:`x_k`, for :math:`k \in
-       [d_i, d_{i + 1})` is given by
+    .. math::
 
-       .. math::
+        f(x_1, \dots, x_d) =
+            f_1(x_1, \dots, x_{d_1}) \times \cdots \times
+            f_n(x_{d_{n - 1}}, \dots, x_{d_n})
 
-          \frac{\partial f}{\partial x_k} =
-          f_1 \times \cdots \times
-          \frac{\partial f_i}{x_k}
-          \times \cdots \times
-          f_n.
+    and its derivative with respect to :math:`x_k`, for :math:`k \in
+    [d_i, d_{i + 1})`, is given by
 
-       In our notation, ``df[i]`` gives all the derivatives of :math:`f`
-       with respect to :math:`k \in [d_i, d_{i + 1})`. When evaluating
-       ``df[i][j]`` can be a function :math:`f_i`, for which the callable
-       just returns the function values, or :math:`\partial_k f_i`, for
-       which it returns all the derivatives with respect to
-       :math:`k \in [d_i, d_{i + 1}]`.
+    .. math::
 
-    .. attribute:: dims_per_function
+        \frac{\partial f}{\partial x_k} =
+            f_1 \times \cdots \times
+            \frac{\partial f_i}{\partial x_k}
+            \times \cdots \times
+            f_n.
 
-       A :class:`tuple` containing the dimensions :math:`(d_1, \dots, d_n)`.
-    """
+    In our notation, ``df[i]`` gives all the derivatives of :math:`f`
+    with respect to :math:`k \in [d_i, d_{i + 1})`. When evaluating,
+    ``df[i][j]`` can be a function :math:`f_i`, for which the callable
+    just returns the function values, or :math:`\partial_k f_i`, for
+    which it returns all the derivatives with respect to
+    :math:`k \in [d_i, d_{i + 1})`."""
 
-    def __init__(self,
-            multi_index: tuple[int, ...],
-            derivatives: tuple[tuple[
-                Callable[[np.ndarray], np.ndarray | tuple[np.ndarray, ...]],
-                ...], ...], *,
-            dims_per_function: tuple[int, ...]) -> None:
-        assert all(len(dims_per_function) == len(df) for df in derivatives)
+    dims_per_function: tuple[int, ...] = field(kw_only=True)
+    r"""A :class:`tuple` containing the dimensions :math:`(d_1, \dots, d_n)`."""
 
-        self.multi_index = multi_index
-        self.derivatives = tuple(derivatives)
+    def __post_init__(self) -> None:
+        assert all(len(self.dims_per_function) == len(df) for df in self.derivatives)
 
-        self.dims_per_function = dims_per_function
-        self.ndim = sum(self.dims_per_function)
+    @property
+    def ndim(self) -> int:
+        return sum(self.dims_per_function)
 
-    def __call__(self, x):
+    def __call__(self, x: np.ndarray) -> tuple[np.ndarray, ...]:
         assert x.shape[0] == self.ndim
 
-        result = [1] * self.ndim
+        result: list[int | np.ndarray] = [1] * self.ndim
         n = 0
         for ider, derivative in enumerate(self.derivatives):
             f = 0
@@ -670,7 +696,7 @@ def __call__(self, x):
                     f += iaxis
             n += self.dims_per_function[ider]
 
-        return tuple(result)
+        return cast(tuple[np.ndarray, ...], tuple(result))
 
     def __repr__(self):
         return (f"{type(self).__name__}(mi={self.multi_index}, "
@@ -766,6 +792,28 @@ def gradients(self) -> tuple[BasisGradientType, ...]:
         Each function returns a tuple of derivatives, one per reference axis.
         """
 
+    def derivatives(self, axis: int) -> tuple[BasisFunctionType, ...]:
+        """
+        Returns a tuple of callables, in the same order as :meth:`functions`,
+        that evaluate the derivative of each corresponding basis function
+        with respect to *axis*.
+
+        .. note::
+
+            :meth:`gradients` and :meth:`derivatives` provide equivalent
+            functionality, but :meth:`derivatives` is often more
+            convenient to use. If *all* gradient data is desired
+            for simplices, calling :meth:`gradients` can be somewhat more
+            efficient because subexpressions can be reused across
+            components of the gradient.
+
+        .. versionadded:: 2024.1.1
+        """
+        return tuple(
+            partial(lambda fi_inner, nodes: fi_inner(nodes)[axis], fi)
+            for fi in self.gradients
+        )
+
 # }}}
 
 
@@ -860,13 +908,18 @@ def orthonormality_weight(self) -> float:
         raise BasisNotOrthonormal
 
     @property
-    def functions(self):
+    def functions(self) -> tuple[BasisFunctionType, ...]:
         return tuple(partial(monomial, mid) for mid in self.mode_ids)
 
     @property
-    def gradients(self):
+    def gradients(self) -> tuple[BasisGradientType, ...]:
         return tuple(partial(grad_monomial, mid) for mid in self.mode_ids)
 
+    def derivatives(self, axis: int) -> tuple[BasisFunctionType, ...]:
+        return tuple(
+            partial(diff_monomial, mid, axis)
+            for mid in self.mode_ids)
+
 
 @basis_for_space.register(PN)
 def _basis_for_pn(space: PN, shape: Simplex):
@@ -946,6 +999,15 @@ def _mode_index_tuples(self) -> tuple[tuple[int, ...], ...]:
                 for mid in gnitb([len(b.functions) for b in self._bases[::-1]]))
 
+    @property
+    def _dim_ranges(self) -> tuple[tuple[int, int], ...]:
+        idim = 0
+        result = []
+        for dims in self._dims_per_basis:
+            result.append((idim, idim + dims))
+            idim += dims
+        return tuple(result)
+
     @property
     def mode_ids(self) -> tuple[Hashable, ...]:
         underlying_mode_id_lists = [basis.mode_ids for basis in self._bases]
@@ -977,10 +1039,11 @@ def part_flat_tuple(iterable: Iterable[tuple[bool, Hashable]]
 
     @property
     def functions(self) -> tuple[BasisFunctionType, ...]:
+        bases = [b.functions for b in self._bases]
         return tuple(
             _TensorProductBasisFunction(
                 mid,
-                tuple(self.bases[ibasis].functions[mid_i]
+                tuple(bases[ibasis][mid_i]
                     for ibasis, mid_i in enumerate(mid)),
                 dims_per_function=self._dims_per_basis)
             for mid in self._mode_index_tuples)
@@ -1002,6 +1065,21 @@ def gradients(self) -> tuple[BasisGradientType, ...]:
                 dims_per_function=self._dims_per_basis)
             for mid in self._mode_index_tuples)
 
+    def derivatives(self, axis: int) -> tuple[BasisFunctionType, ...]:
+        bases = [b.functions for b in self._bases]
+        return tuple(
+            _TensorProductBasisFunction(
+                mid,
+                tuple(
+                    (
+                        self._bases[ibasis].derivatives(axis-start_axis)[mid_i]
+                        if start_axis <= axis < end_axis
+                        else bases[ibasis][mid_i])
+                    for ibasis, (mid_i, (start_axis, end_axis))
+                    in enumerate(zip(mid, self._dim_ranges, strict=True))),
+                dims_per_function=self._dims_per_basis)
+            for mid in self._mode_index_tuples)
+
 
 @orthonormal_basis_for_space.register(TensorProductSpace)
 def _orthonormal_basis_for_tp(
diff --git a/modepy/test/test_modes.py b/modepy/test/test_modes.py
index 04f8d8f..fb92365 100644
--- a/modepy/test/test_modes.py
+++ b/modepy/test/test_modes.py
@@ -178,7 +178,8 @@ def test_basis_grad(dim, shape_cls, order, basis_getter):
     from pytools import wandering_element
     from pytools.convergence import EOCRecorder
 
-    for bf, gradbf in zip(basis.functions, basis.gradients, strict=True):
+    for ifunc, (bf, gradbf) in enumerate(
+            zip(basis.functions, basis.gradients, strict=True)):
         eoc_rec = EOCRecorder()
         for h in [1e-2, 1e-3]:
             r = mp.random_nodes_for_shape(shape, nnodes=1000, rng=rng)
@@ -198,6 +199,15 @@ def test_basis_grad(dim, shape_cls, order, basis_getter):
             logger.info("error: %.5e", err)
             eoc_rec.add_data_point(h, err)
 
+            for idiff_axis in range(shape.dim):
+                dr_bf = basis.derivatives(idiff_axis)[ifunc](r)
+                ref = gradbf_v[idiff_axis]
+                err = la.norm(dr_bf - ref, 2)
+                if la.norm(ref, 2) > 1e-13:
+                    err = err / la.norm(ref, 2)
+
+                assert err < 1e-13
+
         tol = 1e-8
         if eoc_rec.max_error() >= tol:
             logger.info("\n%s", str(eoc_rec))
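--
Reviewer note (not part of the patch): a minimal sketch of how the new
``Basis.derivatives`` API composes with ``vandermonde``, mirroring the
relationship stated in the ``multi_vandermonde`` note above. The shape,
order, and node choices below are illustrative only, assuming modepy's
documented entry points (``Simplex``, ``space_for_shape``,
``orthonormal_basis_for_space``, ``random_nodes_for_shape``):

    import numpy as np
    import modepy as mp

    # Orthonormal PN basis on the 2D reference simplex.
    shape = mp.Simplex(2)
    space = mp.space_for_shape(shape, order=3)
    basis = mp.orthonormal_basis_for_space(space, shape)

    rng = np.random.default_rng(seed=42)
    nodes = mp.random_nodes_for_shape(shape, nnodes=20, rng=rng)

    # New in this patch: derivatives(axis) yields one callable per basis
    # function, so the differentiated basis drops straight into
    # vandermonde() like any other set of basis functions.
    vdm_r = mp.vandermonde(basis.derivatives(0), nodes)

    # Pre-existing equivalent path: multi_vandermonde() consumes
    # basis.gradients and returns one matrix per reference axis.
    vdm_r_ref = mp.multi_vandermonde(basis.gradients, nodes)[0]

    assert np.allclose(vdm_r, vdm_r_ref)

This is also essentially what the added check in test_basis_grad verifies
pointwise for every basis function and axis.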