diff --git a/.buildinfo b/.buildinfo
index 31e8e274..ac20601e 100644
--- a/.buildinfo
+++ b/.buildinfo
@@ -1,4 +1,4 @@
 # Sphinx build info version 1
 # This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
-config: d4a3d79d798cc056ca1d07299713b106
+config: 164316e6c6e9cd74c0f230f8bf4852d6
 tags: 645f666f9bcd5a90fca523b33c5a78b7
diff --git a/_LICENSE.html b/_LICENSE.html
index b40d0649..2e9dcfa7 100644
--- a/_LICENSE.html
+++ b/_LICENSE.html
[Page shell regenerated: the title version string "PINA 0.1.1.post2407 documentation" becomes "PINA 0.1.2 documentation", and the Read the Docs theme head, navigation and footer markup ("Built with Sphinx using a theme provided by Read the Docs", "© Copyright 2021-2024, PINA Contributors. Last updated on Jul 01, 2024.") is replaced by the new theme's markup ("Created using Sphinx 7.4.7."). The same shell regeneration applies to every HTML page below and is elided from here on.]
\ No newline at end of file
diff --git a/_cite.html b/_cite.html
index 9c21978a..28fee6bb 100644
--- a/_cite.html
+++ b/_cite.html
[Regenerated page shell.]
\ No newline at end of file
diff --git a/_images/API_color1.png b/_images/API_color1.png
deleted file mode 100644
index 9e61695c..00000000
Binary files a/_images/API_color1.png and /dev/null differ
diff --git a/_images/tutorial_13_011.png b/_images/tutorial_13_011.png
new file mode 100644
index 00000000..3a838eea
Binary files /dev/null and b/_images/tutorial_13_011.png differ
diff --git a/_images/tutorial_13_31.png b/_images/tutorial_13_31.png
new file mode 100644
index 00000000..b0e5d83f
Binary files /dev/null and b/_images/tutorial_13_31.png differ
diff --git a/_images/tutorial_15_01.png b/_images/tutorial_15_01.png
new file mode 100644
index 00000000..eca9363d
Binary files /dev/null and b/_images/tutorial_15_01.png differ
diff --git a/_images/tutorial_19_0.png b/_images/tutorial_19_0.png
deleted file mode 100644
index 81485c71..00000000
Binary files a/_images/tutorial_19_0.png and /dev/null differ
diff --git a/_images/tutorial_27_0.png b/_images/tutorial_27_0.png
new file mode 100644
index 00000000..1e1c3be3
Binary files /dev/null and b/_images/tutorial_27_0.png differ
diff --git a/_images/tutorial_32_01.png b/_images/tutorial_32_01.png
new file mode 100644
index 00000000..843d8376
Binary files /dev/null and b/_images/tutorial_32_01.png differ
diff --git a/_images/tutorial_36_01.png b/_images/tutorial_36_01.png
new file mode 100644
index 00000000..fc10554a
Binary files /dev/null and b/_images/tutorial_36_01.png differ
diff --git a/_images/tutorial_5_01.png b/_images/tutorial_5_01.png
new file mode 100644
index 00000000..cc31b44c
Binary files /dev/null and b/_images/tutorial_5_01.png differ
diff --git a/_images/tutorial_5_02.png b/_images/tutorial_5_02.png
new file mode 100644
index 00000000..deda1958
Binary files /dev/null and b/_images/tutorial_5_02.png differ
diff --git a/_images/tutorial_5_1.png b/_images/tutorial_5_1.png
deleted file mode 100644
index 00ee2a00..00000000
Binary files a/_images/tutorial_5_1.png and /dev/null differ
diff --git a/_modules/index.html b/_modules/index.html
index caf2faf3..ddf29f91 100644
--- a/_modules/index.html
+++ b/_modules/index.html
[Regenerated page shell.]
\ No newline at end of file
diff --git a/_modules/pina/adaptive_functions/adaptive_func.html b/_modules/pina/adaptive_functions/adaptive_func.html
index 5d09fc3d..35b03a4f 100644
--- a/_modules/pina/adaptive_functions/adaptive_func.html
+++ b/_modules/pina/adaptive_functions/adaptive_func.html
[Regenerated page shell.]
Source code for pina.adaptive_functions.adaptive_func

""" Module for adaptive functions. """

from .adaptive_func_interface import AdaptiveActivationFunctionInterface

[The source hunks only re-wrap the [docs] viewcode anchors around the adaptive activation classes — AdaptiveReLU, AdaptiveSigmoid, AdaptiveTanh, AdaptiveSiLU, AdaptiveMish, AdaptiveELU, AdaptiveCELU, AdaptiveGELU, AdaptiveSoftmin, AdaptiveSoftmax, AdaptiveSIREN and AdaptiveExp — each of which stores its wrapped torch callable in self._func (e.g. AdaptiveExp calls super().__init__(alpha, beta, 0.0, fixed) and sets self._func = torch.exp); the Python source itself is unchanged.]
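A minimal usage sketch of these classes (the zero-argument constructor defaulting to trainable parameters is an assumption based on the interface defaults below; the import path follows this module):

import torch
from pina.adaptive_functions.adaptive_func import AdaptiveReLU

act = AdaptiveReLU()                    # assumed: defaults give trainable alpha, beta, gamma
x = torch.linspace(-2.0, 2.0, steps=5)
y = act(x)                              # elementwise alpha * ReLU(beta * x + gamma)
print(y.shape)                          # torch.Size([5])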
\ No newline at end of file
diff --git a/_modules/pina/adaptive_functions/adaptive_func_interface.html b/_modules/pina/adaptive_functions/adaptive_func_interface.html
index e4e651ad..4ff3d42c 100644
--- a/_modules/pina/adaptive_functions/adaptive_func_interface.html
+++ b/_modules/pina/adaptive_functions/adaptive_func_interface.html
[Regenerated page shell.]
Source code for pina.adaptive_functions.adaptive_func_interface

""" Module for adaptive functions. """

from abc import ABCMeta

[Viewcode-anchor churn only; source unchanged. AdaptiveActivationFunctionInterface(torch.nn.Module, metaclass=ABCMeta) turns a torch.nn.Module activation into an adaptive one: __init__(self, alpha=None, beta=None, gamma=None, fixed=None) stores the wrapped activation in self._func, forward(x) applies it elementwise as self.alpha * (self._func(self.beta * x + self.gamma)), and alpha, beta, gamma and func are exposed as read-only properties.]
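Following the pattern of the built-in classes, a new adaptive function can be defined by delegating to this constructor and storing the callable in self._func. A hedged sketch (the exact constructor contract of subclasses is an assumption inferred from AdaptiveExp above):

import torch
from pina.adaptive_functions.adaptive_func_interface import (
    AdaptiveActivationFunctionInterface,
)

class AdaptiveSoftplus(AdaptiveActivationFunctionInterface):
    """Hypothetical adaptive wrapper around torch.nn.Softplus."""

    def __init__(self, alpha=None, beta=None, gamma=None, fixed=None):
        super().__init__(alpha, beta, gamma, fixed)
        self._func = torch.nn.Softplus()  # forward -> alpha * Softplus(beta * x + gamma)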
\ No newline at end of file
diff --git a/_modules/pina/callbacks/adaptive_refinment_callbacks.html b/_modules/pina/callbacks/adaptive_refinment_callbacks.html
index 471a6bcb..226e2ba1 100644
--- a/_modules/pina/callbacks/adaptive_refinment_callbacks.html
+++ b/_modules/pina/callbacks/adaptive_refinment_callbacks.html
[Regenerated page shell.]
\ No newline at end of file
diff --git a/_modules/pina/callbacks/optimizer_callbacks.html b/_modules/pina/callbacks/optimizer_callbacks.html
index 4052550e..8d67e334 100644
--- a/_modules/pina/callbacks/optimizer_callbacks.html
+++ b/_modules/pina/callbacks/optimizer_callbacks.html
[Regenerated page shell.]
Source code for pina.callbacks.optimizer_callbacks

"""PINA Callbacks Implementations"""

from ..utils import check_consistency

[Viewcode-anchor churn only; source unchanged. SwitchOptimizer(new_optimizers, new_optimizers_kwargs, epoch_switch) is a Lightning Callback for switching between optimizers during training; it raises ValueError when the number of optimizers and keyword-argument dictionaries differ, and its on_train_epoch_start hook reassigns trainer.optimizers at the switch epoch.]
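A hedged usage sketch based on the signature above (the pina.callbacks import path and the Trainer wiring are assumptions):

import torch
from pina.callbacks import SwitchOptimizer

switch = SwitchOptimizer(
    new_optimizers=[torch.optim.LBFGS],   # optimizer(s) to switch to
    new_optimizers_kwargs=[{"lr": 1.0}],  # one kwargs dict per optimizer
    epoch_switch=1000,                    # epoch at which the swap happens
)
# trainer = Trainer(solver, callbacks=[switch], max_epochs=2000)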
\ No newline at end of file
diff --git a/_modules/pina/callbacks/processing_callbacks.html b/_modules/pina/callbacks/processing_callbacks.html
index a90207ef..f53085dc 100644
--- a/_modules/pina/callbacks/processing_callbacks.html
+++ b/_modules/pina/callbacks/processing_callbacks.html
[Regenerated page shell.]
\ No newline at end of file
diff --git a/_modules/pina/condition.html b/_modules/pina/condition.html
index 3385ce77..bac5bc60 100644
--- a/_modules/pina/condition.html
+++ b/_modules/pina/condition.html
[Regenerated page shell.]
Source code for pina.condition

""" Condition module. """

def dummy(a):
    """Dummy function for testing purposes."""
    return None

[Viewcode-anchor churn only; source unchanged. Condition represents the constraints (physical equations, boundary conditions, etc.) to be satisfied in the problem at hand and is used to formulate the PINA AbstractProblem. It accepts keyword arguments only (the valid combinations are listed in Condition.__slots__, including location + equation and input_points + equation), pops an optional data_weight (default 1.0), and type-checks that input_points is a LabelTensor.]
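A hedged sketch of the two keyword combinations validated above (top-level import paths are assumptions; FixedValue and CartesianDomain appear elsewhere in this diff):

import torch
from pina import Condition, LabelTensor
from pina.geometry import CartesianDomain
from pina.equation.equation_factory import FixedValue

# location + equation: enforce u = 0 on a boundary region
gamma_top = Condition(
    location=CartesianDomain({"x": [0, 1], "y": 1.0}),
    equation=FixedValue(0.0),
)

# input_points + equation, with the optional data_weight
pts = LabelTensor(torch.rand(10, 2), labels=["x", "y"])
interior = Condition(input_points=pts, equation=FixedValue(0.0), data_weight=2.0)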
\ No newline at end of file
diff --git a/_modules/pina/equation/equation.html b/_modules/pina/equation/equation.html
index 7ff53d21..d7a5c291 100644
--- a/_modules/pina/equation/equation.html
+++ b/_modules/pina/equation/equation.html
[Regenerated page shell.]
Source code for pina.equation.equation

""" Module for Equation. """

from .equation_interface import EquationInterface

[Viewcode-anchor churn only; source unchanged. Equation wraps a single callable for specifying any equation in PINA — each equation passed to a Condition must be an Equation or SystemEquation. The constructor raises ValueError if the argument is not callable, and residual(input_, output_, params_=None) evaluates the callable with or without the extra parameters.]
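A short sketch of wrapping a residual callable (the pina.operators import is taken from equation_factory below; the rest is illustrative):

from pina.equation.equation import Equation
from pina.operators import laplacian

def poisson_residual(input_, output_):
    # residual of the Poisson problem -Delta u = 1, zero at the solution
    return laplacian(output_, input_) + 1.0

poisson = Equation(poisson_residual)
# residual_values = poisson.residual(input_points, model_output)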
\ No newline at end of file
diff --git a/_modules/pina/equation/equation_factory.html b/_modules/pina/equation/equation_factory.html
index c5f90210..fe881c9b 100644
--- a/_modules/pina/equation/equation_factory.html
+++ b/_modules/pina/equation/equation_factory.html
[Regenerated page shell.]
Source code for pina.equation.equation_factory

""" Module """

from ..operators import grad, div, laplacian

[Viewcode-anchor churn only; source unchanged. The module provides thin Equation subclasses: FixedValue(value, components=None) for fixed-value constraints such as Dirichlet boundary conditions, FixedGradient(value, components=None, d=None), FixedFlux(value, components=None, d=None), and Laplace(components=None, d=None), which enforces laplacian(output_, input_, components=components, d=d) = 0 (zero force term).]
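A hedged sketch of typical use of these factory classes (keyword meanings follow the signatures above; the list-of-dimensions usage of d is an assumption):

from pina.equation.equation_factory import FixedValue, FixedGradient, Laplace

dirichlet = FixedValue(0.0)            # enforce u = 0 (e.g. Dirichlet boundary)
neumann = FixedGradient(1.0, d=["x"])  # enforce du/dx = 1 along x (assumed kwarg use)
interior = Laplace()                   # enforce Laplacian(u) = 0 in the domain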
\ No newline at end of file
diff --git a/_modules/pina/equation/equation_interface.html b/_modules/pina/equation/equation_interface.html
index 59a5e531..6ab7debd 100644
--- a/_modules/pina/equation/equation_interface.html
+++ b/_modules/pina/equation/equation_interface.html
[Regenerated page shell.]
Source code for pina.equation.equation_interface

""" Module for EquationInterface class """

from abc import ABCMeta, abstractmethod

[Viewcode-anchor churn only; source unchanged. EquationInterface(metaclass=ABCMeta) is the abstract base class for equations; it declares the abstract method residual(input_, output_, params_), which returns the residual evaluation of the specified equation as a LabelTensor.]
\ No newline at end of file
diff --git a/_modules/pina/equation/system_equation.html b/_modules/pina/equation/system_equation.html
index 02afed78..7bcd08a3 100644
--- a/_modules/pina/equation/system_equation.html
+++ b/_modules/pina/equation/system_equation.html
[Regenerated page shell.]
Source code for pina.equation.system_equation

""" Module for SystemEquation. """

@@ -96,10 +462,12 @@
 from ..utils import check_consistency

-[docs]class SystemEquation(Equation):
+[docs]
+class SystemEquation(Equation):

-    def __init__(self, list_equation, reduction="mean"):
+    def __init__(self, list_equation, reduction=None):
         """
         System of Equation class for specifing any system
         of equations in PINA. Each ``equation`` passed to a
         ``Condition`` object
@@ -110,14 +478,13 @@
         :param Callable equation: A ``torch`` callable equation to
             evaluate the residual
         :param str reduction: Specifies the reduction to apply to the output:
-            ``none`` | ``mean`` | ``sum`` | ``callable``. ``none``: no reduction
-            will be applied, ``mean``: the sum of the output will be divided
+            None | ``mean`` | ``sum`` | callable. None: no reduction
+            will be applied, ``mean``: the output sum will be divided
             by the number of elements in the output, ``sum``: the output will
-            be summed. ``callable`` a callable function to perform reduction,
-            no checks guaranteed. Default: ``mean``.
+            be summed. *callable* is a callable function to perform reduction,
+            no checks guaranteed. Default: None.
         """
         check_consistency([list_equation], list)
-        check_consistency(reduction, str)

         # equations definition
         self.equations = []
@@ -129,15 +496,17 @@
             self.reduction = torch.mean
         elif reduction == "sum":
             self.reduction = torch.sum
-        elif (reduction == "none") or callable(reduction):
+        elif (reduction == None) or callable(reduction):
             self.reduction = reduction
         else:
             raise NotImplementedError(
                 "Only mean and sum reductions implemented."
             )

-[docs]    def residual(self, input_, output_, params_=None):
+[docs]
+    def residual(self, input_, output_, params_=None):
         """
         Residual computation for the equations of the system.

         :param LabelTensor input_: Input points to evaluate the system of
@@ -163,39 +532,76 @@
             ]
         )
-        if self.reduction == "none":
+        if self.reduction is None:
             return residual
-        return self.reduction(residual, dim=-1)
+        return self.reduction(residual, dim=-1)
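With this change the default reduction is None, so residual returns the stacked per-equation residuals unless "mean", "sum" or a callable is requested. A hedged sketch (the input_points/model_output placeholders are illustrative):

from pina.equation.system_equation import SystemEquation
from pina.operators import laplacian

def laplace_part(input_, output_):
    return laplacian(output_, input_)

def value_part(input_, output_):
    return output_

system = SystemEquation([laplace_part, value_part])      # reduction=None (new default)
# stacked = system.residual(input_points, model_output)  # one residual per equation
summed = SystemEquation([laplace_part, value_part], reduction="sum")
# total = summed.residual(input_points, model_output)    # reduced over the equations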
\ No newline at end of file
diff --git a/_modules/pina/geometry/cartesian.html b/_modules/pina/geometry/cartesian.html
index 51f208c7..681c76b7 100644
--- a/_modules/pina/geometry/cartesian.html
+++ b/_modules/pina/geometry/cartesian.html
[Regenerated page shell.]
Source code for pina.geometry.cartesian

import torch

from ..utils import torch_lhs, chebyshev_roots

[Viewcode-anchor churn only; source unchanged. CartesianDomain(cartesian_dict) is the PINA hypercube domain: dictionary keys name the input variables and values give either the domain extrema (range_) or a fixed value (fixed_). It exposes the variables property, update(new_domain) to merge in new dimensions, sample(n, mode="random", variables="all") — dispatching to the _1d_sampler, _Nd_sampler and _single_points_sample helpers, with "random" and "lh"/"latin" handled by the N-dimensional sampler and a ValueError raised for invalid modes — and is_inside(point, check_border=False).]
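A sampling sketch using the modes handled above (the pina.geometry import path is an assumption):

from pina.geometry import CartesianDomain

square = CartesianDomain({"x": [0, 1], "y": [0, 1]})
pts = square.sample(n=100, mode="random")    # LabelTensor labelled ["x", "y"]
lh_pts = square.sample(n=100, mode="latin")  # latin-hypercube sampling
print(square.is_inside(pts[0], check_border=False))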
\ No newline at end of file
diff --git a/_modules/pina/geometry/difference_domain.html b/_modules/pina/geometry/difference_domain.html
index 1b376a24..63687ff2 100644
--- a/_modules/pina/geometry/difference_domain.html
+++ b/_modules/pina/geometry/difference_domain.html
[Regenerated page shell.]
\ No newline at end of file
diff --git a/_modules/pina/geometry/ellipsoid.html b/_modules/pina/geometry/ellipsoid.html
index 86386599..ca6a9ea1 100644
--- a/_modules/pina/geometry/ellipsoid.html
+++ b/_modules/pina/geometry/ellipsoid.html
[Regenerated page shell.]
\ No newline at end of file
diff --git a/_modules/pina/geometry/exclusion_domain.html b/_modules/pina/geometry/exclusion_domain.html
index 7e2adff0..55de739e 100644
--- a/_modules/pina/geometry/exclusion_domain.html
+++ b/_modules/pina/geometry/exclusion_domain.html
[Regenerated page shell.]
\ No newline at end of file
diff --git a/_modules/pina/geometry/intersection_domain.html b/_modules/pina/geometry/intersection_domain.html
index 854b6a25..c337e105 100644
--- a/_modules/pina/geometry/intersection_domain.html
+++ b/_modules/pina/geometry/intersection_domain.html
[Regenerated page shell.]
\ No newline at end of file
diff --git a/_modules/pina/geometry/location.html b/_modules/pina/geometry/location.html
index 38eb0cf8..48c8390c 100644
--- a/_modules/pina/geometry/location.html
+++ b/_modules/pina/geometry/location.html
[Regenerated page shell.]
\ No newline at end of file
diff --git a/_modules/pina/geometry/operation_interface.html b/_modules/pina/geometry/operation_interface.html
index e1581ec0..3e4253f1 100644
--- a/_modules/pina/geometry/operation_interface.html
+++ b/_modules/pina/geometry/operation_interface.html
[Regenerated page shell.]
Source code for pina.geometry.operation_interface

""" Module for OperationInterface class. """

from abc import ABCMeta, abstractmethod

[Viewcode-anchor churn only; source unchanged. OperationInterface(Location, metaclass=ABCMeta) is the abstract base for set operations between geometries: the constructor takes the list of pina.geometry domains to combine, geometries and variables are read-only properties, is_inside(point, check_border=False) is abstract, and _check_dimensions raises if the geometries do not share the same dimensions and labels.]
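The concrete set operations live in the sibling modules touched by this diff (union_domain, intersection_domain, difference_domain, exclusion_domain). A hedged sketch, where the class name Union and its import path are assumptions:

from pina.geometry import CartesianDomain, Union

left = CartesianDomain({"x": [0, 1], "y": [0, 1]})
right = CartesianDomain({"x": [0.5, 1.5], "y": [0, 1]})
both = Union([left, right])  # geometries must share dimensions and labels
pts = both.sample(100)       # sampling API assumed to mirror the other domains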
\ No newline at end of file
diff --git a/_modules/pina/geometry/simplex.html b/_modules/pina/geometry/simplex.html
index d1ee11e1..a07d9dcd 100644
--- a/_modules/pina/geometry/simplex.html
+++ b/_modules/pina/geometry/simplex.html
[Regenerated page shell.]
\ No newline at end of file
diff --git a/_modules/pina/geometry/union_domain.html b/_modules/pina/geometry/union_domain.html
index 4d4bb643..ea7ad962 100644
--- a/_modules/pina/geometry/union_domain.html
+++ b/_modules/pina/geometry/union_domain.html
[Regenerated page shell.]
\ No newline at end of file
diff --git a/_modules/pina/label_tensor.html b/_modules/pina/label_tensor.html
index 68448ef0..8d94fa80 100644
--- a/_modules/pina/label_tensor.html
+++ b/_modules/pina/label_tensor.html
[Regenerated page shell.]
Source code for pina.label_tensor

""" Module for LabelTensor """

from torch import Tensor

[Viewcode-anchor churn only; source unchanged. LabelTensor is a torch.Tensor whose columns carry string labels. It overrides vstack, clone, to, select, cuda, cpu, detach, requires_grad_, __getitem__, __deepcopy__ and __str__ to propagate labels, and adds extract(label_to_extract) to slice columns by label (raising ValueError for a label not in the list) and append(lt, mode="std") to merge two labelled tensors.]
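A quick sketch of the labelled-tensor operations listed above (the top-level import path is an assumption):

import torch
from pina import LabelTensor

t = LabelTensor(torch.rand(4, 3), labels=["x", "y", "u"])
xy = t.extract(["x", "y"])            # select columns by label
stacked = LabelTensor.vstack([t, t])  # vertical stack, labels preserved
merged = xy.append(t.extract(["u"]))  # default mode="std" merge
print(merged.labels)                  # ['x', 'y', 'u']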
\ No newline at end of file
diff --git a/_modules/pina/loss.html b/_modules/pina/loss.html
index e53fead5..541313f3 100644
--- a/_modules/pina/loss.html
+++ b/_modules/pina/loss.html
[Regenerated page shell.]
Source code for pina.loss

""" Module for Loss class """

__all__ = ["LossInterface", "LpLoss", "PowerLoss"]

[Viewcode-anchor churn only; source unchanged. LossInterface(_Loss, metaclass=ABCMeta) takes reduction="mean" ("none" | "mean" | "sum"), declares an abstract forward(input, target) and supplies the _reduction helper. LpLoss(p=2, reduction="mean", relative=False) computes torch.linalg.norm(input - target, ord=p, dim=-1), optionally divided by the norm of input; PowerLoss computes torch.abs(input - target).pow(p).mean(-1), optionally relative, before applying the reduction.]
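Both losses reduce over the last (component) dimension first and then apply the requested reduction; a short sketch:

import torch
from pina.loss import LpLoss, PowerLoss

pred, true = torch.rand(8, 2), torch.rand(8, 2)

l2 = LpLoss(p=2, reduction="mean", relative=False)
print(l2(pred, true))        # mean L2 norm of the rowwise errors

mse_like = PowerLoss(p=2)
print(mse_like(pred, true))  # mean of |pred - true|^2 over components, then over rows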
\ No newline at end of file
diff --git a/_modules/pina/model/avno.html b/_modules/pina/model/avno.html
index 31926128..3c58a986 100644
--- a/_modules/pina/model/avno.html
+++ b/_modules/pina/model/avno.html
[Regenerated page shell.]
+ + + + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + +
+
+ + + + + +
+ + + + + + + + + + +
- - -
- -
-
-
-
    -
  • - - -
  • -
  • + +
    + + +
    +
    + +
    +
    + +
    + +
    + + + + +
    + +
    + + +
    -
    -
    - + + + + + +
    +

    Source code for pina.model.avno

     """Module Averaging Neural Operator."""
     

     from pina.utils import check_consistency
     
     
class AveragingNeuralOperator(KernelNeuralOperator):
    """
    Implementation of the Averaging Neural Operator.

    The Averaging Neural Operator is a general architecture for

             n_layers=4,
             func=nn.GELU,
         ):
        """
        :param torch.nn.Module lifting_net: The neural network for lifting
            the input. It must take as input the input field and the
            coordinates at which the input field is evaluated. The output of
            the lifting

             )
             super().__init__(lifting_net, integral_net, projecting_net)
     
    def forward(self, x):
        r"""
        Forward computation for the Averaging Neural Operator. It performs a
        lifting of the input by the ``lifting_net``; then several Averaging
        Neural Operator blocks are applied.
        """

             new_batch = self._integral_kernels(new_batch)
             new_batch = concatenate((new_batch, points_tmp), dim=-1)
             new_batch = self._projection_operator(new_batch)
        return new_batch
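A short usage sketch for the operator above. Only part of the constructor
signature is visible in this diff, so the keyword names, the LabelTensor
labels, and the shapes below are assumptions to check against the API
reference:

    import torch
    from pina import LabelTensor
    from pina.model import AveragingNeuralOperator

    lifting = torch.nn.Linear(3, 64)         # field "u" + coordinates "x", "y"
    projection = torch.nn.Linear(64 + 2, 1)  # hidden state + coordinates

    ano = AveragingNeuralOperator(
        lifting_net=lifting,
        projecting_net=projection,
        field_indices=["u"],
        coordinates_indices=["x", "y"],
        n_layers=4,
    )
    batch = LabelTensor(torch.rand(8, 100, 3), labels=["u", "x", "y"])
    print(ano(batch).shape)  # expected: [8, 100, 1]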
diff --git a/_modules/pina/model/base_no.html b/_modules/pina/model/base_no.html
index 2a660730..fe170b24 100644

Source code for pina.model.base_no

 """
 Kernel Neural Operator Module.
"""

 from pina.utils import check_consistency
 
 
class KernelNeuralOperator(torch.nn.Module):
    r"""
    Base class for composing Neural Operators with integral kernels.

    This is a base class for composing neural operators with multiple

     """
 
     def __init__(self, lifting_operator, integral_kernels, projection_operator):
        """
         :param torch.nn.Module lifting_operator: The lifting operator
             mapping the input to its hidden dimension.
         :param torch.nn.Module integral_kernels: List of integral kernels

 
     @property
     def lifting_operator(self):
        """
         The lifting operator property.
         """
         return self._lifting_operator
 
     @lifting_operator.setter
     def lifting_operator(self, value):
        """
        The lifting operator setter.
 
         :param torch.nn.Module value: The lifting operator torch module.

 
     @property
     def projection_operator(self):
        """
         The projection operator property.
         """
         return self._projection_operator
 
     @projection_operator.setter
     def projection_operator(self, value):
        """
        The projection operator setter.
 
         :param torch.nn.Module value: The projection operator torch module.

 
     @property
     def integral_kernels(self):
        """
         The integral kernels operator property.
         """
         return self._integral_kernels
 
     @integral_kernels.setter
     def integral_kernels(self, value):
        """
        The integral kernels operator setter.
 
         :param torch.nn.Module value: The integral kernels operator torch

         check_consistency(value, torch.nn.Module)
         self._integral_kernels = value
 
    def forward(self, x):
        r"""
        Forward computation for the base neural operator. It performs a
        lifting of the input by the ``lifting_operator``; then the integral
        kernel layers are applied.
        """

         x = self.lifting_operator(x)
         x = self.integral_kernels(x)
         x = self.projection_operator(x)
        return x
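Composing a toy operator from plain torch modules, as a sketch (the import
path mirrors the page title; the class may also be re-exported from
pina.model):

    import torch
    from pina.model.base_no import KernelNeuralOperator

    lifting = torch.nn.Linear(2, 16)
    kernels = torch.nn.Sequential(      # hidden -> hidden integral kernels
        torch.nn.Linear(16, 16), torch.nn.Tanh(),
        torch.nn.Linear(16, 16), torch.nn.Tanh(),
    )
    projection = torch.nn.Linear(16, 1)

    operator = KernelNeuralOperator(lifting, kernels, projection)
    print(operator(torch.rand(32, 2)).shape)  # torch.Size([32, 1])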
diff --git a/_modules/pina/model/deeponet.html b/_modules/pina/model/deeponet.html
index e47c112d..8f2fa44f 100644

    Source code for pina.model.deeponet

     """Module for DeepONet model"""
     

     from functools import partial
     
     
class MIONet(torch.nn.Module):
    """
    The PINA implementation of the MIONet network.

    MIONet is a general architecture for learning operators defined

             scale=True,
             translation=True,
         ):
        """
        :param dict networks: The neural networks to use as
            models. The ``dict`` takes as key a neural network, and
            as value the list of indices to extract from the input variable

     
         @staticmethod
         def _symbol_functions(**kwargs):
        """
             Return a dictionary of functions that can be used as aggregators or
             reductions.
             """

             elif isinstance(aggregator, nn.Module) or is_function(aggregator):
                 aggregator_func = aggregator
             else:
            raise ValueError(f"Unsupported aggregation: {str(aggregator)}")
     
             self._aggregator = aggregator_func
     

             elif isinstance(reduction, nn.Module) or is_function(reduction):
                 reduction_func = reduction
             else:
            raise ValueError(f"Unsupported reduction: {reduction}")
     
             self._reduction = reduction_func
     

                     " For more information refer to warning in the documentation."
                 )
     
    def forward(self, x):
        """
        Defines the computation performed at every call.

        :param LabelTensor | torch.Tensor x: The input tensor for the
            forward call.
        """

     
             return output_
    @property
    def aggregator(self):
        """
        The aggregator function.
        """
        return self._aggregator

    @property
    def reduction(self):
        """
        The reduction function.
        """
        return self._reduction

    @property
    def scale(self):
        """
        The scale factor.
        """
        return self._scale

    @property
    def translation(self):
        """
        The translation factor for MIONet.
        """
        return self._trasl

    @property
    def indeces_variables_extracted(self):
        """
        The input indices for each model, as a list.
        """
        return self._indeces

    @property
    def model(self):
        """
        The models, as a list.
        """
        return self.models
class DeepONet(MIONet):
    """
    The PINA implementation of the DeepONet network.

    DeepONet is a general architecture for learning operators. Unlike

             scale=True,
             translation=True,
         ):
        """
             :param torch.nn.Module branch_net: The neural network to use as branch
                 model. It has to take as input a :py:obj:`pina.label_tensor.LabelTensor`
                 or :class:`torch.Tensor`. The number of dimensions of the output has

                 translation=translation,
             )
     
    def forward(self, x):
        """
        Defines the computation performed at every call.

        :param LabelTensor | torch.Tensor x: The input tensor for the
            forward call.

             """
             return super().forward(x)
    @property
    def branch_net(self):
        """
        The branch net for DeepONet.
        """
        return self.models[0]

    @property
    def trunk_net(self):
        """
        The trunk net for DeepONet.
        """
        return self.models[1]
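A usage sketch for DeepONet. The constructor keywords follow the 0.1.x API
only as far as it is visible in this diff, so treat the names and shapes as
assumptions:

    import torch
    from pina import LabelTensor
    from pina.model import DeepONet, FeedForward

    branch = FeedForward(input_dimensions=1, output_dimensions=10)
    trunk = FeedForward(input_dimensions=1, output_dimensions=10)

    net = DeepONet(
        branch_net=branch,
        trunk_net=trunk,
        input_indeces_branch_net=["u"],  # columns routed to the branch
        input_indeces_trunk_net=["t"],   # columns routed to the trunk
        aggregator="*",
        reduction="+",
    )
    x = LabelTensor(torch.rand(64, 2), labels=["u", "t"])
    print(net(x).shape)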
diff --git a/_modules/pina/model/feed_forward.html b/_modules/pina/model/feed_forward.html
index 97fbf509..a3f7e75d 100644

    Source code for pina.model.feed_forward

     """Module for FeedForward model"""
     

     from .layers.residual import EnhancedLinear
     
     
class FeedForward(torch.nn.Module):
    """
    The PINA implementation of a feedforward network, also referred to as a
    multilayer perceptron.

             layers=None,
             bias=True,
         ):
        """ """
             super().__init__()
     
             if not isinstance(input_dimensions, int):

     
             self.model = nn.Sequential(*unique_list)
     
    def forward(self, x):
        """
        Defines the computation performed at every call.

        :param x: The tensor to apply the forward pass.

             :return: the output computed by the model.
             :rtype: torch.Tensor
             """
        return self.model(x)


class ResidualFeedForward(torch.nn.Module):
    """
    The PINA implementation of a feedforward network with skip connections
    and transformer networks, as presented in **Understanding and mitigating
    gradient pathologies in physics-informed neural networks**

             bias=True,
             transformer_nets=None,
         ):
        """ """
             super().__init__()
     
             # check type consistency

                 unique_list.append(EnhancedLinear(layer=layer, activation=func))
             self.inner_layers = torch.nn.Sequential(*unique_list)
     
    def forward(self, x):
        """
        Defines the computation performed at every call.

        :param x: The tensor to apply the forward pass.
        """

                 x = (1.0 - x) * input_[0] + x * input_[1]
     
             # last layer
        return self.last_layer(x)
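A quick sketch of the plain feedforward network; the hyperparameters are
illustrative, and the constructor also accepts inner_size/n_layers in place
of an explicit layer list per the docstring above:

    import torch
    from pina.model import FeedForward

    mlp = FeedForward(
        input_dimensions=2,
        output_dimensions=1,
        layers=[32, 32, 32],   # overrides the default inner sizes
        func=torch.nn.Tanh,
    )
    print(mlp(torch.rand(16, 2)).shape)  # torch.Size([16, 1])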
diff --git a/_modules/pina/model/fno.html b/_modules/pina/model/fno.html
index 7ccf3ad0..0e640440 100644

    Source code for pina.model.fno

     """
     Fourier Neural Operator Module.
"""

     from .base_no import KernelNeuralOperator
     
     
class FourierIntegralKernel(torch.nn.Module):
    """
    Implementation of the Fourier Integral Kernel network.

    This class implements the Fourier Integral Kernel network, which is a

             func=nn.Tanh,
             layers=None,
         ):
        """
             :param int input_numb_fields: Number of input fields.
             :param int output_numb_fields: Number of output fields.
             :param int | list[int] n_modes: Number of modes.

                 val for pair in zip([0] * dimensions, padding) for val in pair
             ]
     
    def forward(self, x):
        """
        Forward computation for the Fourier Integral Kernel. It performs a
        lifting of the input by the ``lifting_net``; then several Fourier
        blocks are applied, and finally the output is projected.
        """

             permutation_idx = [0, *[i for i in range(2, x.ndim)], 1]
             x = x.permute(permutation_idx)
     
        return x


class FNO(KernelNeuralOperator):
    """
    The PINA implementation of the Fourier Neural Operator network.

    The Fourier Neural Operator (FNO) is a general architecture for

             func=nn.Tanh,
             layers=None,
         ):
        """
             :param torch.nn.Module lifting_net: The neural network for lifting
                 the input.
             :param torch.nn.Module projecting_net: The neural network for

                 ),
             )
     
    def forward(self, x):
        """
        Forward computation for the Fourier Neural Operator. It performs a
        lifting of the input by the ``lifting_net``; then several Fourier
        blocks are applied, and finally the output is projected

             :return: The output tensor obtained from FNO.
             :rtype: torch.Tensor
             """
        return super().forward(x)
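A sketch of building a 2D FNO. The channel-last input layout is inferred from
the permutation in the kernel's forward above; the keyword names and defaults
are assumptions to verify against the API reference:

    import torch
    from pina.model import FNO

    lifting = torch.nn.Linear(1, 24)      # lift the single input field
    projection = torch.nn.Linear(24, 1)   # project back to one output field

    fno = FNO(
        lifting_net=lifting,
        projecting_net=projection,
        n_modes=8,        # Fourier modes kept per dimension
        dimensions=2,     # two spatial dimensions
    )
    x = torch.rand(4, 32, 32, 1)          # [batch, x, y, channels]
    print(fno(x).shape)                   # expected: [4, 32, 32, 1]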
diff --git a/_modules/pina/model/layers/avno_layer.html b/_modules/pina/model/layers/avno_layer.html
index 326d666c..bb532604 100644

    Source code for pina.model.layers.avno_layer

     """ Module for Averaging Neural Operator Layer class. """
     

    from pina.utils import check_consistency
     
     
class AVNOBlock(nn.Module):
    r"""
    The PINA implementation of the inner layer of the Averaging Neural
    Operator. The operator layer performs an affine transformation where
    the convolution

        """
     
         def __init__(self, hidden_size=100, func=nn.GELU):
        """
        :param int hidden_size: Size of the hidden layer, defaults to 100.
        :param func: The activation function, defaults to nn.GELU.
             """

    self._nn = nn.Linear(hidden_size, hidden_size)
             self._func = func()
     
    def forward(self, x):
        r"""
        Forward pass of the layer: it performs a sum of a local average and
        an affine transformation of the field.

            :return: The output tensor obtained from Average Neural Operator Block.
             :rtype: torch.Tensor
             """
        return self._func(self._nn(x) + mean(x, dim=1, keepdim=True))
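A minimal check of the block's shape-preserving behaviour (the import path
mirrors the page title and is an assumption):

    import torch
    from pina.model.layers import AVNOBlock

    block = AVNOBlock(hidden_size=64, func=torch.nn.GELU)
    x = torch.rand(8, 100, 64)   # [batch, points, hidden]
    print(block(x).shape)        # unchanged: torch.Size([8, 100, 64])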
diff --git a/_modules/pina/model/layers/convolution_2d.html b/_modules/pina/model/layers/convolution_2d.html
index 40f0d7cf..aa3d6810 100644

    Source code for pina.model.layers.convolution_2d

     """Module for Continuous Convolution class"""
     

import torch


class ContinuousConvBlock(BaseContinuousConv):
    """
    Implementation of the continuous convolutional operator.

    The algorithm expects input to be in the form:

        optimize=False,
        no_overlap=False,
    ):
        """
        :param input_numb_field: Number of fields :math:`N_{in}` in the input.
        :type input_numb_field: int
        :param output_numb_field: Number of fields :math:`N_{out}` in the
            output.
        """


        self._stride = self._stride._stride_discrete

    def _spawn_networks(self, model):
        """
        Private method to create a collection of kernels.

        :param model: A :class:`torch.nn.Module` model, passed as a class
            object.
        """


        return torch.nn.ModuleList(nets)

    def _extract_mapped_points(self, batch_idx, index, x):
        """
        Private method to extract mapped points in the filter.

        :param x: Input tensor of shape ``[channel, N, dim]``
        """


        return stacked_input, indeces_channels

    def _find_index(self, X):
        """
        Private method to extract indices for the convolution.

        :param X: Input tensor, as in ContinuousConvBlock ``__init__``.
        """


        self._index = index

    def _make_grid_forward(self, X):
        """
        Private method to create the forward convolution grid.

        :param X: Input tensor, as in the ContinuousConvBlock docstring.
        """


        self._grid = grid.detach()

    def _make_grid_transpose(self, X):
        """
        Private method to create the transpose convolution grid.

        :param X: Input tensor, as in the ContinuousConvBlock docstring.
        """


        self._grid_transpose = tmp

    def _make_grid(self, X, type):
        """
        Private method to create the convolution grid.

        :param X: Input tensor, as in the ContinuousConvBlock docstring.
        """


        raise TypeError

    def _initialize_convolution(self, X, type="forward"):
        """
        Private method to initialize the convolution. The convolution is
        initialized by setting a grid and calculating the index for finding
        the points inside the


        # calculate the index
        self._find_index(X)

    def forward(self, X):
        """
        Forward pass in the convolutional layer.

        :param X: Input data for the convolution :math:`[B, N_{in}, N, D]`.
        """

        ).sum(1)

        return conv

    def transpose_no_overlap(self, integrals, X):
        """
        Transpose pass in the layer for non-overlapping filters.

        :param integrals: Weights for the transpose convolution. Shape
        """

        return conv_transposed

    def transpose_overlap(self, integrals, X):
        """
        Transpose pass in the layer for overlapping filters.

        :param integrals: Weights for the transpose convolution. Shape
        """

        # save results of accumulation for each batch
        conv_transposed[batch_idx, ..., -1] = accumulator_batch

        return conv_transposed
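A constructor sketch for the block above. The stride dictionary layout is
taken from the PINA continuous-convolution tutorial, and, like the shapes
below, should be treated as an assumption:

    import torch
    from pina.model.layers import ContinuousConvBlock

    stride = {"domain": [1, 1], "start": [0, 0],
              "jumps": [0.3, 0.3], "direction": [1, 1]}

    conv = ContinuousConvBlock(
        input_numb_field=1,
        output_numb_field=2,
        filter_dim=[0.3, 0.3],   # rectangular filter size per coordinate
        stride=stride,
    )
    # input layout: [batch, input fields, points, coordinates + field value]
    x = torch.rand(4, 1, 200, 3)
    print(conv(x).shape)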
diff --git a/_modules/pina/model/layers/embedding.html b/_modules/pina/model/layers/embedding.html
index 252c38eb..4f9ba2c3 100644

    Source code for pina.model.layers.embedding

     """ Embedding modulus. """
     

     from typing import Union, Sequence
     
     
class PeriodicBoundaryEmbedding(torch.nn.Module):
    r"""
    Imposing hard-constraint periodic boundary conditions by embedding the
    input.

         """
     
         def __init__(self, input_dimension, periods, output_dimension=None):
        """
        :param int input_dimension: The dimension of the input tensor; it can
            be checked with the `tensor.ndim` method.
             :param float | int | dict periods: The periodicity in each dimension for

             else:
                 self._period = {k: periods for k in range(input_dimension)}
     
    def forward(self, x):
        """
        Forward pass to compute the periodic boundary conditions embedding.

        :param torch.Tensor x: Input tensor.
        """

                 )
             )
    def _get_vars(self, x, indeces):
        """
        Get variables from the input tensor, ordered by specific indices.

        :param torch.Tensor x: The input tensor to extract.
        """


     
         @property
         def period(self):
        """
             The period of the periodic function to approximate.
             """
             return self._period
class FourierFeatureEmbedding(torch.nn.Module):
    def __init__(self, input_dimension, output_dimension, sigma):
        r"""
        Fourier Feature Embedding class for encoding input features using
        random Fourier features. This class applies a Fourier transformation
        to the input features,

        if output_dimension % 2:
            raise RuntimeError(
                "Expected output_dimension to be an even number, "
                f"got {output_dimension}."
                 )
     
             # assign sigma

                 * self.sigma
             )
     
    def forward(self, x):
        """
        Forward pass to compute the Fourier embedding.

        :param torch.Tensor x: Input tensor.
        """

                 dim=-1,
             )
    @property
    def sigma(self):
        """
        Returning the variance of the sampled matrix for the Fourier
        Embedding.
        """
        return self._sigma
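A sketch of the Fourier feature embedding in isolation. The shapes are
assumed; the output stacks cosine and sine features, which is why
output_dimension must be even:

    import torch
    from pina.model.layers import FourierFeatureEmbedding

    embedding = FourierFeatureEmbedding(
        input_dimension=2, output_dimension=64, sigma=1.0
    )
    x = torch.rand(10, 2)
    print(embedding(x).shape)   # torch.Size([10, 64])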
diff --git a/_modules/pina/model/layers/fourier.html b/_modules/pina/model/layers/fourier.html
index ce34e7f8..ffe5f0e1 100644

Source code for pina.model.layers.fourier

 import torch
 import torch.nn as nn

 )
 
 
class FourierBlock1D(nn.Module):
    """
    Fourier block implementation for one dimensional input tensors.

    The combination of Fourier blocks makes up the Fourier Neural Operator

         activation=torch.nn.Tanh,
     ):
         super().__init__()
        """
         PINA implementation of Fourier block one dimension. The module computes
         the spectral convolution of the input with a linear kernel in the
         fourier space, and then it maps the input back to the physical

         self._activation = activation()
         self._linear = nn.Conv1d(input_numb_fields, output_numb_fields, 1)
 
    def forward(self, x):
        """
        Forward computation for the Fourier block. It performs a spectral
        convolution and a linear transformation of the input, and sums the
        results.

             fourier block of size ``[batch, output_numb_fields, x]``.
         :rtype: torch.Tensor
         """
        return self._activation(self._spectral_conv(x) + self._linear(x))


class FourierBlock2D(nn.Module):
    """
    Fourier block implementation for two dimensional input tensors.

    The combination of Fourier blocks makes up the Fourier Neural Operator

         n_modes,
         activation=torch.nn.Tanh,
     ):
        """
         PINA implementation of Fourier block two dimensions. The module computes
         the spectral convolution of the input with a linear kernel in the
         fourier space, and then it maps the input back to the physical

         self._activation = activation()
         self._linear = nn.Conv2d(input_numb_fields, output_numb_fields, 1)
 
    def forward(self, x):
        """
        Forward computation for the Fourier block. It performs a spectral
        convolution and a linear transformation of the input, and sums the
        results.

            fourier block of size ``[batch, output_numb_fields, x, y]``.
         :rtype: torch.Tensor
         """
        return self._activation(self._spectral_conv(x) + self._linear(x))


class FourierBlock3D(nn.Module):
    """
    Fourier block implementation for three dimensional input tensors.

    The combination of Fourier blocks makes up the Fourier Neural Operator

         n_modes,
         activation=torch.nn.Tanh,
     ):
        """
         PINA implementation of Fourier block three dimensions. The module computes
         the spectral convolution of the input with a linear kernel in the
         fourier space, and then it maps the input back to the physical

         self._activation = activation()
         self._linear = nn.Conv3d(input_numb_fields, output_numb_fields, 1)
 
    def forward(self, x):
        """
        Forward computation for the Fourier block. It performs a spectral
        convolution and a linear transformation of the input, and sums the
        results.

             fourier block of size ``[batch, output_numb_fields, x, y, z]``.
         :rtype: torch.Tensor
         """
        return self._activation(self._spectral_conv(x) + self._linear(x))
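A shape check for the 2D block. The parameter names follow the signatures
above; the channel-first layout matches the Conv2d used for the linear part:

    import torch
    from pina.model.layers import FourierBlock2D

    block = FourierBlock2D(
        input_numb_fields=3,
        output_numb_fields=5,
        n_modes=[4, 4],              # Fourier modes kept along x and y
    )
    x = torch.rand(2, 3, 32, 32)     # [batch, fields, x, y]
    print(block(x).shape)            # torch.Size([2, 5, 32, 32])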
diff --git a/_modules/pina/model/layers/lowrank_layer.html b/_modules/pina/model/layers/lowrank_layer.html
index 401b0b00..16eb32f0 100644

    Source code for pina.model.layers.lowrank_layer

     """ Module for Averaging Neural Operator Layer class. """
     

    < import pina.model as pm # avoid circular import -
    [docs]class LowRankBlock(torch.nn.Module): - r""" +
    +[docs] +class LowRankBlock(torch.nn.Module): + r""" The PINA implementation of the inner layer of the Averaging Neural Operator. The operator layer performs an affine transformation where the convolution @@ -143,7 +511,7 @@

    Source code for pina.model.layers.lowrank_layer

    < func=torch.nn.Tanh, bias=True, ): - """ + """ :param int input_dimensions: The number of input components of the model. Expected tensor shape of the form :math:`(*, d)`, where * @@ -185,8 +553,10 @@


    < self._rank = rank self._func = func() -
    [docs] def forward(self, x, coords): - r""" +
    +[docs] + def forward(self, x, coords): + r""" Forward pass of the layer, it performs an affine transformation of the field, and a low rank approximation by doing a dot product of the basis @@ -224,41 +594,78 @@

    Source code for pina.model.layers.lowrank_layer

    < # apply linear layer and return return self._func(self._nn(x) + expansion)
    + @property def rank(self): - """ + """ The basis rank. """ return self._rank
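A forward-pass sketch for the block. Only part of the constructor is visible
in this diff, so the positional arguments below (input dimension, coordinate
dimension, rank) are assumptions to verify against the API reference:

    import torch
    from pina.model.layers import LowRankBlock

    block = LowRankBlock(64, 2, 10, func=torch.nn.Tanh)

    hidden = torch.rand(4, 100, 64)   # [batch, points, hidden field]
    coords = torch.rand(4, 100, 2)    # [batch, points, coordinates]
    print(block(hidden, coords).shape)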
diff --git a/_modules/pina/model/layers/orthogonal.html b/_modules/pina/model/layers/orthogonal.html
new file mode 100644
index 00000000..4881d0bf

Source code for pina.model.layers.orthogonal

+"""Module for OrthogonalBlock."""
+
+import torch
+from ...utils import check_consistency
+
+
+
+class OrthogonalBlock(torch.nn.Module):
+    """
+    Module to make the input orthonormal.
+    The module takes a tensor of size :math:`[N, M]` and returns a tensor of
+    size :math:`[N, M]` where the columns are orthonormal. The block performs
+    a Gram-Schmidt orthogonalization process for the input, see
+    `here <https://en.wikipedia.org/wiki/Gram%E2%80%93Schmidt_process>`_ for
+    details.
+    """
+
+    def __init__(self, dim=-1, requires_grad=True):
+        """
+        Initialize the OrthogonalBlock module.
+
+        :param int dim: The dimension along which to orthogonalize.
+        :param bool requires_grad: If autograd should record operations on
+            the returned tensor, defaults to True.
+        """
+        super().__init__()
+        # store dim
+        self.dim = dim
+        # store requires_grad
+        check_consistency(requires_grad, bool)
+        self._requires_grad = requires_grad
+
+    def forward(self, X):
+        """
+        Forward pass of the OrthogonalBlock module, using a Gram-Schmidt
+        algorithm.
+
+        :param torch.Tensor X: The input tensor to orthogonalize. The input
+            must be of dimensions :math:`[N, M]`.
+        :return: The orthonormal tensor.
+        :raises Warning: If the dimension along which to orthogonalize is
+            greater than the other dimensions.
+        """
+        # check dim is less than all the other dimensions
+        if X.shape[self.dim] > min(X.shape):
+            raise Warning(
+                "The dimension where to orthogonalize is greater"
+                " than the other dimensions"
+            )
+
+        result = torch.zeros_like(X, requires_grad=self._requires_grad)
+        X_0 = torch.select(X, self.dim, 0).clone()
+        result_0 = X_0 / torch.linalg.norm(X_0)
+        result = self._differentiable_copy(result, 0, result_0)
+
+        # iterate over the rest of the basis with Gram-Schmidt
+        for i in range(1, X.shape[self.dim]):
+            v = torch.select(X, self.dim, i).clone()
+            for j in range(i):
+                vj = torch.select(result, self.dim, j).clone()
+                v = v - torch.sum(v * vj, dim=self.dim, keepdim=True) * vj
+            result_i = v / torch.linalg.norm(v)
+            result = self._differentiable_copy(result, i, result_i)
+        return result
+
+    def _differentiable_copy(self, result, idx, value):
+        """
+        Perform a differentiable copy operation on a tensor.
+
+        :param torch.Tensor result: The tensor where values will be copied to.
+        :param int idx: The index along the specified dimension where the
+            value will be copied.
+        :param torch.Tensor value: The tensor value to copy into the
+            result tensor.
+        :return: A new tensor with the copied values.
+        :rtype: torch.Tensor
+        """
+        return result.index_copy(
+            self.dim, torch.tensor([idx]), value.unsqueeze(self.dim)
+        )
+
+    @property
+    def dim(self):
+        """
+        Get the dimension along which operations are performed.
+
+        :return: The current dimension value.
+        :rtype: int
+        """
+        return self._dim
+
+    @dim.setter
+    def dim(self, value):
+        """
+        Set the dimension along which operations are performed.
+
+        :param int value: The dimension to be set, which must be 0, 1, or -1.
+        :raises IndexError: If the provided dimension is not in the
+            range [-1, 1].
+        """
+        # check consistency
+        check_consistency(value, int)
+        if value not in [0, 1, -1]:
+            raise IndexError(
+                "Dimension out of range (expected to be in "
+                f"range of [-1, 1], but got {value})"
+            )
+        # assign value
+        self._dim = value
+
+    @property
+    def requires_grad(self):
+        """
+        Indicates whether gradient computation is required for operations
+        on the tensors.
+
+        :return: True if gradients are required, False otherwise.
+        :rtype: bool
+        """
+        return self._requires_grad
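A quick orthonormality check (the import path is assumed from the page
title):

    import torch
    from pina.model.layers import OrthogonalBlock

    block = OrthogonalBlock(dim=-1, requires_grad=False)
    Q = block(torch.rand(10, 3))   # orthonormalize the 3 columns
    print(torch.allclose(Q.T @ Q, torch.eye(3), atol=1e-6))  # True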
diff --git a/_modules/pina/model/layers/pod.html b/_modules/pina/model/layers/pod.html
index 647051e7..d24c7719 100644

    Source code for pina.model.layers.pod

     """Module for Base Continuous Convolution class."""
     

     from .utils_convolution import optimizing
     
     
class PODBlock(torch.nn.Module):
    """
    POD layer: it projects the input field onto the proper orthogonal
    decomposition basis. It needs to be fitted to the data before being used,
    with the method :meth:`fit`, which invokes the singular value
    decomposition.

         """
     
         def __init__(self, rank, scale_coefficients=True):
        """
             Build the POD layer with the given rank.
     
             :param int rank: The rank of the POD layer.

     
         @property
         def rank(self):
        """
             The rank of the POD layer.
     
             :rtype: int

     
         @property
         def basis(self):
        """
             The POD basis. It is a matrix whose columns are the first `self.rank` POD modes.
     
             :rtype: torch.Tensor

     
         @property
         def scaler(self):
        """
             The scaler. It is a dictionary with the keys `'mean'` and `'std'` that
             store the mean and the standard deviation of the coefficients.
     

     
         @property
         def scale_coefficients(self):
        """
             If True, the coefficients are scaled after the projection to have zero
             mean and unit variance.
     

             """
             return self.__scale_coefficients
     
    def fit(self, X):
        """
        Set the POD basis by performing the singular value decomposition of
        the given tensor. If `self.scale_coefficients` is True, the
        coefficients are scaled after the projection to have zero mean and
        unit variance.
        """

             if self.__scale_coefficients:
                 self._fit_scaler(torch.matmul(self._basis, X.T))
    def _fit_scaler(self, coeffs):
        """
        Private method that computes the mean and the standard deviation of
        the given coefficients, allowing them to be scaled to have zero mean
        and unit variance. Mean and standard deviation are stored in the
        private member
        """


             }
     
         def _fit_pod(self, X):
        """
             Private method that computes the POD basis of the given tensor and stores it in the private member `_basis`.
     
             :param torch.Tensor X: The tensor to be reduced.

             else:
                 self._basis = torch.svd_lowrank(X.T, q=X.shape[0])[0].T
     
    def forward(self, X):
        """
        The forward pass of the POD layer. By default it executes the
        :meth:`reduce` method, reducing the input tensor to its POD
        representation. The POD layer needs to be fitted before being used.

             """
             return self.reduce(X)
    def reduce(self, X):
        """
        Reduce the input tensor to its POD representation. The POD layer
        needs to be fitted before being used.
        """

     
             return coeff
    def expand(self, coeff):
        """
        Expand the given coefficients to the original space. The POD layer
        needs to be fitted before being used.
        """

             if predicted.ndim == 1:
                 predicted = predicted.unsqueeze(0)
     
        return predicted
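A fit/reduce/expand round trip, as a sketch (the snapshot shapes are
assumed):

    import torch
    from pina.model.layers import PODBlock

    snapshots = torch.rand(100, 256)   # 100 snapshots of a 256-dof field
    pod = PODBlock(rank=5)
    pod.fit(snapshots)                 # build the basis via the SVD

    coeff = pod(snapshots)             # reduce to modal coefficients
    recon = pod.expand(coeff)          # back to the original space
    print(coeff.shape, recon.shape)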
diff --git a/_modules/pina/model/layers/rbf_layer.html b/_modules/pina/model/layers/rbf_layer.html
new file mode 100644
index 00000000..23f88819

Source code for pina.model.layers.rbf_layer

+"""Module for Radial Basis Function Interpolation layer."""
+
+import math
+import warnings
+from itertools import combinations_with_replacement
+import torch
+from ...utils import check_consistency
+
+
+def linear(r):
+    """
+    Linear radial basis function.
+    """
+    return -r
+
+
+def thin_plate_spline(r, eps=1e-7):
+    """
+    Thin plate spline radial basis function.
+    """
+    r = torch.clamp(r, min=eps)
+    return r**2 * torch.log(r)
+
+
+def cubic(r):
+    """
+    Cubic radial basis function.
+    """
+    return r**3
+
+
+def quintic(r):
+    """
+    Quintic radial basis function.
+    """
+    return -(r**5)
+
+
+def multiquadric(r):
+    """
+    Multiquadric radial basis function.
+    """
+    return -torch.sqrt(r**2 + 1)
+
+
+def inverse_multiquadric(r):
+    """
+    Inverse multiquadric radial basis function.
+    """
+    return 1 / torch.sqrt(r**2 + 1)
+
+
+def inverse_quadratic(r):
+    """
+    Inverse quadratic radial basis function.
+    """
+    return 1 / (r**2 + 1)
+
+
+def gaussian(r):
+    """
+    Gaussian radial basis function.
+    """
+    return torch.exp(-(r**2))
+
+
+radial_functions = {
+    "linear": linear,
+    "thin_plate_spline": thin_plate_spline,
+    "cubic": cubic,
+    "quintic": quintic,
+    "multiquadric": multiquadric,
+    "inverse_multiquadric": inverse_multiquadric,
+    "inverse_quadratic": inverse_quadratic,
+    "gaussian": gaussian,
+}
+
+scale_invariant = {"linear", "thin_plate_spline", "cubic", "quintic"}
+
+min_degree_funcs = {
+    "multiquadric": 0,
+    "linear": 0,
+    "thin_plate_spline": 1,
+    "cubic": 1,
+    "quintic": 2,
+}
+
+
+
+[docs] +class RBFBlock(torch.nn.Module): + """ + Radial Basis Function (RBF) interpolation layer. It need to be fitted with + the data with the method :meth:`fit`, before it can be used to interpolate + new points. The layer is not trainable. + + .. note:: + It reproduces the implementation of ``scipy.interpolate.RBFBlock`` and + it is inspired from the implementation in `torchrbf. + <https://github.com/ArmanMaesumi/torchrbf>`_ + """ + + def __init__( + self, + neighbors=None, + smoothing=0.0, + kernel="thin_plate_spline", + epsilon=None, + degree=None, + ): + """ + :param int neighbors: Number of neighbors to use for the + interpolation. + If ``None``, use all data points. + :param float smoothing: Smoothing parameter for the interpolation. + if 0.0, the interpolation is exact and no smoothing is applied. + :param str kernel: Radial basis function to use. Must be one of + ``linear``, ``thin_plate_spline``, ``cubic``, ``quintic``, + ``multiquadric``, ``inverse_multiquadric``, ``inverse_quadratic``, + or ``gaussian``. + :param float epsilon: Shape parameter that scaled the input to + the RBF. This defaults to 1 for kernels in ``scale_invariant`` + dictionary, and must be specified for other kernels. + :param int degree: Degree of the added polynomial. + For some kernels, there exists a minimum degree of the polynomial + such that the RBF is well-posed. Those minimum degrees are specified + in the `min_degree_funcs` dictionary above. If `degree` is less than + the minimum degree, a warning is raised and the degree is set to the + minimum value. + """ + + super().__init__() + check_consistency(neighbors, (int, type(None))) + check_consistency(smoothing, (int, float, torch.Tensor)) + check_consistency(kernel, str) + check_consistency(epsilon, (float, type(None))) + check_consistency(degree, (int, type(None))) + + self.neighbors = neighbors + self.smoothing = smoothing + self.kernel = kernel + self.epsilon = epsilon + self.degree = degree + self.powers = None + # initialize data points and values + self.y = None + self.d = None + # initialize attributes for the fitted model + self._shift = None + self._scale = None + self._coeffs = None + + @property + def smoothing(self): + """ + Smoothing parameter for the interpolation. + + :rtype: float + """ + return self._smoothing + + @smoothing.setter + def smoothing(self, value): + self._smoothing = value + + @property + def kernel(self): + """ + Radial basis function to use. + + :rtype: str + """ + return self._kernel + + @kernel.setter + def kernel(self, value): + if value not in radial_functions: + raise ValueError(f"Unknown kernel: {value}") + self._kernel = value.lower() + + @property + def epsilon(self): + """ + Shape parameter that scaled the input to the RBF. + + :rtype: float + """ + return self._epsilon + + @epsilon.setter + def epsilon(self, value): + if value is None: + if self.kernel in scale_invariant: + value = 1.0 + else: + raise ValueError("Must specify `epsilon` for this kernel.") + else: + value = float(value) + self._epsilon = value + + @property + def degree(self): + """ + Degree of the added polynomial. + + :rtype: int + """ + return self._degree + + @degree.setter + def degree(self, value): + min_degree = min_degree_funcs.get(self.kernel, -1) + if value is None: + value = max(min_degree, 0) + else: + value = int(value) + if value < -1: + raise ValueError("`degree` must be at least -1.") + if value < min_degree: + warnings.warn( + "`degree` is too small for this kernel. 
Setting to " + f"{min_degree}.", + UserWarning, + ) + self._degree = value + + def _check_data(self, y, d): + if y.ndim != 2: + raise ValueError("y must be a 2-dimensional tensor.") + + if d.shape[0] != y.shape[0]: + raise ValueError( + "The first dim of d must have the same length as " + "the first dim of y." + ) + + if isinstance(self.smoothing, (int, float)): + self.smoothing = ( + torch.full((y.shape[0],), self.smoothing).float().to(y.device) + ) + +
+[docs] + def fit(self, y, d): + """ + Fit the RBF interpolator to the data. + + :param torch.Tensor y: (n, d) tensor of data points. + :param torch.Tensor d: (n, m) tensor of data values. + """ + self._check_data(y, d) + + self.y = y + self.d = d + + if self.neighbors is None: + nobs = self.y.shape[0] + else: + raise NotImplementedError("neighbors currently not supported") + + powers = RBFBlock.monomial_powers(self.y.shape[1], self.degree).to( + y.device + ) + if powers.shape[0] > nobs: + raise ValueError( + "The data is not compatible with the requested degree." + ) + + if self.neighbors is None: + self._shift, self._scale, self._coeffs = RBFBlock.solve( + self.y, + self.d.reshape((self.y.shape[0], -1)), + self.smoothing, + self.kernel, + self.epsilon, + powers, + ) + + self.powers = powers
+ + +
+[docs] + def forward(self, x): + """ + Returns the interpolated data at the given points `x`. + + :param torch.Tensor x: `(n, d)` tensor of points at which + to query the interpolator + + :rtype: `(n, m)` torch.Tensor of interpolated data. + """ + if x.ndim != 2: + raise ValueError("`x` must be a 2-dimensional tensor.") + + nx, ndim = x.shape + if ndim != self.y.shape[1]: + raise ValueError( + "Expected the second dim of `x` to have length " + f"{self.y.shape[1]}." + ) + + kernel_func = radial_functions[self.kernel] + + yeps = self.y * self.epsilon + xeps = x * self.epsilon + xhat = (x - self._shift) / self._scale + + kv = RBFBlock.kernel_vector(xeps, yeps, kernel_func) + p = RBFBlock.polynomial_matrix(xhat, self.powers) + vec = torch.cat([kv, p], dim=1) + out = torch.matmul(vec, self._coeffs) + out = out.reshape((nx,) + self.d.shape[1:]) + return out
+ + +
+[docs] + @staticmethod + def kernel_vector(x, y, kernel_func): + """ + Evaluate radial functions with centers `y` for all points in `x`. + + :param torch.Tensor x: `(n, d)` tensor of points. + :param torch.Tensor y: `(m, d)` tensor of centers. + :param str kernel_func: Radial basis function to use. + + :rtype: `(n, m)` torch.Tensor of radial function values. + """ + return kernel_func(torch.cdist(x, y))
+ + +
+[docs] + @staticmethod + def polynomial_matrix(x, powers): + """ + Evaluate monomials at `x` with given `powers`. + + :param torch.Tensor x: `(n, d)` tensor of points. + :param torch.Tensor powers: `(r, d)` tensor of powers for each monomial. + + :rtype: `(n, r)` torch.Tensor of monomial values. + """ + x_ = torch.repeat_interleave(x, repeats=powers.shape[0], dim=0) + powers_ = powers.repeat(x.shape[0], 1) + return torch.prod(x_**powers_, dim=1).view(x.shape[0], powers.shape[0])
+ + +
+[docs] + @staticmethod + def kernel_matrix(x, kernel_func): + """ + Returns radial function values for all pairs of points in `x`. + + :param torch.Tensor x: `(n, d`) tensor of points. + :param str kernel_func: Radial basis function to use. + + :rtype: `(n, n`) torch.Tensor of radial function values. + """ + return kernel_func(torch.cdist(x, x))
+ + +
+[docs] + @staticmethod + def monomial_powers(ndim, degree): + """ + Return the powers for each monomial in a polynomial. + + :param int ndim: Number of variables in the polynomial. + :param int degree: Degree of the polynomial. + + :rtype: `(nmonos, ndim)` torch.Tensor where each row contains the powers + for each variable in a monomial. + + """ + nmonos = math.comb(degree + ndim, ndim) + out = torch.zeros((nmonos, ndim), dtype=torch.int32) + count = 0 + for deg in range(degree + 1): + for mono in combinations_with_replacement(range(ndim), deg): + for var in mono: + out[count, var] += 1 + count += 1 + return out
+ + +
+[docs]
+    @staticmethod
+    def build(y, d, smoothing, kernel, epsilon, powers):
+        """
+        Build the RBF linear system.
+
+        :param torch.Tensor y: (n, d) tensor of data points.
+        :param torch.Tensor d: (n, m) tensor of data values.
+        :param torch.Tensor smoothing: (n,) tensor of smoothing parameters.
+        :param str kernel: Radial basis function to use.
+        :param float epsilon: Shape parameter that scales the input to the RBF.
+        :param torch.Tensor powers: (r, d) tensor of powers for each monomial.
+
+        :rtype: (lhs, rhs, shift, scale) where `lhs` and `rhs` are the
+            left-hand side and right-hand side of the linear system, and
+            `shift` and `scale` are the shift and scale parameters.
+        """
+        p = d.shape[0]
+        s = d.shape[1]
+        r = powers.shape[0]
+        kernel_func = radial_functions[kernel]
+
+        mins = torch.min(y, dim=0).values
+        maxs = torch.max(y, dim=0).values
+        shift = (maxs + mins) / 2
+        scale = (maxs - mins) / 2
+
+        scale[scale == 0.0] = 1.0
+
+        yeps = y * epsilon
+        yhat = (y - shift) / scale
+
+        lhs = torch.empty((p + r, p + r), device=d.device).float()
+        lhs[:p, :p] = RBFBlock.kernel_matrix(yeps, kernel_func)
+        lhs[:p, p:] = RBFBlock.polynomial_matrix(yhat, powers)
+        lhs[p:, :p] = lhs[:p, p:].T
+        lhs[p:, p:] = 0.0
+        lhs[:p, :p] += torch.diag(smoothing)
+
+        rhs = torch.empty((r + p, s), device=d.device).float()
+        rhs[:p] = d
+        rhs[p:] = 0.0
+        return lhs, rhs, shift, scale
+ + +
+[docs]
+    @staticmethod
+    def solve(y, d, smoothing, kernel, epsilon, powers):
+        """
+        Build then solve the RBF linear system.
+
+        :param torch.Tensor y: (n, d) tensor of data points.
+        :param torch.Tensor d: (n, m) tensor of data values.
+        :param torch.Tensor smoothing: (n,) tensor of smoothing parameters.
+        :param str kernel: Radial basis function to use.
+        :param float epsilon: Shape parameter that scales the input to the RBF.
+        :param torch.Tensor powers: (r, d) tensor of powers for each monomial.
+
+        :raises ValueError: If the linear system is singular.
+
+        :rtype: (shift, scale, coeffs) where `shift` and `scale` are the
+            shift and scale parameters, and `coeffs` are the coefficients
+            of the interpolator.
+        """
+
+        lhs, rhs, shift, scale = RBFBlock.build(
+            y, d, smoothing, kernel, epsilon, powers
+        )
+        try:
+            coeffs = torch.linalg.solve(lhs, rhs)
+        except RuntimeError as e:
+            msg = "Singular matrix."
+            nmonos = powers.shape[0]
+            if nmonos > 0:
+                pmat = RBFBlock.polynomial_matrix((y - shift) / scale, powers)
+                rank = torch.linalg.matrix_rank(pmat)
+                if rank < nmonos:
+                    msg = (
+                        "Singular matrix. The matrix of monomials evaluated at "
+                        "the data point coordinates does not have full column "
+                        f"rank ({rank}/{nmonos})."
+                    )
+
+            raise ValueError(msg) from e
+
+        return shift, scale, coeffs
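Putting ``fit`` and ``forward`` together, a minimal usage sketch. The constructor arguments below are assumptions inferred from the attributes the methods above use (``kernel``, ``epsilon``, ``smoothing``, ``degree``, ``neighbors``), and ``"thin_plate_spline"`` is a hypothetical kernel name:

```python
import torch

# hypothetical constructor; smoothing may need to be an (n,) tensor,
# since build() passes it through torch.diag
rbf = RBFBlock(kernel="thin_plate_spline", epsilon=1.0,
               smoothing=0.0, degree=1, neighbors=None)

y = torch.rand(50, 2)                         # (n, d) data sites
d = torch.sin(y.sum(dim=1, keepdim=True))     # (n, m) data values
rbf.fit(y, d)                                 # assembles and solves the system

x = torch.rand(10, 2)
pred = rbf(x)                                 # (10, 1) interpolated values
```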
+
+ +
+ +
+ + + + + +
+ + + + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_modules/pina/model/layers/residual.html b/_modules/pina/model/layers/residual.html index 8462a908..03d57dec 100644 --- a/_modules/pina/model/layers/residual.html +++ b/_modules/pina/model/layers/residual.html @@ -1,101 +1,469 @@ + - - - - - pina.model.layers.residual — PINA 0.1.1.post2407 documentation - - - - - - - - - - - - - - + + + + + + + + pina.model.layers.residual — PINA 0.1.2 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + +
+
+ + + + + +
+ + + + + + + + + + +
- - -
- -
-
-
-
    -
  • - - -
  • -
  • + +
    + + +
    +
    + +
    +
    + +
    + +
    + + + + +
    + +
    + + +
    -
    -
    - + + + + + +
    +

    Source code for pina.model.layers.residual

     import torch
     import torch.nn as nn
     from ...utils import check_consistency
     
     
    -
    [docs]class ResidualBlock(nn.Module): - """Residual block base class. Implementation of a residual block. +
    +[docs] +class ResidualBlock(nn.Module): + """Residual block base class. Implementation of a residual block. .. seealso:: @@ -115,7 +483,7 @@

    Source code for pina.model.layers.residual

             spectral_norm=False,
             activation=torch.nn.ReLU(),
         ):
    -        """
    +        """
             Initializes the ResidualBlock module.
     
             :param int input_dim: Dimension of the input to pass to the
    @@ -149,8 +517,10 @@ 

    Source code for pina.model.layers.residual

             self._l2 = self._spect_norm(nn.Linear(hidden_dim, output_dim))
             self._l3 = self._spect_norm(nn.Linear(input_dim, output_dim))
     
    -
    [docs] def forward(self, x): - """Forward pass for residual block layer. +
    +[docs] + def forward(self, x): + """Forward pass for residual block layer. :param torch.Tensor x: Input tensor for the residual layer. :return: Output tensor for the residual layer. @@ -161,8 +531,9 @@

    Source code for pina.model.layers.residual

             x = self._l3(x)
             return y + x
    + def _spect_norm(self, x): - """Perform spectral norm on the layers. + """Perform spectral norm on the layers. :param x: A torch.nn.Module Linear layer :type x: torch.nn.Module @@ -172,12 +543,15 @@

    Source code for pina.model.layers.residual

             return nn.utils.spectral_norm(x) if self._spectral_norm else x
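Stripped of the spectral-norm plumbing, the computation performed by ``forward`` reduces to a nonlinear main path plus a learned linear skip connection. A standalone sketch in plain ``torch``:

```python
import torch
import torch.nn as nn

# out = l2(act(l1(x))) + l3(x): main path plus a learned linear skip
l1, l2, l3 = nn.Linear(4, 16), nn.Linear(16, 2), nn.Linear(4, 2)
act = nn.ReLU()

x = torch.rand(8, 4)
out = l2(act(l1(x))) + l3(x)   # shape (8, 2)
```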
    + import torch import torch.nn as nn -
    [docs]class EnhancedLinear(torch.nn.Module): - """ +
    +[docs] +class EnhancedLinear(torch.nn.Module): + """ A wrapper class for enhancing a linear layer with activation and/or dropout. :param layer: The linear layer to be enhanced. @@ -196,7 +570,7 @@

    Source code for pina.model.layers.residual

         """
     
         def __init__(self, layer, activation=None, dropout=None):
    -        """
    +        """
             Initializes the EnhancedLinear module.
     
             :param layer: The linear layer to be enhanced.
    @@ -230,8 +604,10 @@ 

    Source code for pina.model.layers.residual

                     layer, activation, self._drop(dropout)
                 )
     
    -
    [docs] def forward(self, x): - """ +
    +[docs] + def forward(self, x): + """ Forward pass through the enhanced linear module. :param x: Input tensor. @@ -242,8 +618,9 @@

    Source code for pina.model.layers.residual

             """
             return self._model(x)
    + def _drop(self, p): - """ + """ Applies dropout with probability p. :param p: Dropout probability. @@ -253,35 +630,71 @@

    Source code for pina.model.layers.residual

             :rtype: torch.nn.Dropout
             """
             return torch.nn.Dropout(p)
    +
    -
    +
    + + + + + +
    + + + +
    -
    - -
    - -
    -

    © Copyright 2021-2024, PINA Contributors. - Last updated on Jul 01, 2024. -

    +
    + +
    + +
    +
+ + + + - Built with Sphinx using a - theme - provided by Read the Docs. - + -
-
- -
- +

+ Created using Sphinx 7.4.7. +
+

+ + + + + + + + + - + + \ No newline at end of file diff --git a/_modules/pina/model/layers/spectral.html b/_modules/pina/model/layers/spectral.html index 5c111a9e..023df5ff 100644 --- a/_modules/pina/model/layers/spectral.html +++ b/_modules/pina/model/layers/spectral.html @@ -1,93 +1,459 @@ + - - - - - pina.model.layers.spectral — PINA 0.1.1.post2407 documentation - - - - - - - - - - - - - - + + + + + + + + pina.model.layers.spectral — PINA 0.1.2 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ +
+ +
- -
- - -
- -
-
-
-
+ + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + +
+ +
+ + +
-
-
- + + + + + +
+

Source code for pina.model.layers.spectral

 import torch
 import torch.nn as nn
@@ -96,14 +462,16 @@ 

Source code for pina.model.layers.spectral

 
 
 ######## 1D Spectral Convolution ###########
-
[docs]class SpectralConvBlock1D(nn.Module): - """ +
+[docs]
+class SpectralConvBlock1D(nn.Module):
+    """
    PINA implementation of Spectral Convolution Block for one-dimensional
    tensors.
    """

    def __init__(self, input_numb_fields, output_numb_fields, n_modes):
-        """
+        """
        The module computes the spectral convolution of the input with a linear
        kernel in the Fourier space, and then it maps the input back to the
        physical space.
@@ -140,7 +508,7 @@ 

Source code for pina.model.layers.spectral

         )
 
     def _compute_mult1d(self, input, weights):
-        """
+        """
         Compute the matrix multiplication of the input
         with the linear kernel weights.
 
@@ -156,8 +524,10 @@ 

Source code for pina.model.layers.spectral

         """
         return torch.einsum("bix,iox->box", input, weights)
 
-
[docs] def forward(self, x): - """ +
+[docs]
+    def forward(self, x):
+        """
        Forward computation for Spectral Convolution.

        :param x: The input tensor, expected to be of size
@@ -185,18 +555,22 @@ 

Source code for pina.model.layers.spectral

         )
 
         # Return to physical space
-        return torch.fft.irfft(out_ft, n=x.size(-1))
+ return torch.fft.irfft(out_ft, n=x.size(-1))
+
+ ######## 2D Spectral Convolution ########### -
[docs]class SpectralConvBlock2D(nn.Module): - """ +
+[docs]
+class SpectralConvBlock2D(nn.Module):
+    """
    PINA implementation of spectral convolution block for two-dimensional
    tensors.
    """

    def __init__(self, input_numb_fields, output_numb_fields, n_modes):
-        """
+        """
        The module computes the spectral convolution of the input with a linear
        kernel in the Fourier space, and then it maps the input back to the
        physical space.
@@ -261,7 +635,7 @@ 

Source code for pina.model.layers.spectral

         )
 
     def _compute_mult2d(self, input, weights):
-        """
+        """
         Compute the matrix multiplication of the input
         with the linear kernel weights.
 
@@ -277,8 +651,10 @@ 

Source code for pina.model.layers.spectral

         """
         return torch.einsum("bixy,ioxy->boxy", input, weights)
 
-
[docs] def forward(self, x): - """ +
+[docs]
+    def forward(self, x):
+        """
        Forward computation for Spectral Convolution.

        :param x: The input tensor, expected to be of size
@@ -313,18 +689,22 @@ 

Source code for pina.model.layers.spectral

         )
 
         # Return to physical space
-        return torch.fft.irfft2(out_ft, s=(x.size(-2), x.size(-1)))
+ return torch.fft.irfft2(out_ft, s=(x.size(-2), x.size(-1)))
+
+ ######## 3D Spectral Convolution ########### -
[docs]class SpectralConvBlock3D(nn.Module): - """ +
+[docs]
+class SpectralConvBlock3D(nn.Module):
+    """
    PINA implementation of spectral convolution block for three-dimensional
    tensors.
    """

    def __init__(self, input_numb_fields, output_numb_fields, n_modes):
-        """
+        """
        The module computes the spectral convolution of the input with a linear
        kernel in the Fourier space, and then it maps the input back to the
        physical space.
@@ -414,7 +794,7 @@ 

Source code for pina.model.layers.spectral

         )
 
     def _compute_mult3d(self, input, weights):
-        """
+        """
         Compute the matrix multiplication of the input
         with the linear kernel weights.
 
@@ -430,8 +810,10 @@ 

Source code for pina.model.layers.spectral

         """
         return torch.einsum("bixyz,ioxyz->boxyz", input, weights)
 
-
[docs] def forward(self, x): - """ +
+[docs]
+    def forward(self, x):
+        """
        Forward computation for Spectral Convolution.

        :param x: The input tensor, expected to be of size
@@ -495,36 +877,73 @@ 

Source code for pina.model.layers.spectral

         out_ft[slice3] = self._compute_mult3d(x_ft[slice3], self._weights4)
 
         # Return to physical space
-        return torch.fft.irfftn(out_ft, s=(x.size(-3), x.size(-2), x.size(-1)))
+ return torch.fft.irfftn(out_ft, s=(x.size(-3), x.size(-2), x.size(-1)))
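All three blocks follow the same pattern: transform to Fourier space, multiply the lowest ``n_modes`` coefficients by learned complex weights via ``einsum``, leave the remaining coefficients at zero, and transform back. A standalone 1D sketch of that pattern:

```python
import torch

batch, in_f, out_f, n, n_modes = 4, 3, 5, 64, 16
x = torch.rand(batch, in_f, n)
weights = torch.rand(in_f, out_f, n_modes, dtype=torch.cfloat)  # learned in the block

x_ft = torch.fft.rfft(x)                                 # (batch, in_f, n//2 + 1)
out_ft = torch.zeros(batch, out_f, n // 2 + 1, dtype=torch.cfloat)
out_ft[..., :n_modes] = torch.einsum(
    "bix,iox->box", x_ft[..., :n_modes], weights
)
y = torch.fft.irfft(out_ft, n=n)                         # (batch, out_f, n)
```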
+
+
-
+
+ + + + + +
+ + + +
-
+
+ + + + - Built with Sphinx using a - theme - provided by Read the Docs. - + - - - - - +

+ Created using Sphinx 7.4.7. +
+

+ + + + + + + + + - + + \ No newline at end of file diff --git a/_modules/pina/model/lno.html b/_modules/pina/model/lno.html index 08975d0c..fbc16c00 100644 --- a/_modules/pina/model/lno.html +++ b/_modules/pina/model/lno.html @@ -1,93 +1,459 @@ + - - - - - pina.model.lno — PINA 0.1.1.post2407 documentation - - - - - - - - - - - - - - + + + + + + + + pina.model.lno — PINA 0.1.2 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + +
+
+ + + + + +
+ + + + + + + + + + +
- - -
- -
-
-
-
    -
  • - - -
  • -
  • + +
    + + +
    +
    + +
    +
    + +
    + +
    + + + + +
    + +
    + + +
    -
    -
    - + + + + + +
    +

    Source code for pina.model.lno

     """Module LowRank Neural Operator."""
     
    @@ -100,8 +466,10 @@ 

    Source code for pina.model.lno

     from .layers.lowrank_layer import LowRankBlock
     
     
    -
    [docs]class LowRankNeuralOperator(KernelNeuralOperator): - """ +
    +[docs] +class LowRankNeuralOperator(KernelNeuralOperator): + """ Implementation of LowRank Neural Operator. LowRank Neural Operator is a general architecture for @@ -134,7 +502,7 @@

    Source code for pina.model.lno

             func=torch.nn.Tanh,
             bias=True,
         ):
    -        """
    +        """
             :param torch.nn.Module lifting_net: The neural network for lifting
                 the input. It must take as input the input field and the coordinates
            at which the input field is evaluated. The output of the lifting
    @@ -211,8 +579,10 @@ 

    Source code for pina.model.lno

             )
             super().__init__(lifting_net, integral_net, projecting_net)
     
    -
    [docs] def forward(self, x): - r""" +
    +[docs] + def forward(self, x): + r""" Forward computation for LowRank Neural Operator. It performs a lifting of the input by the ``lifting_net``. Then different layers of LowRank Neural Operator Blocks are applied. @@ -236,36 +606,73 @@

    Source code for pina.model.lno

             for module in self._integral_kernels:
                 x = module(x, coords)
             # projecting
    -        return self._projection_operator(concatenate((x, coords), dim=-1))
    + return self._projection_operator(concatenate((x, coords), dim=-1))
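The forward pass above is a lift-transform-project pipeline; a standalone sketch with stand-in modules (all names here are hypothetical, not PINA API):

```python
import torch

def lno_forward(lifting, kernels, projection, v, coords):
    # lift the input field together with the coordinates
    x = lifting(torch.cat((v, coords), dim=-1))
    # apply the stack of low-rank integral kernel blocks
    for layer in kernels:
        x = layer(x, coords)
    # project back, again conditioning on the coordinates
    return projection(torch.cat((x, coords), dim=-1))
```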
    +
    +
    -
    +
    + + + + + +
    + + + +
    -
    - -
    - -
    -

    © Copyright 2021-2024, PINA Contributors. - Last updated on Jul 01, 2024. -

    +
    + +
    + +
    +
+ + + + - Built with Sphinx using a - theme - provided by Read the Docs. - + -
-
- -
- + + + + + + + + + + + + - + + \ No newline at end of file diff --git a/_modules/pina/model/multi_feed_forward.html b/_modules/pina/model/multi_feed_forward.html index 24b305c9..cd8ce9f2 100644 --- a/_modules/pina/model/multi_feed_forward.html +++ b/_modules/pina/model/multi_feed_forward.html @@ -1,93 +1,459 @@ + - - - - - pina.model.multi_feed_forward — PINA 0.1.1.post2407 documentation - - - - - - - - - - - - - - + + + + + + + + pina.model.multi_feed_forward — PINA 0.1.2 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + +
+
+ + + + + +
+ + + + + + + + + + +
- - -
- -
-
-
-
    -
  • - - -
  • -
  • + +
    + + +
    +
    + +
    +
    + +
    + +
    + + + + +
    + +
    + +
    -
    -
    - +
    + + + + + +
    +

    Source code for pina.model.multi_feed_forward

     """Module for Multi FeedForward model"""
     
    @@ -96,8 +462,10 @@ 

    Source code for pina.model.multi_feed_forward

    from .feed_forward import FeedForward -
    [docs]class MultiFeedForward(torch.nn.Module): - """ +
+[docs]
+class MultiFeedForward(torch.nn.Module):
+    """
    The PINA implementation of MultiFeedForward network.

    This model allows one to create a network with multiple FeedForward combined
@@ -115,35 +483,71 @@ 

    Source code for pina.model.multi_feed_forward

    for name, constructor_args in ffn_dict.items(): setattr(self, name, FeedForward(**constructor_args))
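A usage sketch: each key of the dictionary becomes an attribute holding a ``FeedForward`` built from the corresponding kwargs. The argument names below are assumptions about the ``FeedForward`` signature, and the subclass is hypothetical:

```python
# keys become sub-network attribute names; the kwargs are assumed
# to match the FeedForward constructor
class MyMFF(MultiFeedForward):
    def forward(self, x):
        return self.branch(x) + self.trunk(x)

mff = MyMFF({
    "branch": {"input_dimensions": 2, "output_dimensions": 4},
    "trunk": {"input_dimensions": 2, "output_dimensions": 4},
})
```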
    +
    -
    + + + + + + +
    + + + +
    -
    - -
    - -
    -

    © Copyright 2021-2024, PINA Contributors. - Last updated on Jul 01, 2024. -

    +
    + +
    + +
    +
+ + + + - Built with Sphinx using a - theme - provided by Read the Docs. - + -
-
- -
- + + + + + + + + + + + + - + + \ No newline at end of file diff --git a/_modules/pina/model/network.html b/_modules/pina/model/network.html index 9d5b8de6..4ccd5892 100644 --- a/_modules/pina/model/network.html +++ b/_modules/pina/model/network.html @@ -1,93 +1,459 @@ + - - - - - pina.model.network — PINA 0.1.1.post2407 documentation - - - - - - - - - - - - - - + + + + + + + + pina.model.network — PINA 0.1.2 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + +
+
+ + + + + +
+ + + + + + + + - Built with Sphinx using a - theme - provided by Read the Docs. - + -
-
- -
- +

+ Created using Sphinx 7.4.7. +
+

+ + + + + + + + + - + + \ No newline at end of file diff --git a/_modules/pina/model/spline.html b/_modules/pina/model/spline.html new file mode 100644 index 00000000..050c5512 --- /dev/null +++ b/_modules/pina/model/spline.html @@ -0,0 +1,699 @@ + + + + + + + + + + pina.model.spline — PINA 0.1.2 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + +
+
+ + + + + +
+ + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + +
+ +
+ + +
+
+ + + + + +
+ +

Source code for pina.model.spline

+"""Module for Spline model"""
+
+import torch
+import torch.nn as nn
+from ..utils import check_consistency
+
+
+
+[docs] +class Spline(torch.nn.Module): + + def __init__(self, order=4, knots=None, control_points=None) -> None: + """ + Spline model. + + :param int order: the order of the spline. + :param torch.Tensor knots: the knot vector. + :param torch.Tensor control_points: the control points. + """ + super().__init__() + + check_consistency(order, int) + + if order < 0: + raise ValueError("Spline order cannot be negative.") + if knots is None and control_points is None: + raise ValueError("Knots and control points cannot be both None.") + + self.order = order + self.k = order - 1 + + if knots is not None and control_points is not None: + self.knots = knots + self.control_points = control_points + + elif knots is not None: + print("Warning: control points will be initialized automatically.") + print(" experimental feature") + + self.knots = knots + n = len(knots) - order + self.control_points = torch.nn.Parameter( + torch.zeros(n), requires_grad=True + ) + + elif control_points is not None: + print("Warning: knots will be initialized automatically.") + print(" experimental feature") + + self.control_points = control_points + + n = len(self.control_points) - 1 + self.knots = { + "type": "auto", + "min": 0, + "max": 1, + "n": n + 2 + self.order, + } + + else: + raise ValueError("Knots and control points cannot be both None.") + + if self.knots.ndim != 1: + raise ValueError("Knot vector must be one-dimensional.") + +
+[docs] + def basis(self, x, k, i, t): + """ + Recursive function to compute the basis functions of the spline. + + :param torch.Tensor x: points to be evaluated. + :param int k: spline degree + :param int i: the index of the interval + :param torch.Tensor t: vector of knots + :return: the basis functions evaluated at x + :rtype: torch.Tensor + """ + + if k == 0: + a = torch.where( + torch.logical_and(t[i] <= x, x < t[i + 1]), 1.0, 0.0 + ) + if i == len(t) - self.order - 1: + a = torch.where(x == t[-1], 1.0, a) + a.requires_grad_(True) + return a + + if t[i + k] == t[i]: + c1 = torch.tensor([0.0] * len(x), requires_grad=True) + else: + c1 = (x - t[i]) / (t[i + k] - t[i]) * self.basis(x, k - 1, i, t) + + if t[i + k + 1] == t[i + 1]: + c2 = torch.tensor([0.0] * len(x), requires_grad=True) + else: + c2 = ( + (t[i + k + 1] - x) + / (t[i + k + 1] - t[i + 1]) + * self.basis(x, k - 1, i + 1, t) + ) + + return c1 + c2
+ + + @property + def control_points(self): + return self._control_points + + @control_points.setter + def control_points(self, value): + if isinstance(value, dict): + if "n" not in value: + raise ValueError("Invalid value for control_points") + n = value["n"] + dim = value.get("dim", 1) + value = torch.zeros(n, dim) + + if not isinstance(value, torch.Tensor): + raise ValueError("Invalid value for control_points") + self._control_points = torch.nn.Parameter(value, requires_grad=True) + + @property + def knots(self): + return self._knots + + @knots.setter + def knots(self, value): + if isinstance(value, dict): + + type_ = value.get("type", "auto") + min_ = value.get("min", 0) + max_ = value.get("max", 1) + n = value.get("n", 10) + + if type_ == "uniform": + value = torch.linspace(min_, max_, n + self.k + 1) + elif type_ == "auto": + initial_knots = torch.ones(self.order + 1) * min_ + final_knots = torch.ones(self.order + 1) * max_ + + if n < self.order + 1: + value = torch.concatenate((initial_knots, final_knots)) + elif n - 2 * self.order + 1 == 1: + value = torch.Tensor([(max_ + min_) / 2]) + else: + value = torch.linspace(min_, max_, n - 2 * self.order - 1) + + value = torch.concatenate((initial_knots, value, final_knots)) + + if not isinstance(value, torch.Tensor): + raise ValueError("Invalid value for knots") + + self._knots = value + +
+[docs] + def forward(self, x_): + """ + Forward pass of the spline model. + + :param torch.Tensor x_: points to be evaluated. + :return: the spline evaluated at x_ + :rtype: torch.Tensor + """ + t = self.knots + k = self.k + c = self.control_points + + basis = map(lambda i: self.basis(x_, k, i, t)[:, None], range(len(c))) + y = (torch.cat(list(basis), dim=1) * c).sum(axis=1) + + return y
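A minimal usage sketch with a clamped cubic B-spline; the import path is assumed from the module name above, and the knot vector has ``len(control_points) + order`` entries as the code requires:

```python
import torch
from pina.model.spline import Spline  # import path assumed

order = 4  # cubic B-spline
knots = torch.tensor(
    [0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 5.0, 5.0, 5.0]
)                                   # clamped: len(knots) = 8 + order
control_points = torch.rand(8)      # one coefficient per basis function

spline = Spline(order=order, knots=knots, control_points=control_points)
y = spline(torch.linspace(0.0, 5.0, 100))   # spline evaluated at 100 points
```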
+
+ +
+ +
+ + + + + +
+ + + + +
+ + +
+
+
+ + + + + + + + \ No newline at end of file diff --git a/_modules/pina/operators.html b/_modules/pina/operators.html index e64b7df5..db7ec466 100644 --- a/_modules/pina/operators.html +++ b/_modules/pina/operators.html @@ -1,93 +1,459 @@ + - - - - - pina.operators — PINA 0.1.1.post2407 documentation - - - - - - - - - - - - - - + + + + + + + + pina.operators — PINA 0.1.2 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + +
+
+ + + + + +
+ + + + + + + + + + +
- - -
- -
-
-
-
    -
  • - - -
  • -
  • + +
    + + +
    +
    + +
    +
    + +
    + +
    + + + + +
    + +
    + + +
    -
    -
    - + + + + + +
    +

    Source code for pina.operators

 """
 Module for vectorized operator implementations. Differential operators are used to write any differential problem.
     Module for operators vectorize implementation. Differential operators are used to write any differential problem.
    @@ -102,8 +468,10 @@ 

    Source code for pina.operators

     from pina.label_tensor import LabelTensor
     
     
    -
    [docs]def grad(output_, input_, components=None, d=None): - """ +
    +[docs] +def grad(output_, input_, components=None, d=None): + """ Perform gradient operation. The operator works for vectorial and scalar functions, with multiple input coordinates. @@ -123,7 +491,7 @@

    Source code for pina.operators

         """
     
         def grad_scalar_output(output_, input_, d):
    -        """
    +        """
             Perform gradient operation for a scalar output.
     
             :param LabelTensor output_: the output tensor onto which computing the
    @@ -159,7 +527,7 @@ 

    Source code for pina.operators

     
             gradients.labels = input_.labels
             gradients = gradients.extract(d)
    -        gradients.labels = [f"d{output_fieldname}d{i}" for i in d]
    +        gradients.labels = [f"d{output_fieldname}d{i}" for i in d]
     
             return gradients
     
    @@ -194,8 +562,11 @@ 

    Source code for pina.operators

         return gradients
    -
    [docs]def div(output_, input_, components=None, d=None): - """ + +
    +[docs] +def div(output_, input_, components=None, d=None): + """ Perform divergence operation. The operator works for vectorial functions, with multiple input coordinates. @@ -236,7 +607,7 @@

    Source code for pina.operators

         div = torch.zeros(input_.shape[0], 1, device=output_.device)
         labels = [None] * len(components)
         for i, (c, d) in enumerate(zip(components, d)):
    -        c_fields = f"d{c}d{d}"
    +        c_fields = f"d{c}d{d}"
             div[:, 0] += grad_output.extract(c_fields).sum(axis=1)
             labels[i] = c_fields
     
    @@ -245,8 +616,11 @@ 

    Source code for pina.operators

         return div
    -
    [docs]def laplacian(output_, input_, components=None, d=None, method="std"): - """ + +
    +[docs] +def laplacian(output_, input_, components=None, d=None, method="std"): + """ Compute Laplace operator. The operator works for vectorial and scalar functions, with multiple input coordinates. @@ -291,7 +665,7 @@

    Source code for pina.operators

                     result[:, 0] += super(torch.Tensor, gg.T).__getitem__(
                         i
                     )  # TODO improve
    -            labels = [f"dd{components[0]}"]
    +            labels = [f"dd{components[0]}"]
     
             else:
                 result = torch.empty(
    @@ -307,15 +681,18 @@ 

    Source code for pina.operators

     
                     grad_output = grad(output_, input_, components=ci, d=di)
                     result[:, idx] = grad(grad_output, input_, d=di).flatten()
    -                labels[idx] = f"dd{ci}dd{di}"
    +                labels[idx] = f"dd{ci}dd{di}"
     
         result = result.as_subclass(LabelTensor)
         result.labels = labels
         return result
    -
    [docs]def advection(output_, input_, velocity_field, components=None, d=None): - """ + +
    +[docs] +def advection(output_, input_, velocity_field, components=None, d=None): + """ Perform advection operation. The operator works for vectorial functions, with multiple input coordinates. @@ -348,35 +725,71 @@

    Source code for pina.operators

     
         tmp *= output_.extract(velocity_field)
         return tmp.sum(dim=2).T
    +
    -
    +
    + + + + + +
    + + + +
    -
    - -
    - -
    -

    © Copyright 2021-2024, PINA Contributors. - Last updated on Jul 01, 2024. -

    +
    + +
    + +
    +
+ + + + - Built with Sphinx using a - theme - provided by Read the Docs. - + -
-
- -
- +

+ Created using Sphinx 7.4.7. +
+

+ + + + + + + + + - + + \ No newline at end of file diff --git a/_modules/pina/plotter.html b/_modules/pina/plotter.html index 8a403c02..db4bcb9a 100644 --- a/_modules/pina/plotter.html +++ b/_modules/pina/plotter.html @@ -1,93 +1,459 @@ + - - - - - pina.plotter — PINA 0.1.1.post2407 documentation - - - - - - - - - - - - - - + + + + + + + + pina.plotter — PINA 0.1.2 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + +
+
+ + + + + +
+ + + + + + + + + + +
- - -
- -
-
-
-
    -
  • - - -
  • -
  • + +
    + + +
    +
    + +
    +
    + +
    + +
    + + + + +
    + +
    + + +
    -
    -
    - + + + + + +
    +

    Source code for pina.plotter

     """ Module for plotting. """
     
    @@ -97,13 +463,17 @@ 

    Source code for pina.plotter

     from pina import LabelTensor
     
     
    -
    [docs]class Plotter: - """ +
    +[docs] +class Plotter: + """ Implementation of a plotter class, for easy visualizations. """ -
    [docs] def plot_samples(self, problem, variables=None, filename=None, **kwargs): - """ +
    +[docs] + def plot_samples(self, problem, variables=None, filename=None, **kwargs): + """ Plot the training grid samples. :param AbstractProblem problem: The PINA problem from where to plot @@ -171,8 +541,9 @@

    Source code for pina.plotter

             else:
                 plt.show()
    def _1d_plot(self, pts, pred, v, method, truth_solution=None, **kwargs):
-        """Plot solution for one dimensional function
+        """Plot the solution for a one-dimensional function

        :param pts: Points to plot the solution.
        :type pts: torch.Tensor
@@ -205,7 +576,7 @@ 

    Source code for pina.plotter

         def _2d_plot(
             self, pts, pred, v, res, method, truth_solution=None, **kwargs
         ):
    -        """Plot solution for two dimensional function
+        """Plot the solution for a two-dimensional function
     
             :param pts: Points to plot the solution.
             :type pts: torch.Tensor
    @@ -251,7 +622,9 @@ 

    Source code for pina.plotter

                 fig.colorbar(cb, ax=ax)
                 ax.title.set_text("Neural Network prediction")
     
    -
    [docs] def plot( +
    +[docs] + def plot( self, solver, components=None, @@ -262,7 +635,7 @@

    Source code for pina.plotter

             title=None,
             **kwargs,
         ):
    -        """
    +        """
             Plot sample of SolverInterface output.
     
             :param SolverInterface solver: The ``SolverInterface`` object instance.
    @@ -344,7 +717,10 @@ 

    Source code for pina.plotter

             else:
                 plt.show()
    -
    [docs] def plot_loss( + +
    +[docs] + def plot_loss( self, trainer, metrics=None, @@ -353,7 +729,7 @@

    Source code for pina.plotter

             filename=None,
             **kwargs,
         ):
    -        """
    +        """
        Plot the loss function values during training.
     
             :param trainer: the PINA Trainer object instance.
    @@ -391,7 +767,7 @@ 

    Source code for pina.plotter

             for metric in metrics:
                 if metric not in trainer_metrics:
                     raise ValueError(
    -                    f"{metric} not a valid metric. Available metrics are {list(trainer_metrics.keys())}."
    +                    f"{metric} not a valid metric. Available metrics are {list(trainer_metrics.keys())}."
                     )
                 loss = trainer_metrics[metric]
                 epochs = range(len(loss))
    @@ -411,36 +787,73 @@ 

    Source code for pina.plotter

             # saving in file
             if filename:
                 plt.savefig(filename)
    -            plt.close()
    + plt.close()
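Taken together, a typical visualisation sketch; the ``problem``, ``solver`` and ``trainer`` objects are hypothetical, created elsewhere in a PINA workflow, and the import path is assumed:

```python
from pina import Plotter  # import path assumed

plotter = Plotter()
plotter.plot_samples(problem)   # training points of a hypothetical problem
plotter.plot(solver)            # prediction of a hypothetical solver
plotter.plot_loss(trainer)      # loss curves of a hypothetical trainer
```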
    +
    +
    -
    +
    + + + + + +
    + + + +
    -
    - -
    - -
    -

    © Copyright 2021-2024, PINA Contributors. - Last updated on Jul 01, 2024. -

    +
    + +
    + +
    +
+ + + + - Built with Sphinx using a - theme - provided by Read the Docs. - + -
-
- -
- +

+ Created using Sphinx 7.4.7. +
+

+ + + + + + + + + - + + \ No newline at end of file diff --git a/_modules/pina/problem/abstract_problem.html b/_modules/pina/problem/abstract_problem.html index 0b44d755..292152f4 100644 --- a/_modules/pina/problem/abstract_problem.html +++ b/_modules/pina/problem/abstract_problem.html @@ -1,93 +1,459 @@ + - - - - - pina.problem.abstract_problem — PINA 0.1.1.post2407 documentation - - - - - - - - - - - - - - + + + + + + + + pina.problem.abstract_problem — PINA 0.1.2 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + +
+
+ + + + + +
+ + + + + + + + + + +
- - -
- -
-
-
-
    -
  • - - -
  • -
  • + +
    + + +
    +
    + +
    +
    + +
    + +
    + + + + +
    + +
    + + +
    -
    -
    - + + + + + +
    +

    Source code for pina.problem.abstract_problem

     """ Module for AbstractProblem class """
     
    @@ -97,8 +463,10 @@ 

    Source code for pina.problem.abstract_problem

    import torch -
    [docs]class AbstractProblem(metaclass=ABCMeta): - """ +
    +[docs] +class AbstractProblem(metaclass=ABCMeta): + """ The abstract `AbstractProblem` class. All the class defining a PINA Problem should be inheritied from this class. @@ -122,7 +490,7 @@

    Source code for pina.problem.abstract_problem

    self._span_condition_points() def __deepcopy__(self, memo): - """ + """ Implements deepcopy for the :class:`~pina.problem.abstract_problem.AbstractProblem` class. @@ -140,7 +508,7 @@

    Source code for pina.problem.abstract_problem

    @property def input_variables(self): - """ + """ The input variables of the AbstractProblem, whose type depends on the type of domain (spatial, temporal, and parameter). @@ -163,7 +531,7 @@

    Source code for pina.problem.abstract_problem

    @property
    def domain(self):
-        """
+        """
        The domain(s) where the conditions of the AbstractProblem
        are valid. If more than one domain type is passed, a list of
        Location is returned.
@@ -172,9 +540,9 @@ 

    Source code for pina.problem.abstract_problem

    :rtype: list[Location] """ domains = [ - getattr(self, f"{t}_domain") + getattr(self, f"{t}_domain") for t in ["spatial", "temporal", "parameter"] - if hasattr(self, f"{t}_domain") + if hasattr(self, f"{t}_domain") ] if len(domains) == 1: @@ -196,7 +564,7 @@

    Source code for pina.problem.abstract_problem

    @property @abstractmethod def output_variables(self): - """ + """ The output variables of the problem. """ pass @@ -204,13 +572,13 @@

    Source code for pina.problem.abstract_problem

    @property @abstractmethod def conditions(self): - """ + """ The conditions of the problem. """ pass def _span_condition_points(self): - """ + """ Simple function to get the condition points """ for condition_name in self.conditions: @@ -233,10 +601,12 @@

    Source code for pina.problem.abstract_problem

    tensor_var ) -
    [docs] def discretise_domain( +
    +[docs] + def discretise_domain( self, n, mode="random", variables="all", locations="all" ): - """ + """ Generate a set of points to span the `Location` of all the conditions of the problem. @@ -271,7 +641,7 @@

    Source code for pina.problem.abstract_problem

    # check consistency mode check_consistency(mode, str) if mode not in ["random", "grid", "lh", "chebyshev", "latin"]: - raise TypeError(f"mode {mode} not valid.") + raise TypeError(f"mode {mode} not valid.") # check consistency variables if variables == "all": @@ -282,19 +652,25 @@

    Source code for pina.problem.abstract_problem

    if sorted(variables) != sorted(self.input_variables): TypeError( f"Wrong variables for sampling. Variables ", - f"should be in {self.input_variables}.", + f"should be in {self.input_variables}.", ) # check consistency location + locations_to_sample = [ + condition + for condition in self.conditions + if hasattr(self.conditions[condition], "location") + ] if locations == "all": - locations = [condition for condition in self.conditions] + # only locations that can be sampled + locations = locations_to_sample else: check_consistency(locations, str) - if sorted(locations) != sorted(self.conditions): + if sorted(locations) != sorted(locations_to_sample): TypeError( f"Wrong locations for sampling. Location ", - f"should be in {self.conditions}.", + f"should be in {locations_to_sample}.", ) # sampling @@ -332,8 +708,11 @@

    Source code for pina.problem.abstract_problem

    sorted(self.input_variables) )
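A usage sketch; the ``Poisson`` problem and its location names are hypothetical, while the valid modes are exactly the ones checked above:

```python
problem = Poisson()  # hypothetical SpatialProblem subclass

# grid sampling on the interior condition, random sampling on a boundary
problem.discretise_domain(n=100, mode="grid", locations=["D"])
problem.discretise_domain(n=50, mode="random", locations=["gamma1"])

print(problem.have_sampled_points)  # True once every location is sampled
```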
    -
    [docs] def add_points(self, new_points): - """ + +
    +[docs] + def add_points(self, new_points): + """ Adding points to the already sampled points. :param dict new_points: a dictionary with key the location to add the points @@ -343,7 +722,7 @@

    Source code for pina.problem.abstract_problem

    if sorted(new_points.keys()) != sorted(self.conditions): TypeError( f"Wrong locations for new points. Location ", - f"should be in {self.conditions}.", + f"should be in {self.conditions}.", ) for location in new_points.keys(): @@ -355,7 +734,7 @@

    Source code for pina.problem.abstract_problem

    if sorted(old_pts.labels) != sorted(new_pts.labels): TypeError( f"Not matching variables for old and new points " - f"in condition {location}." + f"in condition {location}." ) if old_pts.labels != new_pts.labels: new_pts = torch.hstack( @@ -368,9 +747,10 @@

    Source code for pina.problem.abstract_problem

    merged_pts.labels = old_pts.labels self.input_pts[location] = merged_pts
    + @property def have_sampled_points(self): - """ + """ Check if all points for ``Location`` are sampled. """ @@ -378,7 +758,7 @@

    Source code for pina.problem.abstract_problem

    @property def not_sampled_points(self): - """ + """ Check which points are not sampled. """ @@ -391,35 +771,71 @@

    Source code for pina.problem.abstract_problem

    if not is_sample: not_sampled.append(condition_name) return not_sampled
    +
    -
    + + + + + + +
    + + + +
    -
    - -
    - -
    -

    © Copyright 2021-2024, PINA Contributors. - Last updated on Jul 01, 2024. -

    +
    + +
    + +
    +
+ + + + - Built with Sphinx using a - theme - provided by Read the Docs. - + -
-
- -
- +

+ Created using Sphinx 7.4.7. +
+

+ + + + + + + + + - + + \ No newline at end of file diff --git a/_modules/pina/problem/parametric_problem.html b/_modules/pina/problem/parametric_problem.html index 17f54b30..60c8de57 100644 --- a/_modules/pina/problem/parametric_problem.html +++ b/_modules/pina/problem/parametric_problem.html @@ -1,93 +1,459 @@ + - - - - - pina.problem.parametric_problem — PINA 0.1.1.post2407 documentation - - - - - - - - - - - - - - + + + + + + + + pina.problem.parametric_problem — PINA 0.1.2 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + +
+
+ + + + + +
+ + + + + + + + + + +
- - -
- -
-
-
-
    -
  • - - -
  • -
  • + +
    + + +
    +
    + +
    +
    + +
    + +
    + + + + +
    + +
    + + +
    -
    -
    - + + + + + +
    +

    Source code for pina.problem.parametric_problem

     """Module for the ParametricProblem class"""
     
    @@ -96,8 +462,10 @@ 

    Source code for pina.problem.parametric_problem

from .abstract_problem import AbstractProblem
 
 
-
    [docs]class ParametricProblem(AbstractProblem): - """ +
    +[docs] +class ParametricProblem(AbstractProblem): + """ The class for the definition of parametric problems, i.e., problems with parameters among the input variables. @@ -131,48 +499,87 @@

    Source code for pina.problem.parametric_problem

    >>>         'D': Condition(CartesianDomain({'x': [0, 1], 'alpha':[1, 10]}), Equation(ode_equation))}
    """
 
-
    [docs] @abstractmethod +
    +[docs] + @abstractmethod def parameter_domain(self): - """ + """ The parameters' domain of the problem. """ pass
    + @property def parameters(self): - """ + """ The parameters' variables of the problem. """ return self.parameter_domain.variables
    +
    -
    + + + + + + +
    + + + +
    -
    - -
    - -
    -

    © Copyright 2021-2024, PINA Contributors. - Last updated on Jul 01, 2024. -

    +
    + +
    + +
    +
+ + + + - Built with Sphinx using a - theme - provided by Read the Docs. - + -
-
- -
- + + + + + + + + + + + + - + + \ No newline at end of file diff --git a/_modules/pina/problem/spatial_problem.html b/_modules/pina/problem/spatial_problem.html index f3a9a001..ecd79ec3 100644 --- a/_modules/pina/problem/spatial_problem.html +++ b/_modules/pina/problem/spatial_problem.html @@ -1,93 +1,459 @@ + - - - - - pina.problem.spatial_problem — PINA 0.1.1.post2407 documentation - - - - - - - - - - - - - - + + + + + + + + pina.problem.spatial_problem — PINA 0.1.2 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + +
+
+ + + + + +
+ + + + + + + + + + +
- - -
- -
-
-
-
    -
  • - - -
  • -
  • + +
    + + +
    +
    + +
    +
    + +
    + +
    + + + + +
    + +
    + + +
    -
    -
    - + + + + + +
    +

    Source code for pina.problem.spatial_problem

     """Module for the SpatialProblem class"""
     
    @@ -96,8 +462,10 @@ 

    Source code for pina.problem.spatial_problem

    from .abstract_problem import AbstractProblem
     
     
    -
    [docs]class SpatialProblem(AbstractProblem): - """ +
    +[docs] +class SpatialProblem(AbstractProblem): + """ The class for the definition of spatial problems, i.e., for problems with spatial input variables. @@ -127,48 +495,87 @@

    Source code for pina.problem.spatial_problem

            >>>         'D': Condition(CartesianDomain({'x': [0, 1], 'alpha':[1, 10]}), Equation(ode_equation))}
         """
     
    -
    [docs] @abstractmethod +
    +[docs] + @abstractmethod def spatial_domain(self): - """ + """ The spatial domain of the problem. """ pass
    + @property def spatial_variables(self): - """ + """ The spatial input variables of the problem. """ return self.spatial_domain.variables
    +
    -
    +
    + + + + + +
    + + + +
    -
    - -
    - -
    -

    © Copyright 2021-2024, PINA Contributors. - Last updated on Jul 01, 2024. -

    +
    + +
    + +
    +
+ + + + - Built with Sphinx using a - theme - provided by Read the Docs. - + -
-
- -
- + + + + + + + + + + + + - + + \ No newline at end of file diff --git a/_modules/pina/problem/timedep_problem.html b/_modules/pina/problem/timedep_problem.html index 0919adce..71f05806 100644 --- a/_modules/pina/problem/timedep_problem.html +++ b/_modules/pina/problem/timedep_problem.html @@ -1,93 +1,459 @@ + - - - - - pina.problem.timedep_problem — PINA 0.1.1.post2407 documentation - - - - - - - - - - - - - - + + + + + + + + pina.problem.timedep_problem — PINA 0.1.2 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + +
+
+ + + + + +
+ + + + + + + + + + +
- - -
- -
-
-
-
    -
  • - - -
  • -
  • + +
    + + +
    +
    + +
    +
    + +
    + +
    + + + + +
    + +
    + + +
    -
    -
    - + + + + + +
    +

    Source code for pina.problem.timedep_problem

     """Module for the TimeDependentProblem class"""
     
    @@ -96,8 +462,10 @@ 

    Source code for pina.problem.timedep_problem

    from .abstract_problem import AbstractProblem
     
     
    -
    [docs]class TimeDependentProblem(AbstractProblem): - """ +
    +[docs] +class TimeDependentProblem(AbstractProblem): + """ The class for the definition of time-dependent problems, i.e., for problems depending on time. @@ -137,48 +505,87 @@

    Source code for pina.problem.timedep_problem

            >>>         'D': Condition(CartesianDomain({'x': [0, 3], 't':[0, 1]}), Equation(wave_equation))}
         """
     
    -
    [docs] @abstractmethod +
    +[docs] + @abstractmethod def temporal_domain(self): - """ + """ The temporal domain of the problem. """ pass
    + @property def temporal_variable(self): - """ + """ The time variable of the problem. """ return self.temporal_domain.variables
    +
    -
    +
    + + + + + +
    + + + +
    -
    - -
    - -
    -

    © Copyright 2021-2024, PINA Contributors. - Last updated on Jul 01, 2024. -

    +
    + +
    + +
    +
+ + + + - Built with Sphinx using a - theme - provided by Read the Docs. - + -
-
- -
- + + + + + + + + + + + + - + + \ No newline at end of file diff --git a/_modules/pina/solvers/garom.html b/_modules/pina/solvers/garom.html index 697ce3f1..e7a17a5c 100644 --- a/_modules/pina/solvers/garom.html +++ b/_modules/pina/solvers/garom.html @@ -1,93 +1,459 @@ + - - - - - pina.solvers.garom — PINA 0.1.1.post2407 documentation - - - - - - - - - - - - - - + + + + + + + + pina.solvers.garom — PINA 0.1.2 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + +
+
+ + + + + +
+ + + + + + + + + + +
- - -
- -
-
-
-
    -
  • - - -
  • -
  • + +
    + + +
    +
    + +
    +
    + +
    + +
    + + + + +
    + +
    + + +
    -
    -
    - + + + + + +
    +

    Source code for pina.solvers.garom

     """ Module for GAROM """
     
    @@ -108,8 +474,10 @@ 

    Source code for pina.solvers.garom

     from torch.nn.modules.loss import _Loss
     
     
    -
    [docs]class GAROM(SolverInterface): - """ +
+[docs]
+class GAROM(SolverInterface):
+    """
    GAROM solver class. This class implements Generative Adversarial
    Reduced Order Model solver, using user specified ``models`` to solve
    a specific order reduction ``problem``.
@@ -140,7 +508,7 @@ 

    Source code for pina.solvers.garom

             lambda_k=0.001,
             regularizer=False,
         ):
    -        """
    +        """
        :param AbstractProblem problem: The formulation of the problem.
             :param torch.nn.Module generator: The neural network model to use
                 for the generator.
    @@ -226,8 +594,10 @@ 

    Source code for pina.solvers.garom

             self._generator = self.models[0]
             self._discriminator = self.models[1]
     
    -
    [docs] def forward(self, x, mc_steps=20, variance=False): - """ +
+[docs]
+    def forward(self, x, mc_steps=20, variance=False):
+        """
        Forward step for the GAROM solver.

        :param x: The input tensor.
@@ -255,8 +625,11 @@ 

    Source code for pina.solvers.garom

     
             return mean
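A standalone sketch of the Monte Carlo estimate described above: repeated generator samples for the same input are averaged, and their variance optionally returned. The helper name is hypothetical:

```python
import torch

def mc_estimate(generator, x, mc_steps=20, variance=False):
    # draw mc_steps generator samples for the same input and average them
    samples = torch.stack([generator(x) for _ in range(mc_steps)])
    mean = samples.mean(dim=0)
    return (mean, samples.var(dim=0)) if variance else mean
```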
    -
    [docs] def configure_optimizers(self): - """ + +
    +[docs] + def configure_optimizers(self): + """ Optimizer configuration for the GAROM solver. @@ -265,12 +638,13 @@

    Source code for pina.solvers.garom

             """
             return self.optimizers, self._schedulers
    + def sample(self, x): # sampling return self.generator(x) def _train_generator(self, parameters, snapshots): - """ + """ Private method to train the generator network. """ optimizer = self.optimizer_generator @@ -294,7 +668,7 @@

    Source code for pina.solvers.garom

             return r_loss, g_loss
     
         def _train_discriminator(self, parameters, snapshots):
    -        """
    +        """
             Private method to train the discriminator network.
             """
             optimizer = self.optimizer_discriminator
    @@ -321,7 +695,7 @@ 

    Source code for pina.solvers.garom

             return d_loss_real, d_loss_fake, d_loss
     
         def _update_weights(self, d_loss_real, d_loss_fake):
    -        """
    +        """
             Private method to Update the weights of the generator and discriminator
             networks.
             """
    @@ -333,8 +707,10 @@ 

    Source code for pina.solvers.garom

             self.k = min(max(self.k, 0), 1)  # Constraint to interval [0, 1]
             return diff
     
    -
    [docs] def training_step(self, batch, batch_idx): - """GAROM solver training step. +
    +[docs] + def training_step(self, batch, batch_idx): + """GAROM solver training step. :param batch: The batch element in the dataloader. :type batch: tuple @@ -410,6 +786,7 @@

    Source code for pina.solvers.garom

     
             return
    + @property def generator(self): return self._generator @@ -433,35 +810,71 @@

    Source code for pina.solvers.garom

         @property
         def scheduler_discriminator(self):
             return self._schedulers[1]
    +
    -
    +
    + + + + + +
    + + + +
    -
    - -
    - -
    -

    © Copyright 2021-2024, PINA Contributors. - Last updated on Jul 01, 2024. -

    +
    + +
    + +
    +
+ + + + - Built with Sphinx using a - theme - provided by Read the Docs. - + -
-
- -
- +

+ Created using Sphinx 7.4.7. +
+

+ + + + + + + + + - + + \ No newline at end of file diff --git a/_modules/pina/solvers/pinns/basepinn.html b/_modules/pina/solvers/pinns/basepinn.html index a112f758..48bbf1ec 100644 --- a/_modules/pina/solvers/pinns/basepinn.html +++ b/_modules/pina/solvers/pinns/basepinn.html @@ -1,93 +1,459 @@ + - - - - - pina.solvers.pinns.basepinn — PINA 0.1.1.post2407 documentation - - - - - - - - - - - - - - + + + + + + + + pina.solvers.pinns.basepinn — PINA 0.1.2 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + +
+
+ + + + + +
+ + + + + + + + - Built with Sphinx using a - theme - provided by Read the Docs. - + -
-
- -
- +

+ Created using Sphinx 7.4.7. +
+

+ + + + + + + + + - + + \ No newline at end of file diff --git a/_modules/pina/solvers/pinns/causalpinn.html b/_modules/pina/solvers/pinns/causalpinn.html index d545687b..c9ce0cbf 100644 --- a/_modules/pina/solvers/pinns/causalpinn.html +++ b/_modules/pina/solvers/pinns/causalpinn.html @@ -1,93 +1,459 @@ + - - - - - pina.solvers.pinns.causalpinn — PINA 0.1.1.post2407 documentation - - - - - - - - - - - - - - + + + + + + + + pina.solvers.pinns.causalpinn — PINA 0.1.2 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + + + + + + + - -
- - -
- -
-
-
-
+ + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + +
-
-
- + +
+ + +
+
+ + + + + +
+

Source code for pina.solvers.pinns.causalpinn

 """ Module for CausalPINN """
 
@@ -101,8 +467,10 @@ 

Source code for pina.solvers.pinns.causalpinn

from pina.utils import check_consistency -
[docs]class CausalPINN(PINN): - r""" +
+[docs] +class CausalPINN(PINN): + r""" Causal Physics Informed Neural Network (PINN) solver class. This class implements Causal Physics Informed Neural Network solvers, using a user specified ``model`` to solve a specific @@ -173,7 +541,7 @@

Source code for pina.solvers.pinns.causalpinn

scheduler_kwargs={"factor": 1, "total_iters": 0}, eps=100, ): - """ + """ :param AbstractProblem problem: The formulation of the problem. :param torch.nn.Module model: The neural network model to use. :param torch.nn.Module loss: The loss function used as minimizer, @@ -210,8 +578,10 @@

Source code for pina.solvers.pinns.causalpinn

                "inheriting from TimeDependentProblem."
            )
 
-
[docs] def loss_phys(self, samples, equation): - """ +
+[docs] + def loss_phys(self, samples, equation): + """ Computes the physics loss for the Causal PINN solver based on given samples and equation. @@ -245,16 +615,17 @@

Source code for pina.solvers.pinns.causalpinn

weights = self._compute_weights(time_loss) return (weights * time_loss).mean()
+ @property def eps(self): - """ + """ The exponential decay parameter. """ return self._eps @eps.setter def eps(self, value): - """ + """ Setter method for the eps parameter. :param float value: The exponential decay parameter. @@ -263,7 +634,7 @@

Source code for pina.solvers.pinns.causalpinn

self._eps = value def _sort_label_tensor(self, tensor): - """ + """ Sorts the label tensor based on time variables. :param LabelTensor tensor: The label tensor to be sorted. @@ -281,7 +652,7 @@

Source code for pina.solvers.pinns.causalpinn

return tensor def _split_tensor_into_chunks(self, tensor): - """ + """ Splits the label tensor into chunks based on time. :param LabelTensor tensor: The label tensor to be split. @@ -301,7 +672,7 @@

Source code for pina.solvers.pinns.causalpinn

return chunks, labels # return chunks def _compute_weights(self, loss): - """ + """ Computes the weights for the physics loss based on the cumulative loss. :param LabelTensor loss: The physics loss values. @@ -312,35 +683,71 @@

Source code for pina.solvers.pinns.causalpinn

        cumulative_loss = self._eps * torch.cumsum(loss, dim=0)
        # return the exponential of the weighted negative cumulative sum
        return torch.exp(-cumulative_loss)
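A standalone numerical sketch of this weighting: with per-chunk losses ordered in time, each weight is exp(-eps * cumulative loss up to that chunk), so later chunks are down-weighted until earlier ones are resolved:

```python
import torch

eps = 100.0
time_loss = torch.tensor([0.9, 0.5, 0.2, 0.1])  # per-chunk losses, ordered in time
weights = torch.exp(-eps * torch.cumsum(time_loss, dim=0))
total = (weights * time_loss).mean()  # early chunks dominate the objective
```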
+
-
+ + + + + + +
+ + + +
-
+
+ + + + - Built with Sphinx using a - theme - provided by Read the Docs. - + - - - - - +

+ Created using Sphinx 7.4.7. +
+

+ + + + + + + + + - + + \ No newline at end of file diff --git a/_modules/pina/solvers/pinns/competitive_pinn.html b/_modules/pina/solvers/pinns/competitive_pinn.html index 2c0b7d14..41d4c6ce 100644 --- a/_modules/pina/solvers/pinns/competitive_pinn.html +++ b/_modules/pina/solvers/pinns/competitive_pinn.html @@ -1,93 +1,459 @@ + - - - - - pina.solvers.pinns.competitive_pinn — PINA 0.1.1.post2407 documentation - - - - - - - - - - - - - - + + + + + + + + pina.solvers.pinns.competitive_pinn — PINA 0.1.2 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ +
+ +
- -
- - -
- -
-
-
-
+ + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + +
+ +
+ + +
-
-
- + + + + + +
+

Source code for pina.solvers.pinns.competitive_pinn

 """ Module for CompetitivePINN """
 
@@ -108,8 +474,10 @@ 

Source code for pina.solvers.pinns.competitive_pinn

from pina.problem import InverseProblem -
[docs]class CompetitivePINN(PINNInterface): - r""" +
+[docs] +class CompetitivePINN(PINNInterface): + r""" Competitive Physics Informed Neural Network (PINN) solver class. This class implements Competitive Physics Informed Neural Network solvers, using a user specified ``model`` to solve a specific @@ -170,7 +538,7 @@

Source code for pina.solvers.pinns.competitive_pinn

        scheduler_discriminator=ConstantLR,
        scheduler_discriminator_kwargs={"factor": 1, "total_iters": 0},
    ):
-        """
+        """
        :param AbstractProblem problem: The formulation of the problem.
        :param torch.nn.Module model: The neural network model to use
            for the model.
@@ -231,8 +599,10 @@ 

Source code for pina.solvers.pinns.competitive_pinn

self._model = self.models[0] self._discriminator = self.models[1] -
[docs] def forward(self, x): - r""" +
+[docs] + def forward(self, x): + r""" Forward pass implementation for the PINN solver. It returns the function evaluation :math:`\mathbf{u}(\mathbf{x})` at the control points :math:`\mathbf{x}`. @@ -245,8 +615,11 @@

Source code for pina.solvers.pinns.competitive_pinn

""" return self.neural_net(x)
-
[docs] def loss_phys(self, samples, equation): - """ + +
+[docs] + def loss_phys(self, samples, equation): + """ Computes the physics loss for the Competitive PINN solver based on given samples and equation. @@ -272,8 +645,11 @@

Source code for pina.solvers.pinns.competitive_pinn

self._train_discriminator(samples, equation, discriminator_bets) return loss_val
-
[docs] def loss_data(self, input_tensor, output_tensor): - """ + +
+[docs] + def loss_data(self, input_tensor, output_tensor): + """ The data loss for the PINN solver. It computes the loss between the network output against the true solution. @@ -293,8 +669,11 @@

Source code for pina.solvers.pinns.competitive_pinn

self.optimizer_model.step() return loss_val
-
[docs] def configure_optimizers(self): - """ + +
+[docs] + def configure_optimizers(self): + """ Optimizer configuration for the Competitive PINN solver. :return: The optimizers and the schedulers @@ -313,8 +692,11 @@

Source code for pina.solvers.pinns.competitive_pinn

) return self.optimizers, self._schedulers
-
[docs] def on_train_batch_end(self, outputs, batch, batch_idx): - """ + +
+[docs]
+    def on_train_batch_end(self, outputs, batch, batch_idx):
+        """
        This method is called at the end of each training batch, and overrides
        the PyTorch Lightning implementation for logging the checkpoints.
@@ -332,8 +714,9 @@ 

Source code for pina.solvers.pinns.competitive_pinn

) return super().on_train_batch_end(outputs, batch, batch_idx)
+ def _train_discriminator(self, samples, equation, discriminator_bets): - """ + """ Trains the discriminator network of the Competitive PINN. :param LabelTensor samples: Input samples to evaluate the physics loss. @@ -361,7 +744,7 @@

Source code for pina.solvers.pinns.competitive_pinn

return def _train_model(self, samples, equation, discriminator_bets): - """ + """ Trains the model network of the Competitive PINN. :param LabelTensor samples: Input samples to evaluate the physics loss. @@ -393,7 +776,7 @@

Source code for pina.solvers.pinns.competitive_pinn

@property def neural_net(self): - """ + """ Returns the neural network model. :return: The neural network model. @@ -403,7 +786,7 @@

Source code for pina.solvers.pinns.competitive_pinn

@property def discriminator(self): - """ + """ Returns the discriminator model (if applicable). :return: The discriminator model. @@ -413,7 +796,7 @@

Source code for pina.solvers.pinns.competitive_pinn

@property def optimizer_model(self): - """ + """ Returns the optimizer associated with the neural network model. :return: The optimizer for the neural network model. @@ -423,7 +806,7 @@

Source code for pina.solvers.pinns.competitive_pinn

@property def optimizer_discriminator(self): - """ + """ Returns the optimizer associated with the discriminator (if applicable). :return: The optimizer for the discriminator. @@ -433,7 +816,7 @@

Source code for pina.solvers.pinns.competitive_pinn

@property def scheduler_model(self): - """ + """ Returns the scheduler associated with the neural network model. :return: The scheduler for the neural network model. @@ -443,42 +826,78 @@

Source code for pina.solvers.pinns.competitive_pinn

@property def scheduler_discriminator(self): - """ + """ Returns the scheduler associated with the discriminator (if applicable). :return: The scheduler for the discriminator. :rtype: torch.optim.lr_scheduler._LRScheduler """ return self._schedulers[1]
+ + + + + + + + + - + + \ No newline at end of file diff --git a/_modules/pina/solvers/pinns/gpinn.html b/_modules/pina/solvers/pinns/gpinn.html index 53f27589..b7440f97 100644 --- a/_modules/pina/solvers/pinns/gpinn.html +++ b/_modules/pina/solvers/pinns/gpinn.html @@ -1,93 +1,459 @@ + - - - - - pina.solvers.pinns.gpinn — PINA 0.1.1.post2407 documentation - - - - - - - - - - - - - - + + + + + + + + pina.solvers.pinns.gpinn — PINA 0.1.2 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + + + + + + + - -
Source code for pina.solvers.pinns.gpinn

 """ Module for GPINN """
 
@@ -101,8 +467,10 @@ 


 from pina.problem import SpatialProblem
 
 
-
[docs]class GPINN(PINN): - r""" +
+[docs] +class GPINN(PINN): + r""" Gradient Physics Informed Neural Network (GPINN) solver class. This class implements Gradient Physics Informed Neural Network solvers, using a user specified ``model`` to solve a specific @@ -163,7 +531,7 @@


         scheduler=ConstantLR,
         scheduler_kwargs={"factor": 1, "total_iters": 0},
     ):
-        """
+        """
         :param AbstractProblem problem: The formulation of the problem. It must
             inherit from at least
             :class:`~pina.problem.spatial_problem.SpatialProblem` in order to
@@ -198,8 +566,10 @@ 


                 "a SpatialProblem."
             )
 
-
[docs] def loss_phys(self, samples, equation): - """ +
+[docs] + def loss_phys(self, samples, equation): + """ Computes the physics loss for the GPINN solver based on given samples and equation. @@ -223,36 +593,73 @@


         g_loss_phys = self.loss(
             torch.zeros_like(loss_grad, requires_grad=True), loss_grad
         )
-        return loss_value + g_loss_phys
+ return loss_value + g_loss_phys
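Since ``loss_phys`` above penalises the spatial gradient of the residual loss in addition to the residual itself, the solver only accepts problems inheriting from ``SpatialProblem`` (see the constructor check). A minimal sketch, assuming ``spatial_problem`` is such an instance, discretised beforehand::

    from pina import Trainer
    from pina.model import FeedForward
    from pina.solvers import GPINN

    # `spatial_problem` must inherit from SpatialProblem and have its
    # domain discretised before training.
    net = FeedForward(input_dimensions=2, output_dimensions=1)
    solver = GPINN(problem=spatial_problem, model=net)
    Trainer(solver, max_epochs=1000).train()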
+ + + + + + + + + - + + \ No newline at end of file diff --git a/_modules/pina/solvers/pinns/pinn.html b/_modules/pina/solvers/pinns/pinn.html index ad1b84fc..11781a33 100644 --- a/_modules/pina/solvers/pinns/pinn.html +++ b/_modules/pina/solvers/pinns/pinn.html @@ -1,93 +1,459 @@ + - - - - - pina.solvers.pinns.pinn — PINA 0.1.1.post2407 documentation - - - - - - - - - - - - - - + + + + + + + + pina.solvers.pinns.pinn — PINA 0.1.2 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + + + + + + + + + + + + +
    Source code for pina.solvers.pinns.pinn

     """ Module for Physics Informed Neural Network. """
     
    @@ -107,8 +473,10 @@ 


     from pina.problem import InverseProblem
     
     
    -
    [docs]class PINN(PINNInterface): - r""" +
    +[docs] +class PINN(PINNInterface): + r""" Physics Informed Neural Network (PINN) solver class. This class implements Physics Informed Neural Network solvers, using a user specified ``model`` to solve a specific @@ -158,7 +526,7 @@


             scheduler=ConstantLR,
             scheduler_kwargs={"factor": 1, "total_iters": 0},
         ):
    -        """
    +        """
             :param AbstractProblem problem: The formulation of the problem.
             :param torch.nn.Module model: The neural network model to use.
             :param torch.nn.Module loss: The loss function used as minimizer,
    @@ -189,8 +557,10 @@ 


             self._scheduler = scheduler(self.optimizers[0], **scheduler_kwargs)
             self._neural_net = self.models[0]
     
    -
    [docs] def forward(self, x): - r""" +
    +[docs] + def forward(self, x): + r""" Forward pass implementation for the PINN solver. It returns the function evaluation :math:`\mathbf{u}(\mathbf{x})` at the control points :math:`\mathbf{x}`. @@ -203,8 +573,11 @@


             """
             return self.neural_net(x)
    -
    [docs] def loss_phys(self, samples, equation): - """ + +
    +[docs] + def loss_phys(self, samples, equation): + """ Computes the physics loss for the PINN solver based on given samples and equation. @@ -222,8 +595,11 @@


             self.store_log(loss_value=float(loss_value))
             return loss_value
    -
    [docs] def configure_optimizers(self): - """ + +
    +[docs] + def configure_optimizers(self): + """ Optimizer configuration for the PINN solver. @@ -243,48 +619,85 @@


                 )
             return self.optimizers, [self.scheduler]
    + @property def scheduler(self): - """ + """ Scheduler for the PINN training. """ return self._scheduler @property def neural_net(self): - """ + """ Neural network for the PINN training. """ return self._neural_net
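A minimal end-to-end sketch of the solver above; the problem instance, the discretisation keywords, and the network sizes are illustrative assumptions::

    import torch
    from pina import Trainer
    from pina.model import FeedForward
    from pina.solvers import PINN

    # `problem` is an assumed AbstractProblem instance defined
    # elsewhere; collocation points must be sampled before training
    # (the exact keyword names of discretise_domain are an assumption).
    problem.discretise_domain(n=100, mode='random', variables='all')
    net = FeedForward(input_dimensions=2, output_dimensions=1)
    solver = PINN(problem, net,
                  optimizer=torch.optim.Adam,
                  optimizer_kwargs={'lr': 1e-3})
    Trainer(solver, max_epochs=1000).train()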
+ + + + + + + + + - + + \ No newline at end of file diff --git a/_modules/pina/solvers/pinns/rbapinn.html b/_modules/pina/solvers/pinns/rbapinn.html index 95e70934..9c46260b 100644 --- a/_modules/pina/solvers/pinns/rbapinn.html +++ b/_modules/pina/solvers/pinns/rbapinn.html @@ -1,93 +1,459 @@ + - - - - - pina.solvers.pinns.rbapinn — PINA 0.1.1.post2407 documentation - - - - - - - - - - - - - - + + + + + + + + pina.solvers.pinns.rbapinn — PINA 0.1.2 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + + + + + + + - -
Source code for pina.solvers.pinns.rbapinn

 """ Module for RBAPINN. """
 
@@ -98,8 +464,10 @@ 


 from ...utils import check_consistency
 
 
-
[docs]class RBAPINN(PINN): - r""" +
+[docs] +class RBAPINN(PINN): + r""" Residual-based Attention PINN (RBAPINN) solver class. This class implements Residual-based Attention Physics Informed Neural Network solvers, using a user specified ``model`` to solve a specific @@ -170,7 +538,7 @@


         eta=0.001,
         gamma=0.999,
     ):
-        """
+        """
         :param AbstractProblem problem: The formulation of the problem.
         :param torch.nn.Module model: The neural network model to use.
         :param torch.nn.Module extra_features: The additional input
@@ -215,7 +583,7 @@ 


         self._vectorial_loss.reduction = "none"
 
     def _vect_to_scalar(self, loss_value):
-        """
+        """
         Elaboration of the pointwise loss.
 
         :param LabelTensor loss_value: the matrix of pointwise loss.
@@ -229,13 +597,15 @@ 


             ret = torch.sum(loss_value)
         else:
             raise RuntimeError(
-                f"Invalid reduction, got {self.loss.reduction} "
+                f"Invalid reduction, got {self.loss.reduction} "
                 "but expected mean or sum."
             )
         return ret
 
-
[docs] def loss_phys(self, samples, equation): - """ +
+[docs] + def loss_phys(self, samples, equation): + """ Computes the physics loss for the residual-based attention PINN solver based on given samples and equation. @@ -258,36 +628,73 @@


 
         self.store_log(loss_value=float(self._vect_to_scalar(loss_value)))
 
-        return self._vect_to_scalar(self.weights[cond] ** 2 * loss_value)
+ return self._vect_to_scalar(self.weights[cond] ** 2 * loss_value)
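As the return statement above shows, each condition's pointwise loss is rescaled by the square of its residual-based weight before reduction. A minimal sketch; ``eta`` (the weights' learning rate) and ``gamma`` (their decay) restate the defaults from the signature, everything else is an assumption::

    from pina import Trainer
    from pina.model import FeedForward
    from pina.solvers import RBAPINN

    # `problem` is assumed discretised elsewhere.
    net = FeedForward(input_dimensions=2, output_dimensions=1)
    solver = RBAPINN(problem, net, eta=0.001, gamma=0.999)
    Trainer(solver, max_epochs=1000).train()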
+ + + + + + + + + - + + \ No newline at end of file diff --git a/_modules/pina/solvers/pinns/sapinn.html b/_modules/pina/solvers/pinns/sapinn.html index 6ad72cf8..e5bcf0f9 100644 --- a/_modules/pina/solvers/pinns/sapinn.html +++ b/_modules/pina/solvers/pinns/sapinn.html @@ -1,93 +1,459 @@ + - - - - - pina.solvers.pinns.sapinn — PINA 0.1.1.post2407 documentation - - - - - - - - - - - - - - + + + + + + + + pina.solvers.pinns.sapinn — PINA 0.1.2 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + + + + + + + + + + + + +
    Source code for pina.solvers.pinns.sapinn

     import torch
     from copy import deepcopy
    @@ -107,14 +473,14 @@ 


     
     
     class Weights(torch.nn.Module):
    -    """
    +    """
    This class implements the mask model for the
    self-adaptive weights of the Self-Adaptive
    PINN solver.
         """
     
         def __init__(self, func):
    -        """
    +        """
             :param torch.nn.Module func: the mask module of SAPINN
             """
             super().__init__()
    @@ -123,7 +489,7 @@ 


             self.func = func
     
         def forward(self):
    -        """
    +        """
             Forward pass implementation for the mask module.
        It returns the mask function evaluated on the
        self-adaptive weights.
    @@ -134,8 +500,10 @@ 


             return self.func(self.sa_weights)
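Illustration only: the mask module applies the user-supplied function to the trainable ``sa_weights`` tensor. In the solver the weights are initialised in ``on_train_start``; the manual initialisation below is an assumption made to keep the example self-contained::

    import torch

    mask = Weights(torch.nn.Sigmoid())
    mask.sa_weights = torch.nn.Parameter(torch.rand(100, 1))  # assumed shape
    masked = mask()  # equivalent to torch.sigmoid(mask.sa_weights)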
     
     
    -
    [docs]class SAPINN(PINNInterface): - r""" +
    +[docs] +class SAPINN(PINNInterface): + r""" Self Adaptive Physics Informed Neural Network (SAPINN) solver class. This class implements Self-Adaptive Physics Informed Neural Network solvers, using a user specified ``model`` to solve a specific @@ -214,7 +582,7 @@


             scheduler_weights=ConstantLR,
             scheduler_weights_kwargs={"factor": 1, "total_iters": 0},
         ):
    -        """
    +        """
            :param AbstractProblem problem: The formulation of the problem.
             :param torch.nn.Module model: The neural network model to use
                 for the model.
    @@ -291,8 +659,10 @@ 


             self._vectorial_loss = deepcopy(loss)
             self._vectorial_loss.reduction = "none"
     
    -
    [docs] def forward(self, x): - """ +
+[docs] + def forward(self, x): + r""" Forward pass implementation for the SAPINN solver. It returns the function evaluation :math:`\mathbf{u}(\mathbf{x})` at the control points @@ -306,8 +676,11 @@


             """
             return self.neural_net(x)
    -
    [docs] def loss_phys(self, samples, equation): - """ + +
    +[docs] + def loss_phys(self, samples, equation): + """ Computes the physics loss for the SAPINN solver based on given samples and equation. @@ -342,8 +715,11 @@


             self.store_log(loss_value=float(loss))
             return loss_value
    -
    [docs] def loss_data(self, input_tensor, output_tensor): - """ + +
+[docs] + def loss_data(self, input_tensor, output_tensor): + """ Computes the data loss for the SAPINN solver based on input and output. It computes the loss between the network output and the true solution. @@ -378,8 +754,11 @@


             self.store_log(loss_value=float(loss))
             return loss_value
    -
    [docs] def configure_optimizers(self): - """ + +
    +[docs] + def configure_optimizers(self): + """ Optimizer configuration for the SAPINN solver. @@ -399,8 +778,11 @@


                 )
             return self.optimizers, self._schedulers
    -
    [docs] def on_train_batch_end(self, outputs, batch, batch_idx): - """ + +
+[docs] + def on_train_batch_end(self, outputs, batch, batch_idx): + """ This method is called at the end of each training batch, and overrides the PyTorch Lightning implementation for logging the checkpoints. @@ -418,8 +800,11 @@


             )
             return super().on_train_batch_end(outputs, batch, batch_idx)
    -
    [docs] def on_train_start(self): - """ + +
    +[docs] + def on_train_start(self): + """ This method is called at the start of the training for setting the self adaptive weights as parameters of the mask model. @@ -436,8 +821,11 @@


                 )
             return super().on_train_start()
    -
    [docs] def on_load_checkpoint(self, checkpoint): - """ + +
+[docs] + def on_load_checkpoint(self, checkpoint): + """ Overrides the PyTorch Lightning ``on_load_checkpoint`` to handle checkpoints for the self-adaptive weights. This method should not be overridden unless intentionally. @@ -450,8 +838,9 @@


                 )
             return super().on_load_checkpoint(checkpoint)
    + def _loss_phys(self, samples, equation): - """ + """ Elaboration of the physical loss for the SAPINN solver. :param LabelTensor samples: Input samples to evaluate the physics loss. @@ -465,7 +854,7 @@


             return self._compute_loss(residual)
     
         def _loss_data(self, input_tensor, output_tensor):
    -        """
    +        """
             Elaboration of the loss related to data for the SAPINN solver.
     
             :param LabelTensor input_tensor: The input to the neural networks.
    @@ -479,7 +868,7 @@ 


             return self._compute_loss(residual)
     
         def _compute_loss(self, residual):
    -        """
    +        """
             Elaboration of the pointwise loss through the mask model and the
            self-adaptive weights.
     
    @@ -501,7 +890,7 @@ 


             )
     
         def _vect_to_scalar(self, loss_value):
    -        """
    +        """
            Elaboration of the pointwise loss.
     
    @@ -516,14 +905,14 @@ 


                 ret = torch.sum(loss_value)
             else:
                 raise RuntimeError(
    -                f"Invalid reduction, got {self.loss.reduction} "
    +                f"Invalid reduction, got {self.loss.reduction} "
                     "but expected mean or sum."
                 )
             return ret
     
         @property
         def neural_net(self):
    -        """
    +        """
             Returns the neural network model.
     
             :return: The neural network model.
    @@ -533,7 +922,7 @@ 


     
         @property
         def weights_dict(self):
    -        """
    +        """
        Return the mask models associated with the application of
        the mask to the self-adaptive weights for each loss that
        composes the global loss of the problem.
    @@ -545,7 +934,7 @@ 


     
         @property
         def scheduler_model(self):
    -        """
    +        """
             Returns the scheduler associated with the neural network model.
     
             :return: The scheduler for the neural network model.
    @@ -555,7 +944,7 @@ 


     
         @property
         def scheduler_weights(self):
    -        """
    +        """
             Returns the scheduler associated with the mask model (if applicable).
     
             :return: The scheduler for the mask model.
    @@ -565,7 +954,7 @@ 


     
         @property
         def optimizer_model(self):
    -        """
    +        """
             Returns the optimizer associated with the neural network model.
     
             :return: The optimizer for the neural network model.
    @@ -575,42 +964,78 @@ 


     
         @property
         def optimizer_weights(self):
    -        """
    +        """
             Returns the optimizer associated with the mask model (if applicable).
     
             :return: The optimizer for the mask model.
             :rtype: torch.optim.Optimizer
             """
             return self.optimizers[1]
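The paired properties above (index 0 for the model, index 1 for the mask weights) reflect the two sets of trainable objects held by this solver. A minimal sketch; the optimizer keyword names are inferred from these properties and are therefore an assumption::

    import torch
    from pina import Trainer
    from pina.model import FeedForward
    from pina.solvers import SAPINN

    # `problem` is assumed discretised elsewhere.
    solver = SAPINN(problem=problem,
                    model=FeedForward(input_dimensions=2,
                                      output_dimensions=1),
                    optimizer_model=torch.optim.Adam,
                    optimizer_model_kwargs={'lr': 1e-3})
    Trainer(solver, max_epochs=1000).train()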
+ + + + + + + + + - + + \ No newline at end of file diff --git a/_modules/pina/solvers/rom.html b/_modules/pina/solvers/rom.html index c63b1240..701ad2c1 100644 --- a/_modules/pina/solvers/rom.html +++ b/_modules/pina/solvers/rom.html @@ -1,93 +1,459 @@ + - - - - - pina.solvers.rom — PINA 0.1.1.post2407 documentation - - - - - - - - - - - - - - + + + + + + + + pina.solvers.rom — PINA 0.1.2 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + + + + + + + + + + + + +
+ + + + + + + + + - + + \ No newline at end of file diff --git a/_modules/pina/solvers/solver.html b/_modules/pina/solvers/solver.html index ef3ca276..d48dc5c7 100644 --- a/_modules/pina/solvers/solver.html +++ b/_modules/pina/solvers/solver.html @@ -1,93 +1,459 @@ + - - - - - pina.solvers.solver — PINA 0.1.1.post2407 documentation - - - - - - - - - - - - - - + + + + + + + + pina.solvers.solver — PINA 0.1.2 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + + + + + + + + + + + + +
    Source code for pina.solvers.solver

     """ Solver module. """
     
    @@ -100,8 +466,10 @@ 


     import sys
     
     
    -
    [docs]class SolverInterface(pytorch_lightning.LightningModule, metaclass=ABCMeta): - """ +
+[docs] +class SolverInterface(pytorch_lightning.LightningModule, metaclass=ABCMeta): + """ Solver base class. This class is a wrapper of the LightningModule class, inheriting all the LightningModule methods. @@ -115,7 +483,7 @@


             optimizers_kwargs,
             extra_features=None,
         ):
    -        """
    +        """
             :param models: A torch neural network model instance.
             :type models: torch.nn.Module
             :param problem: A problem definition instance.
    @@ -153,7 +521,7 @@ 


             if len_model != len_optimizer:
                 raise ValueError(
                     "You must define one optimizer for each model."
    -                f"Got {len_model} models, and {len_optimizer}"
    +                f"Got {len_model} models, and {len_optimizer}"
                     " optimizers."
                 )
     
    @@ -162,8 +530,8 @@ 


                 raise ValueError(
                     "You must define one dictionary of keyword"
                     " arguments for each optimizers."
    -                f"Got {len_optimizer} optimizers, and"
    -                f" {len_optimizer_kwargs} dicitionaries"
    +                f"Got {len_optimizer} optimizers, and"
    +                f" {len_optimizer_kwargs} dicitionaries"
                 )
     
             # extra features handling
    @@ -177,8 +545,8 @@ 


                     if len(extra_features) != len_model:
                         raise ValueError(
                             "You passed a list of extrafeatures list with len"
    -                        f"different of models len. Expected {len_model} "
    -                        f"got {len(extra_features)}. If you want to use "
    +                        f"different of models len. Expected {len_model} "
    +                        f"got {len(extra_features)}. If you want to use "
                             "the same list of extra features for all models, "
                             "just pass a list of extrafeatures and not a list "
                             "of list of extra features."
    @@ -204,38 +572,49 @@ 


             # assigning problem
             self._pina_problem = problem
     
    -
    [docs] @abstractmethod +
    +[docs] + @abstractmethod def forward(self, *args, **kwargs): pass
    -
    [docs] @abstractmethod + +
    +[docs] + @abstractmethod def training_step(self): pass
    -
    [docs] @abstractmethod + +
    +[docs] + @abstractmethod def configure_optimizers(self): pass
+ @property def models(self): - """ + """ The torch models.""" return self._pina_models @property def optimizers(self): - """ + """ The torch optimizers.""" return self._pina_optimizers @property def problem(self): - """ + """ The problem formulation.""" return self._pina_problem -
    [docs] def on_train_start(self): - """ +
+[docs] + def on_train_start(self): + """ On training start this function is called to perform global checks for the different solvers. """ @@ -246,7 +625,9 @@


                 dataloader = dataloader.loaders
             self._dataloader = dataloader
     
    -        return super().on_train_start()
    + return super().on_train_start()
    +
    + # @model.setter # def model(self, new_model): @@ -263,33 +644,68 @@


         #     self._problem = problem
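A minimal subclass sketch showing the contract enforced above (one optimizer, and one kwargs dictionary, per model, plus the three abstract methods); everything here is illustrative rather than part of PINA::

    import torch
    from pina.solvers import SolverInterface

    class MySolver(SolverInterface):
        def forward(self, x):
            return self.models[0](x)

        def training_step(self, batch, batch_idx):
            pts, true = batch  # assumed batch layout, illustration only
            return torch.nn.functional.mse_loss(self.forward(pts), true)

        def configure_optimizers(self):
            return self.optimizers, []

    # one optimizer and one kwargs dict per model, as validated above;
    # `problem` is assumed to be defined elsewhere.
    solver = MySolver(models=[torch.nn.Linear(2, 1)], problem=problem,
                      optimizers=[torch.optim.Adam],
                      optimizers_kwargs=[{'lr': 1e-3}])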
     
+ + + + + + + + + - + + \ No newline at end of file diff --git a/_modules/pina/solvers/supervised.html b/_modules/pina/solvers/supervised.html index 38b843e9..4029293a 100644 --- a/_modules/pina/solvers/supervised.html +++ b/_modules/pina/solvers/supervised.html @@ -1,93 +1,459 @@ + - - - - - pina.solvers.supervised — PINA 0.1.1.post2407 documentation - - - - - - - - - - - - - - + + + + + + + + pina.solvers.supervised — PINA 0.1.2 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + + + + + + + + + + + + +

    Source code for pina.solvers.supervised

     """ Module for SupervisedSolver """
     
    @@ -109,8 +475,10 @@ 


     from torch.nn.modules.loss import _Loss
     
     
    -
    [docs]class SupervisedSolver(SolverInterface): - r""" +
    +[docs] +class SupervisedSolver(SolverInterface): + r""" SupervisedSolver solver class. This class implements a SupervisedSolver, using a user specified ``model`` to solve a specific ``problem``. @@ -149,7 +517,7 @@


             scheduler=ConstantLR,
             scheduler_kwargs={"factor": 1, "total_iters": 0},
         ):
    -        """
    +        """
            :param AbstractProblem problem: The formulation of the problem.
             :param torch.nn.Module model: The neural network model to use.
             :param torch.nn.Module loss: The loss function used as minimizer,
    @@ -182,8 +550,10 @@ 


             self._loss = loss
             self._neural_net = self.models[0]
     
    -
    [docs] def forward(self, x): - """Forward pass implementation for the solver. +
    +[docs] + def forward(self, x): + """Forward pass implementation for the solver. :param torch.Tensor x: Input tensor. :return: Solver solution. @@ -191,16 +561,22 @@


             """
             return self.neural_net(x)
    -
    [docs] def configure_optimizers(self): - """Optimizer configuration for the solver. + +
    +[docs] + def configure_optimizers(self): + """Optimizer configuration for the solver. :return: The optimizers and the schedulers :rtype: tuple(list, list) """ return self.optimizers, [self.scheduler]
    -
    [docs] def training_step(self, batch, batch_idx): - """Solver training step. + +
    +[docs] + def training_step(self, batch, batch_idx): + """Solver training step. :param batch: The batch element in the dataloader. :type batch: tuple @@ -225,7 +601,7 @@


                 # for data driven mode
                 if not hasattr(condition, "output_points"):
                     raise NotImplementedError(
    -                    f"{type(self).__name__} works only in data-driven mode."
    +                    f"{type(self).__name__} works only in data-driven mode."
                     )
     
                 output_pts = out[condition_idx == condition_id]
    @@ -240,8 +616,11 @@ 


             self.log("mean_loss", float(loss), prog_bar=True, logger=True)
             return loss
    -
    [docs] def loss_data(self, input_pts, output_pts): - """ + +
+[docs] + def loss_data(self, input_pts, output_pts): + """ The data loss for the Supervised solver. It computes the loss between the network output and the true solution. This function should not be overridden unless intentionally. @@ -254,55 +633,92 @@


             """
             return self.loss(self.forward(input_pts), output_pts)
    + @property def scheduler(self): - """ + """ Scheduler for training. """ return self._scheduler @property def neural_net(self): - """ + """ Neural network for training. """ return self._neural_net @property def loss(self): - """ + """ Loss for training. """ return self._loss
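As ``training_step`` above enforces, the solver runs only on conditions carrying ``output_points`` (data-driven mode). A minimal sketch, assuming ``data_problem`` provides paired input/output points::

    from pina import Trainer
    from pina.model import FeedForward
    from pina.solvers import SupervisedSolver

    # `data_problem` must expose output_points for every condition,
    # otherwise training_step raises NotImplementedError (see above).
    net = FeedForward(input_dimensions=2, output_dimensions=1)
    solver = SupervisedSolver(problem=data_problem, model=net)
    Trainer(solver, batch_size=8, max_epochs=200).train()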
+ + + + + + + + + - + + \ No newline at end of file diff --git a/_modules/pina/trainer.html b/_modules/pina/trainer.html index 8eb6ae83..f58d2a8e 100644 --- a/_modules/pina/trainer.html +++ b/_modules/pina/trainer.html @@ -1,93 +1,459 @@ + - - - - - pina.trainer — PINA 0.1.1.post2407 documentation - - - - - - - - - - - - - - + + + + + + + + pina.trainer — PINA 0.1.2 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + + + + + + + + + + + + +
    Source code for pina.trainer

     """ Trainer module. """
     
    @@ -98,10 +464,12 @@ 


     from .solvers.solver import SolverInterface
     
     
    -
    [docs]class Trainer(pytorch_lightning.Trainer): +
+[docs] +class Trainer(pytorch_lightning.Trainer): def __init__(self, solver, batch_size=None, **kwargs): - """ + """ PINA Trainer class for customizing every aspect of training via flags. :param solver: A PINA :class:`SolverInterface` solver for the differential problem. @@ -129,7 +497,7 @@


             # create dataloader
             if solver.problem.have_sampled_points is False:
                 raise RuntimeError(
    -                f"Input points in {solver.problem.not_sampled_points} "
    +                f"Input points in {solver.problem.not_sampled_points} "
                     "training are None. Please "
                     "sample points in your problem by calling "
                     "discretise_domain function before train "
    @@ -139,7 +507,7 @@ 


             self._create_or_update_loader()
     
         def _create_or_update_loader(self):
    -        """
    +        """
        This method is used because, if resampling is needed
        during training, there is no need to touch the
        trainer dataloader; just call this method.
    @@ -162,49 +530,88 @@ 


                         pb.unknown_parameters[key].data.to(device)
                     )
     
    -
    [docs] def train(self, **kwargs): - """ +
+[docs] + def train(self, **kwargs): + """ Train the solver. """ return super().fit( self._model, train_dataloaders=self._loader, **kwargs )
+ @property def solver(self): - """ + """ Returns the trainer's solver. """ return self._model
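A usage sketch of the wrapper; ``max_epochs`` is a standard PyTorch Lightning flag forwarded through ``**kwargs``, and ``solver`` is assumed to be any solver built on an already-discretised problem (otherwise the RuntimeError above is raised)::

    from pina import Trainer

    trainer = Trainer(solver, batch_size=None, max_epochs=500)
    trainer.train()        # wraps lightning's fit(), as shown above
    print(trainer.solver)  # the wrapped solver, via the property above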
- + + + + + + + + + + + + - + + \ No newline at end of file diff --git a/_modules/torch/nn/modules/module.html b/_modules/torch/nn/modules/module.html deleted file mode 100644 index 8b6d782c..00000000 --- a/_modules/torch/nn/modules/module.html +++ /dev/null @@ -1,2700 +0,0 @@ - - - - - - torch.nn.modules.module — PINA 0.1.1.post2407 documentation - - - - - - - - - - - - - - - - - - - -
Source code for torch.nn.modules.module

-from collections import OrderedDict, namedtuple
-import itertools
-import warnings
-import functools
-import weakref
-
-import torch
-from torch._prims_common import DeviceLikeType
-from ..parameter import Parameter
-import torch.utils.hooks as hooks
-
-from torch import Tensor, device, dtype
-from typing import Union, Tuple, Any, Callable, Iterator, Set, Optional, overload, TypeVar, Mapping, Dict, List
-from typing_extensions import Self
-from ...utils.hooks import RemovableHandle
-from torch.utils._python_dispatch import is_traceable_wrapper_subclass
-
-__all__ = ['register_module_forward_pre_hook', 'register_module_forward_hook',
-           'register_module_full_backward_pre_hook', 'register_module_backward_hook',
-           'register_module_full_backward_hook', 'register_module_buffer_registration_hook',
-           'register_module_module_registration_hook', 'register_module_parameter_registration_hook', 'Module']
-
-_grad_t = Union[Tuple[Tensor, ...], Tensor]
-# See https://mypy.readthedocs.io/en/latest/generics.html#generic-methods-and-generic-self for the use
-# of `T` to annotate `self`. Many methods of `Module` return `self` and we want those return values to be
-# the type of the subclass, not the looser type of `Module`.
-T = TypeVar('T', bound='Module')
-
-
-class _IncompatibleKeys(namedtuple('IncompatibleKeys', ['missing_keys', 'unexpected_keys'])):
-    def __repr__(self):
-        if not self.missing_keys and not self.unexpected_keys:
-            return '<All keys matched successfully>'
-        return super().__repr__()
-
-    __str__ = __repr__
-
-
-def _addindent(s_, numSpaces):
-    s = s_.split('\n')
-    # don't do anything for single-line stuff
-    if len(s) == 1:
-        return s_
-    first = s.pop(0)
-    s = [(numSpaces * ' ') + line for line in s]
-    s = '\n'.join(s)
-    s = first + '\n' + s
-    return s
-
-r"""This tracks hooks common to all modules that are executed immediately before
-.registering the buffer/module/parameter"""
-_global_buffer_registration_hooks: Dict[int, Callable] = OrderedDict()
-_global_module_registration_hooks: Dict[int, Callable] = OrderedDict()
-_global_parameter_registration_hooks: Dict[int, Callable] = OrderedDict()
-
-class _WrappedHook:
-    def __init__(self, hook: Callable, module: Optional["Module"] = None):
-        self.hook: Callable = hook
-        functools.update_wrapper(self, hook)
-
-        self.with_module: bool = False
-
-        if module is not None:
-            self.module: weakref.ReferenceType[Module] = weakref.ref(module)
-            self.with_module = True
-
-    def __call__(self, *args: Any, **kwargs: Any) -> Any:
-        if self.with_module:
-            module = self.module()
-            if module is None:
-                raise RuntimeError("You are trying to call the hook of a dead Module!")
-            return self.hook(module, *args, **kwargs)
-        return self.hook(*args, **kwargs)
-
-    def __getstate__(self) -> Dict:
-        result = {"hook": self.hook, "with_module": self.with_module}
-        if self.with_module:
-            result["module"] = self.module()
-
-        return result
-
-    def __setstate__(self, state: Dict):
-        self.hook = state["hook"]
-        self.with_module = state["with_module"]
-
-        if self.with_module:
-            if state["module"] is None:
-                raise RuntimeError("You are trying to revive the hook of a dead Module!")
-            self.module = weakref.ref(state["module"])
-
-
-r"""This tracks hooks common to all modules that are executed before/after
-calling forward and backward. This is global state used for debugging/profiling
-purposes"""
-_global_backward_pre_hooks: Dict[int, Callable] = OrderedDict()
-_global_backward_hooks: Dict[int, Callable] = OrderedDict()
-_global_is_full_backward_hook: Optional[bool] = None
-_global_forward_pre_hooks: Dict[int, Callable] = OrderedDict()
-_global_forward_hooks: Dict[int, Callable] = OrderedDict()
-_global_forward_hooks_always_called: Dict[int, bool] = OrderedDict()
-
-_EXTRA_STATE_KEY_SUFFIX = '_extra_state'
-
-
-def register_module_buffer_registration_hook(hook: Callable[..., None]) -> RemovableHandle:
-    r"""Register a buffer registration hook common to all modules.
-
-    .. warning ::
-
-        This adds global state to the `nn.Module` module
-
-    The hook will be called every time :func:`register_buffer` is invoked.
-    It should have the following signature::
-
-        hook(module, name, buffer) -> None or new buffer
-
-    The hook can modify the input or return a single modified value in the hook.
-
-    Returns:
-        :class:`torch.utils.hooks.RemovableHandle`:
-            a handle that can be used to remove the added hook by calling
-            ``handle.remove()``
-    """
-    handle = hooks.RemovableHandle(_global_buffer_registration_hooks)
-    _global_buffer_registration_hooks[handle.id] = hook
-    return handle
-
-
-def register_module_module_registration_hook(hook: Callable[..., None]) -> RemovableHandle:
-    r"""Register a module registration hook common to all modules.
-
-    .. warning ::
-
-        This adds global state to the `nn.Module` module
-
-    The hook will be called every time :func:`register_module` is invoked.
-    It should have the following signature::
-
-        hook(module, name, submodule) -> None or new submodule
-
-    The hook can modify the input or return a single modified value in the hook.
-
-    Returns:
-        :class:`torch.utils.hooks.RemovableHandle`:
-            a handle that can be used to remove the added hook by calling
-            ``handle.remove()``
-    """
-    handle = hooks.RemovableHandle(_global_module_registration_hooks)
-    _global_module_registration_hooks[handle.id] = hook
-    return handle
-
-
-def register_module_parameter_registration_hook(hook: Callable[..., None]) -> RemovableHandle:
-    r"""Register a parameter registration hook common to all modules.
-
-    .. warning ::
-
-        This adds global state to the `nn.Module` module
-
-    The hook will be called every time :func:`register_parameter` is invoked.
-    It should have the following signature::
-
-        hook(module, name, param) -> None or new parameter
-
-    The hook can modify the input or return a single modified value in the hook.
-
-    Returns:
-        :class:`torch.utils.hooks.RemovableHandle`:
-            a handle that can be used to remove the added hook by calling
-            ``handle.remove()``
-    """
-    handle = hooks.RemovableHandle(_global_parameter_registration_hooks)
-    _global_parameter_registration_hooks[handle.id] = hook
-    return handle
-
-
-def register_module_forward_pre_hook(hook: Callable[..., None]) -> RemovableHandle:
-    r"""Register a forward pre-hook common to all modules.
-
-    .. warning ::
-
-        This adds global state to the `nn.module` module
-        and it is only intended for debugging/profiling purposes.
-
-    The hook will be called every time before :func:`forward` is invoked.
-    It should have the following signature::
-
-        hook(module, input) -> None or modified input
-
-    The input contains only the positional arguments given to the module.
-    Keyword arguments won't be passed to the hooks and only to the ``forward``.
-    The hook can modify the input. User can either return a tuple or a
-    single modified value in the hook. We will wrap the value into a tuple
-    if a single value is returned(unless that value is already a tuple).
-
-    This hook has precedence over the specific module hooks registered with
-    ``register_forward_pre_hook``.
-
-    Returns:
-        :class:`torch.utils.hooks.RemovableHandle`:
-            a handle that can be used to remove the added hook by calling
-            ``handle.remove()``
-    """
-    handle = hooks.RemovableHandle(_global_forward_pre_hooks)
-    _global_forward_pre_hooks[handle.id] = hook
-    return handle
-
-
-def register_module_forward_hook(hook: Callable[..., None], *, always_call: bool = False) -> RemovableHandle:
-    r"""Register a global forward hook for all the modules.
-
-    .. warning ::
-
-        This adds global state to the `nn.module` module
-        and it is only intended for debugging/profiling purposes.
-
-    The hook will be called every time after :func:`forward` has computed an output.
-    It should have the following signature::
-
-        hook(module, input, output) -> None or modified output
-
-    The input contains only the positional arguments given to the module.
-    Keyword arguments won't be passed to the hooks and only to the ``forward``.
-    The hook can modify the output. It can modify the input inplace but
-    it will not have effect on forward since this is called after
-    :func:`forward` is called.
-
-    Parameters:
-        hook (Callable): The user defined hook to be registered.
-        always_call (bool): If ``True`` the ``hook`` will be run regardless of
-            whether an exception is raised while calling the Module.
-            Default: ``False``
-    Returns:
-        :class:`torch.utils.hooks.RemovableHandle`:
-            a handle that can be used to remove the added hook by calling
-            ``handle.remove()``
-
-    This hook will be executed before specific module hooks registered with
-    ``register_forward_hook``.
-    """
-    handle = hooks.RemovableHandle(_global_forward_hooks,
-                                   extra_dict=_global_forward_hooks_always_called)
-    _global_forward_hooks[handle.id] = hook
-    if always_call:
-        _global_forward_hooks_always_called[handle.id] = True
-    return handle
-
-
-def register_module_backward_hook(
-    hook: Callable[['Module', _grad_t, _grad_t], Union[None, _grad_t]]
-) -> RemovableHandle:
-    r"""Register a backward hook common to all the modules.
-
-    This function is deprecated in favor of
-    :func:`torch.nn.modules.module.register_module_full_backward_hook`
-    and the behavior of this function will change in future versions.
-
-    Returns:
-        :class:`torch.utils.hooks.RemovableHandle`:
-            a handle that can be used to remove the added hook by calling
-            ``handle.remove()``
-
-    """
-    global _global_is_full_backward_hook
-    if _global_is_full_backward_hook is True:
-        raise RuntimeError("Cannot use both regular backward hooks and full backward hooks as a "
-                           "global Module hook. Please use only one of them.")
-
-    _global_is_full_backward_hook = False
-
-    handle = hooks.RemovableHandle(_global_backward_hooks)
-    _global_backward_hooks[handle.id] = hook
-    return handle
-
-
-def register_module_full_backward_pre_hook(
-    hook: Callable[['Module', _grad_t], Union[None, _grad_t]]
-) -> RemovableHandle:
-    r"""Register a backward pre-hook common to all the modules.
-
-    .. warning ::
-        This adds global state to the `nn.module` module
-        and it is only intended for debugging/profiling purposes.
-
-    Hooks registered using this function behave in the same way as those
-    registered by :meth:`torch.nn.Module.register_full_backward_pre_hook`.
-    Refer to its documentation for more details.
-
-    Hooks registered using this function will be called before hooks registered
-    using :meth:`torch.nn.Module.register_full_backward_pre_hook`.
-
-    Returns:
-        :class:`torch.utils.hooks.RemovableHandle`:
-            a handle that can be used to remove the added hook by calling
-            ``handle.remove()``
-
-    """
-    handle = hooks.RemovableHandle(_global_backward_pre_hooks)
-    _global_backward_pre_hooks[handle.id] = hook
-    return handle
-
-
-def register_module_full_backward_hook(
-    hook: Callable[['Module', _grad_t, _grad_t], Union[None, _grad_t]]
-) -> RemovableHandle:
-    r"""Register a backward hook common to all the modules.
-
-    .. warning ::
-        This adds global state to the `nn.module` module
-        and it is only intended for debugging/profiling purposes.
-
-    Hooks registered using this function behave in the same way as those
-    registered by :meth:`torch.nn.Module.register_full_backward_hook`.
-    Refer to its documentation for more details.
-
-    Hooks registered using this function will be called before hooks registered
-    using :meth:`torch.nn.Module.register_full_backward_hook`.
-
-    Returns:
-        :class:`torch.utils.hooks.RemovableHandle`:
-            a handle that can be used to remove the added hook by calling
-            ``handle.remove()``
-
-    """
-    global _global_is_full_backward_hook
-    if _global_is_full_backward_hook is False:
-        raise RuntimeError("Cannot use both regular backward hooks and full backward hooks as a "
-                           "global Module hook. Please use only one of them.")
-
-    _global_is_full_backward_hook = True
-
-    handle = hooks.RemovableHandle(_global_backward_hooks)
-    _global_backward_hooks[handle.id] = hook
-    return handle
-
-
-# Trick mypy into not applying contravariance rules to inputs by defining
-# forward as a value, rather than a function.  See also
-# https://github.com/python/mypy/issues/8795
-def _forward_unimplemented(self, *input: Any) -> None:
-    r"""Define the computation performed at every call.
-
-    Should be overridden by all subclasses.
-
-    .. note::
-        Although the recipe for forward pass needs to be defined within
-        this function, one should call the :class:`Module` instance afterwards
-        instead of this since the former takes care of running the
-        registered hooks while the latter silently ignores them.
-    """
-    raise NotImplementedError(f"Module [{type(self).__name__}] is missing the required \"forward\" function")
-
-
-class Module:
-    r"""Base class for all neural network modules.
-
-    Your models should also subclass this class.
-
-    Modules can also contain other Modules, allowing to nest them in
-    a tree structure. You can assign the submodules as regular attributes::
-
-        import torch.nn as nn
-        import torch.nn.functional as F
-
-        class Model(nn.Module):
-            def __init__(self):
-                super().__init__()
-                self.conv1 = nn.Conv2d(1, 20, 5)
-                self.conv2 = nn.Conv2d(20, 20, 5)
-
-            def forward(self, x):
-                x = F.relu(self.conv1(x))
-                return F.relu(self.conv2(x))
-
-    Submodules assigned in this way will be registered, and will have their
-    parameters converted too when you call :meth:`to`, etc.
-
-    .. note::
-        As per the example above, an ``__init__()`` call to the parent class
-        must be made before assignment on the child.
-
-    :ivar training: Boolean represents whether this module is in training or
-                    evaluation mode.
-    :vartype training: bool
-    """
-
-    dump_patches: bool = False
-
-    _version: int = 1
-    r"""This allows better BC support for :meth:`load_state_dict`. In
-    :meth:`state_dict`, the version number will be saved as in the attribute
-    `_metadata` of the returned state dict, and thus pickled. `_metadata` is a
-    dictionary with keys that follow the naming convention of state dict. See
-    ``_load_from_state_dict`` on how to use this information in loading.
-
-    If new parameters/buffers are added/removed from a module, this number shall
-    be bumped, and the module's `_load_from_state_dict` method can compare the
-    version number and do appropriate changes if the state dict is from before
-    the change."""
-
-    training: bool
-    _parameters: Dict[str, Optional[Parameter]]
-    _buffers: Dict[str, Optional[Tensor]]
-    _non_persistent_buffers_set: Set[str]
-    _backward_pre_hooks: Dict[int, Callable]
-    _backward_hooks: Dict[int, Callable]
-    _is_full_backward_hook: Optional[bool]
-    _forward_hooks: Dict[int, Callable]
-    # Marks whether the corresponding _forward_hooks accept kwargs or not.
-    # As JIT does not support Set[int], this dict is used as a set, where all
-    # hooks represented in this dict accept kwargs.
-    _forward_hooks_with_kwargs: Dict[int, bool]
-    # forward hooks that should always be called even if an exception is raised
-    _forward_hooks_always_called: Dict[int, bool]
-    _forward_pre_hooks: Dict[int, Callable]
-    # Marks whether the corresponding _forward_hooks accept kwargs or not.
-    # As JIT does not support Set[int], this dict is used as a set, where all
-    # hooks represented in this dict accept kwargs.
-    _forward_pre_hooks_with_kwargs: Dict[int, bool]
-    _state_dict_hooks: Dict[int, Callable]
-    _load_state_dict_pre_hooks: Dict[int, Callable]
-    _state_dict_pre_hooks: Dict[int, Callable]
-    _load_state_dict_post_hooks: Dict[int, Callable]
-    _modules: Dict[str, Optional['Module']]
-    call_super_init: bool = False
-    _compiled_call_impl : Optional[Callable] = None
-
-    def __init__(self, *args, **kwargs) -> None:
-        """Initialize internal Module state, shared by both nn.Module and ScriptModule."""
-        torch._C._log_api_usage_once("python.nn_module")
-
-        # Backward compatibility: no args used to be allowed when call_super_init=False
-        if self.call_super_init is False and bool(kwargs):
-            raise TypeError("{}.__init__() got an unexpected keyword argument '{}'"
-                            "".format(type(self).__name__, next(iter(kwargs))))
-
-        if self.call_super_init is False and bool(args):
-            raise TypeError(f"{type(self).__name__}.__init__() takes 1 positional argument but {len(args) + 1} were"
-                            " given")
-
-        """
-        Calls super().__setattr__('a', a) instead of the typical self.a = a
-        to avoid Module.__setattr__ overhead. Module's __setattr__ has special
-        handling for parameters, submodules, and buffers but simply calls into
-        super().__setattr__ for all other attributes.
-        """
-        super().__setattr__('training', True)
-        super().__setattr__('_parameters', OrderedDict())
-        super().__setattr__('_buffers', OrderedDict())
-        super().__setattr__('_non_persistent_buffers_set', set())
-        super().__setattr__('_backward_pre_hooks', OrderedDict())
-        super().__setattr__('_backward_hooks', OrderedDict())
-        super().__setattr__('_is_full_backward_hook', None)
-        super().__setattr__('_forward_hooks', OrderedDict())
-        super().__setattr__('_forward_hooks_with_kwargs', OrderedDict())
-        super().__setattr__('_forward_hooks_always_called', OrderedDict())
-        super().__setattr__('_forward_pre_hooks', OrderedDict())
-        super().__setattr__('_forward_pre_hooks_with_kwargs', OrderedDict())
-        super().__setattr__('_state_dict_hooks', OrderedDict())
-        super().__setattr__('_state_dict_pre_hooks', OrderedDict())
-        super().__setattr__('_load_state_dict_pre_hooks', OrderedDict())
-        super().__setattr__('_load_state_dict_post_hooks', OrderedDict())
-        super().__setattr__('_modules', OrderedDict())
-
-        if self.call_super_init:
-            super().__init__(*args, **kwargs)
-
-    forward: Callable[..., Any] = _forward_unimplemented
-
-    def register_buffer(self, name: str, tensor: Optional[Tensor], persistent: bool = True) -> None:
-        r"""Add a buffer to the module.
-
-        This is typically used to register a buffer that should not to be
-        considered a model parameter. For example, BatchNorm's ``running_mean``
-        is not a parameter, but is part of the module's state. Buffers, by
-        default, are persistent and will be saved alongside parameters. This
-        behavior can be changed by setting :attr:`persistent` to ``False``. The
-        only difference between a persistent buffer and a non-persistent buffer
-        is that the latter will not be a part of this module's
-        :attr:`state_dict`.
-
-        Buffers can be accessed as attributes using given names.
-
-        Args:
-            name (str): name of the buffer. The buffer can be accessed
-                from this module using the given name
-            tensor (Tensor or None): buffer to be registered. If ``None``, then operations
-                that run on buffers, such as :attr:`cuda`, are ignored. If ``None``,
-                the buffer is **not** included in the module's :attr:`state_dict`.
-            persistent (bool): whether the buffer is part of this module's
-                :attr:`state_dict`.
-
-        Example::
-
-            >>> # xdoctest: +SKIP("undefined vars")
-            >>> self.register_buffer('running_mean', torch.zeros(num_features))
-
-        """
-        if persistent is False and isinstance(self, torch.jit.ScriptModule):
-            raise RuntimeError("ScriptModule does not support non-persistent buffers")
-
-        if '_buffers' not in self.__dict__:
-            raise AttributeError(
-                "cannot assign buffer before Module.__init__() call")
-        elif not isinstance(name, str):
-            raise TypeError(f"buffer name should be a string. Got {torch.typename(name)}")
-        elif '.' in name:
-            raise KeyError("buffer name can't contain \".\"")
-        elif name == '':
-            raise KeyError("buffer name can't be empty string \"\"")
-        elif hasattr(self, name) and name not in self._buffers:
-            raise KeyError(f"attribute '{name}' already exists")
-        elif tensor is not None and not isinstance(tensor, torch.Tensor):
-            raise TypeError(f"cannot assign '{torch.typename(tensor)}' object to buffer '{name}' "
-                            "(torch Tensor or None required)"
-                            )
-        else:
-            for hook in _global_buffer_registration_hooks.values():
-                output = hook(self, name, tensor)
-                if output is not None:
-                    tensor = output
-            self._buffers[name] = tensor
-            if persistent:
-                self._non_persistent_buffers_set.discard(name)
-            else:
-                self._non_persistent_buffers_set.add(name)
-
-    def register_parameter(self, name: str, param: Optional[Parameter]) -> None:
-        r"""Add a parameter to the module.
-
-        The parameter can be accessed as an attribute using given name.
-
-        Args:
-            name (str): name of the parameter. The parameter can be accessed
-                from this module using the given name
-            param (Parameter or None): parameter to be added to the module. If
-                ``None``, then operations that run on parameters, such as :attr:`cuda`,
-                are ignored. If ``None``, the parameter is **not** included in the
-                module's :attr:`state_dict`.
-        """
-        if '_parameters' not in self.__dict__:
-            raise AttributeError(
-                "cannot assign parameter before Module.__init__() call")
-
-        elif not isinstance(name, str):
-            raise TypeError(f"parameter name should be a string. Got {torch.typename(name)}")
-        elif '.' in name:
-            raise KeyError("parameter name can't contain \".\"")
-        elif name == '':
-            raise KeyError("parameter name can't be empty string \"\"")
-        elif hasattr(self, name) and name not in self._parameters:
-            raise KeyError(f"attribute '{name}' already exists")
-
-        if param is None:
-            self._parameters[name] = None
-        elif not isinstance(param, Parameter):
-            raise TypeError(f"cannot assign '{torch.typename(param)}' object to parameter '{name}' "
-                            "(torch.nn.Parameter or None required)"
-                            )
-        elif param.grad_fn:
-            raise ValueError(
-                f"Cannot assign non-leaf Tensor to parameter '{name}'. Model "
-                f"parameters must be created explicitly. To express '{name}' "
-                "as a function of another Tensor, compute the value in "
-                "the forward() method.")
-        else:
-            for hook in _global_parameter_registration_hooks.values():
-                output = hook(self, name, param)
-                if output is not None:
-                    param = output
-            self._parameters[name] = param
-
-    def add_module(self, name: str, module: Optional['Module']) -> None:
-        r"""Add a child module to the current module.
-
-        The module can be accessed as an attribute using the given name.
-
-        Args:
-            name (str): name of the child module. The child module can be
-                accessed from this module using the given name
-            module (Module): child module to be added to the module.
-        """
-        if not isinstance(module, Module) and module is not None:
-            raise TypeError(f"{torch.typename(module)} is not a Module subclass")
-        elif not isinstance(name, str):
-            raise TypeError(f"module name should be a string. Got {torch.typename(name)}")
-        elif hasattr(self, name) and name not in self._modules:
-            raise KeyError(f"attribute '{name}' already exists")
-        elif '.' in name:
-            raise KeyError(f"module name can't contain \".\", got: {name}")
-        elif name == '':
-            raise KeyError("module name can't be empty string \"\"")
-        for hook in _global_module_registration_hooks.values():
-            output = hook(self, name, module)
-            if output is not None:
-                module = output
-        self._modules[name] = module
-
-    def register_module(self, name: str, module: Optional['Module']) -> None:
-        r"""Alias for :func:`add_module`."""
-        self.add_module(name, module)
-
-    def get_submodule(self, target: str) -> "Module":
-        """Return the submodule given by ``target`` if it exists, otherwise throw an error.
-
-        For example, let's say you have an ``nn.Module`` ``A`` that
-        looks like this:
-
-        .. code-block:: text
-
-            A(
-                (net_b): Module(
-                    (net_c): Module(
-                        (conv): Conv2d(16, 33, kernel_size=(3, 3), stride=(2, 2))
-                    )
-                    (linear): Linear(in_features=100, out_features=200, bias=True)
-                )
-            )
-
-        (The diagram shows an ``nn.Module`` ``A``. ``A`` has a nested
-        submodule ``net_b``, which itself has two submodules ``net_c``
-        and ``linear``. ``net_c`` then has a submodule ``conv``.)
-
-        To check whether or not we have the ``linear`` submodule, we
-        would call ``get_submodule("net_b.linear")``. To check whether
-        we have the ``conv`` submodule, we would call
-        ``get_submodule("net_b.net_c.conv")``.
-
-        The runtime of ``get_submodule`` is bounded by the degree
-        of module nesting in ``target``. A query against
-        ``named_modules`` achieves the same result, but it is O(N) in
-        the number of transitive modules. So, for a simple check to see
-        if some submodule exists, ``get_submodule`` should always be
-        used.
-
-        Args:
-            target: The fully-qualified string name of the submodule
-                to look for. (See above example for how to specify a
-                fully-qualified string.)
-
-        Returns:
-            torch.nn.Module: The submodule referenced by ``target``
-
-        Raises:
-            AttributeError: If the target string references an invalid
-                path or resolves to something that is not an
-                ``nn.Module``
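-
-        Example (a minimal sketch using the auto-generated child names of
-        :class:`~torch.nn.Sequential`)::
-
-            >>> # xdoctest: +SKIP("illustrative")
-            >>> seq = nn.Sequential(nn.Linear(2, 2), nn.ReLU())
-            >>> seq.get_submodule('0')
-            Linear(in_features=2, out_features=2, bias=True)
-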
-        """
-        if target == "":
-            return self
-
-        atoms: List[str] = target.split(".")
-        mod: torch.nn.Module = self
-
-        for item in atoms:
-
-            if not hasattr(mod, item):
-                raise AttributeError(mod._get_name() + " has no "
-                                     "attribute `" + item + "`")
-
-            mod = getattr(mod, item)
-
-            if not isinstance(mod, torch.nn.Module):
-                raise AttributeError("`" + item + "` is not "
-                                     "an nn.Module")
-
-        return mod
-
-    def get_parameter(self, target: str) -> "Parameter":
-        """Return the parameter given by ``target`` if it exists, otherwise throw an error.
-
-        See the docstring for ``get_submodule`` for a more detailed
-        explanation of this method's functionality as well as how to
-        correctly specify ``target``.
-
-        Args:
-            target: The fully-qualified string name of the Parameter
-                to look for. (See ``get_submodule`` for how to specify a
-                fully-qualified string.)
-
-        Returns:
-            torch.nn.Parameter: The Parameter referenced by ``target``
-
-        Raises:
-            AttributeError: If the target string references an invalid
-                path or resolves to something that is not an
-                ``nn.Parameter``
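-
-        Example (a minimal sketch)::
-
-            >>> # xdoctest: +SKIP("illustrative")
-            >>> seq = nn.Sequential(nn.Linear(2, 2))
-            >>> seq.get_parameter('0.bias').shape
-            torch.Size([2])
-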
-        """
-        module_path, _, param_name = target.rpartition(".")
-
-        mod: torch.nn.Module = self.get_submodule(module_path)
-
-        if not hasattr(mod, param_name):
-            raise AttributeError(mod._get_name() + " has no attribute `"
-                                 + param_name + "`")
-
-        param: torch.nn.Parameter = getattr(mod, param_name)
-
-        if not isinstance(param, torch.nn.Parameter):
-            raise AttributeError("`" + param_name + "` is not an "
-                                 "nn.Parameter")
-
-        return param
-
-    def get_buffer(self, target: str) -> "Tensor":
-        """Return the buffer given by ``target`` if it exists, otherwise throw an error.
-
-        See the docstring for ``get_submodule`` for a more detailed
-        explanation of this method's functionality as well as how to
-        correctly specify ``target``.
-
-        Args:
-            target: The fully-qualified string name of the buffer
-                to look for. (See ``get_submodule`` for how to specify a
-                fully-qualified string.)
-
-        Returns:
-            torch.Tensor: The buffer referenced by ``target``
-
-        Raises:
-            AttributeError: If the target string references an invalid
-                path or resolves to something that is not a
-                buffer
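-
-        Example (a minimal sketch; ``running_mean`` is a buffer registered by
-        :class:`~torch.nn.BatchNorm1d`)::
-
-            >>> # xdoctest: +SKIP("illustrative")
-            >>> bn = nn.BatchNorm1d(3)
-            >>> bn.get_buffer('running_mean')
-            tensor([0., 0., 0.])
-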
-        """
-        module_path, _, buffer_name = target.rpartition(".")
-
-        mod: torch.nn.Module = self.get_submodule(module_path)
-
-        if not hasattr(mod, buffer_name):
-            raise AttributeError(mod._get_name() + " has no attribute `"
-                                 + buffer_name + "`")
-
-        buffer: torch.Tensor = getattr(mod, buffer_name)
-
-        if buffer_name not in mod._buffers:
-            raise AttributeError("`" + buffer_name + "` is not a buffer")
-
-        return buffer
-
-    def get_extra_state(self) -> Any:
-        """Return any extra state to include in the module's state_dict.
-
-        Implement this and a corresponding :func:`set_extra_state` for your module
-        if you need to store extra state. This function is called when building the
-        module's `state_dict()`.
-
-        Note that extra state should be picklable to ensure working serialization
-        of the state_dict. We only provide backwards compatibility guarantees
-        for serializing Tensors; other objects may break backwards compatibility if
-        their serialized pickled form changes.
-
-        Returns:
-            object: Any extra state to store in the module's state_dict
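-
-        Example (a minimal sketch of a hypothetical subclass that round-trips
-        extra state through the state dict; see also :func:`set_extra_state`)::
-
-            >>> # xdoctest: +SKIP("illustrative")
-            >>> class Counter(nn.Module):
-            >>>     def __init__(self):
-            >>>         super().__init__()
-            >>>         self.count = 0
-            >>>     def get_extra_state(self):
-            >>>         return {'count': self.count}
-            >>>     def set_extra_state(self, state):
-            >>>         self.count = state['count']
-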
-        """
-        raise RuntimeError(
-            "Reached a code path in Module.get_extra_state() that should never be called. "
-            "Please file an issue at https://github.com/pytorch/pytorch/issues/new?template=bug-report.yml "
-            "to report this bug.")
-
-    def set_extra_state(self, state: Any) -> None:
-        """Set extra state contained in the loaded `state_dict`.
-
-        This function is called from :func:`load_state_dict` to handle any extra state
-        found within the `state_dict`. Implement this function and a corresponding
-        :func:`get_extra_state` for your module if you need to store extra state within its
-        `state_dict`.
-
-        Args:
-            state (dict): Extra state from the `state_dict`
-        """
-        raise RuntimeError(
-            "Reached a code path in Module.set_extra_state() that should never be called. "
-            "Please file an issue at https://github.com/pytorch/pytorch/issues/new?template=bug-report.yml "
-            "to report this bug.")
-
-    def _apply(self, fn, recurse=True):
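-        # Shared implementation behind ``cuda``/``cpu``/``to``/``float`` and
-        # related conversions: apply ``fn`` to every parameter, its gradient,
-        # and every buffer, recursing into children first when ``recurse`` is
-        # True.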
-        if recurse:
-            for module in self.children():
-                module._apply(fn)
-
-        def compute_should_use_set_data(tensor, tensor_applied):
-            if torch._has_compatible_shallow_copy_type(tensor, tensor_applied):
-                # If the new tensor has compatible tensor type as the existing tensor,
-                # the current behavior is to change the tensor in-place using `.data =`,
-                # and the future behavior is to overwrite the existing tensor. However,
-                # changing the current behavior is a BC-breaking change, and we want it
-                # to happen in future releases. So for now we introduce the
-                # `torch.__future__.get_overwrite_module_params_on_conversion()`
-                # global flag to let the user control whether they want the future
-                # behavior of overwriting the existing tensor or not.
-                return not torch.__future__.get_overwrite_module_params_on_conversion()
-            else:
-                return False
-
-        should_use_swap_tensors = torch.__future__.get_swap_module_params_on_conversion()
-
-        for key, param in self._parameters.items():
-            if param is None:
-                continue
-            # Tensors stored in modules are graph leaves, and we don't want to
-            # track autograd history of `param_applied`, so we have to use
-            # `with torch.no_grad():`
-            with torch.no_grad():
-                param_applied = fn(param)
-            p_should_use_set_data = compute_should_use_set_data(param, param_applied)
-
-            # subclasses may have multiple child tensors so we need to use swap_tensors
-            p_should_use_swap_tensors = should_use_swap_tensors or is_traceable_wrapper_subclass(param_applied)
-
-            param_grad = param.grad
-            if p_should_use_swap_tensors:
-                try:
-                    if param_grad is not None:
-                        # Accessing param.grad makes its at::Tensor's use_count 2, which will prevent swapping.
-                        # Decrement use count of the gradient by setting to None
-                        param.grad = None
-                    param_applied = torch.nn.Parameter(param_applied, requires_grad=param.requires_grad)
-                    torch.utils.swap_tensors(param, param_applied)
-                except Exception as e:
-                    if param_grad is not None:
-                        param.grad = param_grad
-                    raise RuntimeError(f"_apply(): Couldn't swap {self._get_name()}.{key}") from e
-                out_param = param
-            elif p_should_use_set_data:
-                param.data = param_applied
-                out_param = param
-            else:
-                assert isinstance(param, Parameter)
-                assert param.is_leaf
-                out_param = Parameter(param_applied, param.requires_grad)
-                self._parameters[key] = out_param
-
-            if param_grad is not None:
-                with torch.no_grad():
-                    grad_applied = fn(param_grad)
-                g_should_use_set_data = compute_should_use_set_data(param_grad, grad_applied)
-                if p_should_use_swap_tensors:
-                    grad_applied.requires_grad_(param_grad.requires_grad)
-                    try:
-                        torch.utils.swap_tensors(param_grad, grad_applied)
-                    except Exception as e:
-                        raise RuntimeError(f"_apply(): Couldn't swap {self._get_name()}.{key}.grad") from e
-                    out_param.grad = param_grad
-                elif g_should_use_set_data:
-                    assert out_param.grad is not None
-                    out_param.grad.data = grad_applied
-                else:
-                    assert param_grad.is_leaf
-                    out_param.grad = grad_applied.requires_grad_(param_grad.requires_grad)
-
-        for key, buf in self._buffers.items():
-            if buf is not None:
-                self._buffers[key] = fn(buf)
-
-        return self
-
-    def apply(self: T, fn: Callable[['Module'], None]) -> T:
-        r"""Apply ``fn`` recursively to every submodule (as returned by ``.children()``) as well as self.
-
-        Typical use includes initializing the parameters of a model
-        (see also :ref:`nn-init-doc`).
-
-        Args:
-            fn (:class:`Module` -> None): function to be applied to each submodule
-
-        Returns:
-            Module: self
-
-        Example::
-
-            >>> @torch.no_grad()
-            >>> def init_weights(m):
-            >>>     print(m)
-            >>>     if type(m) == nn.Linear:
-            >>>         m.weight.fill_(1.0)
-            >>>         print(m.weight)
-            >>> net = nn.Sequential(nn.Linear(2, 2), nn.Linear(2, 2))
-            >>> net.apply(init_weights)
-            Linear(in_features=2, out_features=2, bias=True)
-            Parameter containing:
-            tensor([[1., 1.],
-                    [1., 1.]], requires_grad=True)
-            Linear(in_features=2, out_features=2, bias=True)
-            Parameter containing:
-            tensor([[1., 1.],
-                    [1., 1.]], requires_grad=True)
-            Sequential(
-              (0): Linear(in_features=2, out_features=2, bias=True)
-              (1): Linear(in_features=2, out_features=2, bias=True)
-            )
-
-        """
-        for module in self.children():
-            module.apply(fn)
-        fn(self)
-        return self
-
-    def cuda(self: T, device: Optional[Union[int, device]] = None) -> T:
-        r"""Move all model parameters and buffers to the GPU.
-
-        This also makes associated parameters and buffers different objects. So
-        it should be called before constructing the optimizer if the module will
-        live on the GPU while being optimized.
-
-        .. note::
-            This method modifies the module in-place.
-
-        Args:
-            device (int, optional): if specified, all parameters will be
-                copied to that device
-
-        Returns:
-            Module: self
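-
-        Example (a minimal sketch of the recommended ordering: move the module
-        to the GPU first, then construct the optimizer)::
-
-            >>> # xdoctest: +SKIP("illustrative; requires a CUDA device")
-            >>> model = nn.Linear(2, 2).cuda()
-            >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
-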
-        """
-        return self._apply(lambda t: t.cuda(device))
-
-    def ipu(self: T, device: Optional[Union[int, device]] = None) -> T:
-        r"""Move all model parameters and buffers to the IPU.
-
-        This also makes associated parameters and buffers different objects. So
-        it should be called before constructing the optimizer if the module will
-        live on the IPU while being optimized.
-
-        .. note::
-            This method modifies the module in-place.
-
-        Arguments:
-            device (int, optional): if specified, all parameters will be
-                copied to that device
-
-        Returns:
-            Module: self
-        """
-        return self._apply(lambda t: t.ipu(device))
-
-    def xpu(self: T, device: Optional[Union[int, device]] = None) -> T:
-        r"""Move all model parameters and buffers to the XPU.
-
-        This also makes associated parameters and buffers different objects. So
-        it should be called before constructing the optimizer if the module will
-        live on the XPU while being optimized.
-
-        .. note::
-            This method modifies the module in-place.
-
-        Arguments:
-            device (int, optional): if specified, all parameters will be
-                copied to that device
-
-        Returns:
-            Module: self
-        """
-        return self._apply(lambda t: t.xpu(device))
-
-    def cpu(self: T) -> T:
-        r"""Move all model parameters and buffers to the CPU.
-
-        .. note::
-            This method modifies the module in-place.
-
-        Returns:
-            Module: self
-        """
-        return self._apply(lambda t: t.cpu())
-
-    def type(self: T, dst_type: Union[dtype, str]) -> T:
-        r"""Casts all parameters and buffers to :attr:`dst_type`.
-
-        .. note::
-            This method modifies the module in-place.
-
-        Args:
-            dst_type (type or string): the desired type
-
-        Returns:
-            Module: self
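-
-        Example (a minimal sketch)::
-
-            >>> # xdoctest: +SKIP("illustrative")
-            >>> m = nn.Linear(2, 2).type(torch.float64)
-            >>> m.weight.dtype
-            torch.float64
-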
-        """
-        return self._apply(lambda t: t.type(dst_type))
-
-    def float(self: T) -> T:
-        r"""Casts all floating point parameters and buffers to ``float`` datatype.
-
-        .. note::
-            This method modifies the module in-place.
-
-        Returns:
-            Module: self
-        """
-        return self._apply(lambda t: t.float() if t.is_floating_point() else t)
-
-    def double(self: T) -> T:
-        r"""Casts all floating point parameters and buffers to ``double`` datatype.
-
-        .. note::
-            This method modifies the module in-place.
-
-        Returns:
-            Module: self
-        """
-        return self._apply(lambda t: t.double() if t.is_floating_point() else t)
-
-    def half(self: T) -> T:
-        r"""Casts all floating point parameters and buffers to ``half`` datatype.
-
-        .. note::
-            This method modifies the module in-place.
-
-        Returns:
-            Module: self
-        """
-        return self._apply(lambda t: t.half() if t.is_floating_point() else t)
-
-    def bfloat16(self: T) -> T:
-        r"""Casts all floating point parameters and buffers to ``bfloat16`` datatype.
-
-        .. note::
-            This method modifies the module in-place.
-
-        Returns:
-            Module: self
-        """
-        return self._apply(lambda t: t.bfloat16() if t.is_floating_point() else t)
-
-    def to_empty(self: T, *, device: Optional[DeviceLikeType], recurse: bool = True) -> T:
-        r"""Move the parameters and buffers to the specified device without copying storage.
-
-        Args:
-            device (:class:`torch.device`): The desired device of the parameters
-                and buffers in this module.
-            recurse (bool): Whether parameters and buffers of submodules should
-                be recursively moved to the specified device.
-
-        Returns:
-            Module: self
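-
-        Example (a minimal sketch: materializing a module that was created on
-        the ``meta`` device; values are uninitialized after the move)::
-
-            >>> # xdoctest: +SKIP("illustrative")
-            >>> with torch.device('meta'):
-            >>>     m = nn.Linear(8, 4)
-            >>> m = m.to_empty(device='cpu')
-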
-        """
-        return self._apply(lambda t: torch.empty_like(t, device=device), recurse=recurse)
-
-    @overload
-    def to(self, device: Optional[DeviceLikeType] = ..., dtype: Optional[dtype] = ...,
-           non_blocking: bool = ...) -> Self:
-        ...
-
-    @overload
-    def to(self, dtype: dtype, non_blocking: bool = ...) -> Self:
-        ...
-
-    @overload
-    def to(self, tensor: Tensor, non_blocking: bool = ...) -> Self:
-        ...
-
-    def to(self, *args, **kwargs):
-        r"""Move and/or cast the parameters and buffers.
-
-        This can be called as
-
-        .. function:: to(device=None, dtype=None, non_blocking=False)
-           :noindex:
-
-        .. function:: to(dtype, non_blocking=False)
-           :noindex:
-
-        .. function:: to(tensor, non_blocking=False)
-           :noindex:
-
-        .. function:: to(memory_format=torch.channels_last)
-           :noindex:
-
-        Its signature is similar to :meth:`torch.Tensor.to`, but only accepts
-        floating point or complex :attr:`dtype`\ s. In addition, this method will
-        only cast the floating point or complex parameters and buffers to :attr:`dtype`
-        (if given). The integral parameters and buffers will be moved to
-        :attr:`device`, if that is given, but with dtypes unchanged. When
-        :attr:`non_blocking` is set, it tries to convert/move asynchronously
-        with respect to the host if possible, e.g., moving CPU Tensors with
-        pinned memory to CUDA devices.
-
-        See below for examples.
-
-        .. note::
-            This method modifies the module in-place.
-
-        Args:
-            device (:class:`torch.device`): the desired device of the parameters
-                and buffers in this module
-            dtype (:class:`torch.dtype`): the desired floating point or complex dtype of
-                the parameters and buffers in this module
-            tensor (torch.Tensor): Tensor whose dtype and device are the desired
-                dtype and device for all parameters and buffers in this module
-            memory_format (:class:`torch.memory_format`): the desired memory
-                format for 4D parameters and buffers in this module (keyword
-                only argument)
-
-        Returns:
-            Module: self
-
-        Examples::
-
-            >>> # xdoctest: +IGNORE_WANT("non-deterministic")
-            >>> linear = nn.Linear(2, 2)
-            >>> linear.weight
-            Parameter containing:
-            tensor([[ 0.1913, -0.3420],
-                    [-0.5113, -0.2325]])
-            >>> linear.to(torch.double)
-            Linear(in_features=2, out_features=2, bias=True)
-            >>> linear.weight
-            Parameter containing:
-            tensor([[ 0.1913, -0.3420],
-                    [-0.5113, -0.2325]], dtype=torch.float64)
-            >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA1)
-            >>> gpu1 = torch.device("cuda:1")
-            >>> linear.to(gpu1, dtype=torch.half, non_blocking=True)
-            Linear(in_features=2, out_features=2, bias=True)
-            >>> linear.weight
-            Parameter containing:
-            tensor([[ 0.1914, -0.3420],
-                    [-0.5112, -0.2324]], dtype=torch.float16, device='cuda:1')
-            >>> cpu = torch.device("cpu")
-            >>> linear.to(cpu)
-            Linear(in_features=2, out_features=2, bias=True)
-            >>> linear.weight
-            Parameter containing:
-            tensor([[ 0.1914, -0.3420],
-                    [-0.5112, -0.2324]], dtype=torch.float16)
-
-            >>> linear = nn.Linear(2, 2, bias=None).to(torch.cdouble)
-            >>> linear.weight
-            Parameter containing:
-            tensor([[ 0.3741+0.j,  0.2382+0.j],
-                    [ 0.5593+0.j, -0.4443+0.j]], dtype=torch.complex128)
-            >>> linear(torch.ones(3, 2, dtype=torch.cdouble))
-            tensor([[0.6122+0.j, 0.1150+0.j],
-                    [0.6122+0.j, 0.1150+0.j],
-                    [0.6122+0.j, 0.1150+0.j]], dtype=torch.complex128)
-
-        """
-        device, dtype, non_blocking, convert_to_format = torch._C._nn._parse_to(*args, **kwargs)
-
-        if dtype is not None:
-            if not (dtype.is_floating_point or dtype.is_complex):
-                raise TypeError('nn.Module.to only accepts floating point or complex '
-                                f'dtypes, but got desired dtype={dtype}')
-            if dtype.is_complex:
-                warnings.warn(
-                    "Complex modules are a new feature under active development whose design may change, "
-                    "and some modules might not work as expected when using complex tensors as parameters or buffers. "
-                    "Please file an issue at https://github.com/pytorch/pytorch/issues/new?template=bug-report.yml "
-                    "if a complex module does not work as expected.")
-
-        def convert(t):
-            try:
-                if convert_to_format is not None and t.dim() in (4, 5):
-                    return t.to(
-                        device,
-                        dtype if t.is_floating_point() or t.is_complex() else None,
-                        non_blocking,
-                        memory_format=convert_to_format,
-                    )
-                return t.to(
-                    device,
-                    dtype if t.is_floating_point() or t.is_complex() else None,
-                    non_blocking,
-                )
-            except NotImplementedError as e:
-                if str(e) == "Cannot copy out of meta tensor; no data!":
-                    raise NotImplementedError(
-                        f"{e} Please use torch.nn.Module.to_empty() instead of torch.nn.Module.to() "
-                        f"when moving module from meta to a different device."
-                    ) from None
-                else:
-                    raise
-
-        return self._apply(convert)
-
-    def register_full_backward_pre_hook(
-        self,
-        hook: Callable[["Module", _grad_t], Union[None, _grad_t]],
-        prepend: bool = False,
-    ) -> RemovableHandle:
-        r"""Register a backward pre-hook on the module.
-
-        The hook will be called every time the gradients for the module are computed.
-        The hook should have the following signature::
-
-            hook(module, grad_output) -> tuple[Tensor] or None
-
-        The :attr:`grad_output` is a tuple. The hook should
-        not modify its arguments, but it can optionally return a new gradient with
-        respect to the output that will be used in place of :attr:`grad_output` in
-        subsequent computations. Entries in :attr:`grad_output` will be ``None`` for
-        all non-Tensor arguments.
-
-        For technical reasons, when this hook is applied to a Module, its forward function will
-        receive a view of each Tensor passed to the Module. Similarly, the caller will receive a view
-        of each Tensor returned by the Module's forward function.
-
-        .. warning ::
-            Modifying inputs inplace is not allowed when using backward hooks and
-            will raise an error.
-
-        Args:
-            hook (Callable): The user-defined hook to be registered.
-            prepend (bool): If ``True``, the provided ``hook`` will be fired before
-                all existing ``backward_pre`` hooks on this
-                :class:`torch.nn.modules.Module`. Otherwise, the provided
-                ``hook`` will be fired after all existing ``backward_pre`` hooks
-                on this :class:`torch.nn.modules.Module`. Note that global
-                ``backward_pre`` hooks registered with
-                :func:`register_module_full_backward_pre_hook` will fire before
-                all hooks registered by this method.
-
-        Returns:
-            :class:`torch.utils.hooks.RemovableHandle`:
-                a handle that can be used to remove the added hook by calling
-                ``handle.remove()``
-
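-        Example (a minimal sketch; ``halve_grads`` is a hypothetical hook that
-        rescales the output gradients)::
-
-            >>> # xdoctest: +SKIP("illustrative")
-            >>> def halve_grads(module, grad_output):
-            >>>     return tuple(g * 0.5 if g is not None else None for g in grad_output)
-            >>> m = nn.Linear(2, 2)
-            >>> handle = m.register_full_backward_pre_hook(halve_grads)
-            >>> handle.remove()
-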
-        """
-        handle = hooks.RemovableHandle(self._backward_pre_hooks)
-        self._backward_pre_hooks[handle.id] = hook
-        if prepend:
-            self._backward_pre_hooks.move_to_end(handle.id, last=False)  # type: ignore[attr-defined]
-        return handle
-
-    def register_backward_hook(
-        self, hook: Callable[['Module', _grad_t, _grad_t], Union[None, _grad_t]]
-    ) -> RemovableHandle:
-        r"""Register a backward hook on the module.
-
-        This function is deprecated in favor of :meth:`~torch.nn.Module.register_full_backward_hook` and
-        the behavior of this function will change in future versions.
-
-        Returns:
-            :class:`torch.utils.hooks.RemovableHandle`:
-                a handle that can be used to remove the added hook by calling
-                ``handle.remove()``
-
-        """
-        if self._is_full_backward_hook is True:
-            raise RuntimeError("Cannot use both regular backward hooks and full backward hooks on a "
-                               "single Module. Please use only one of them.")
-
-        self._is_full_backward_hook = False
-
-        handle = hooks.RemovableHandle(self._backward_hooks)
-        self._backward_hooks[handle.id] = hook
-        return handle
-
-    def register_full_backward_hook(
-        self,
-        hook: Callable[["Module", _grad_t, _grad_t], Union[None, _grad_t]],
-        prepend: bool = False,
-    ) -> RemovableHandle:
-        r"""Register a backward hook on the module.
-
-        The hook will be called every time the gradients with respect to a module
-        are computed, i.e. the hook will execute if and only if the gradients with
-        respect to module outputs are computed. The hook should have the following
-        signature::
-
-            hook(module, grad_input, grad_output) -> tuple[Tensor] or None
-
-        The :attr:`grad_input` and :attr:`grad_output` are tuples that contain the gradients
-        with respect to the inputs and outputs respectively. The hook should
-        not modify its arguments, but it can optionally return a new gradient with
-        respect to the input that will be used in place of :attr:`grad_input` in
-        subsequent computations. :attr:`grad_input` will only correspond to the inputs given
-        as positional arguments and all kwarg arguments are ignored. Entries
-        in :attr:`grad_input` and :attr:`grad_output` will be ``None`` for all non-Tensor
-        arguments.
-
-        For technical reasons, when this hook is applied to a Module, its forward function will
-        receive a view of each Tensor passed to the Module. Similarly, the caller will receive a view
-        of each Tensor returned by the Module's forward function.
-
-        .. warning ::
-            Modifying inputs or outputs inplace is not allowed when using backward hooks and
-            will raise an error.
-
-        Args:
-            hook (Callable): The user-defined hook to be registered.
-            prepend (bool): If ``True``, the provided ``hook`` will be fired before
-                all existing ``backward`` hooks on this
-                :class:`torch.nn.modules.Module`. Otherwise, the provided
-                ``hook`` will be fired after all existing ``backward`` hooks on
-                this :class:`torch.nn.modules.Module`. Note that global
-                ``backward`` hooks registered with
-                :func:`register_module_full_backward_hook` will fire before
-                all hooks registered by this method.
-
-        Returns:
-            :class:`torch.utils.hooks.RemovableHandle`:
-                a handle that can be used to remove the added hook by calling
-                ``handle.remove()``
-
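-        Example (a minimal sketch; ``report`` is a hypothetical hook that only
-        inspects gradient shapes)::
-
-            >>> # xdoctest: +SKIP("illustrative")
-            >>> def report(module, grad_input, grad_output):
-            >>>     print([g.shape for g in grad_output if g is not None])
-            >>> m = nn.Linear(2, 2)
-            >>> handle = m.register_full_backward_hook(report)
-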
-        """
-        if self._is_full_backward_hook is False:
-            raise RuntimeError("Cannot use both regular backward hooks and full backward hooks on a "
-                               "single Module. Please use only one of them.")
-
-        self._is_full_backward_hook = True
-
-        handle = hooks.RemovableHandle(self._backward_hooks)
-        self._backward_hooks[handle.id] = hook
-        if prepend:
-            self._backward_hooks.move_to_end(handle.id, last=False)  # type: ignore[attr-defined]
-        return handle
-
-    def _get_backward_hooks(self):
-        r"""Return the backward hooks for use in the call function.
-
-        It returns two lists, one with the full backward hooks and one with the non-full
-        backward hooks.
-        """
-        full_backward_hooks: List[Callable] = []
-        if (_global_is_full_backward_hook is True):
-            full_backward_hooks += _global_backward_hooks.values()
-        if (self._is_full_backward_hook is True):
-            full_backward_hooks += self._backward_hooks.values()
-
-        non_full_backward_hooks: List[Callable] = []
-        if (_global_is_full_backward_hook is False):
-            non_full_backward_hooks += _global_backward_hooks.values()
-        if (self._is_full_backward_hook is False):
-            non_full_backward_hooks += self._backward_hooks.values()
-
-        return full_backward_hooks, non_full_backward_hooks
-
-    def _get_backward_pre_hooks(self):
-        backward_pre_hooks: List[Callable] = []
-        backward_pre_hooks += _global_backward_pre_hooks.values()
-        backward_pre_hooks += self._backward_pre_hooks.values()
-
-        return backward_pre_hooks
-
-    def _maybe_warn_non_full_backward_hook(self, inputs, result, grad_fn):
-        if not isinstance(result, torch.Tensor):
-            if not (isinstance(result, tuple) and all(isinstance(r, torch.Tensor) for r in result)):
-                warnings.warn("Using non-full backward hooks on a Module that does not return a "
-                              "single Tensor or a tuple of Tensors is deprecated and will be removed "
-                              "in future versions. This hook will be missing some of the grad_output. "
-                              "Please use register_full_backward_hook to get the documented behavior.")
-                return
-        else:
-            result = (result,)
-
-        if not isinstance(inputs, torch.Tensor):
-            if not (isinstance(inputs, tuple) and all(isinstance(i, torch.Tensor) for i in inputs)):
-                warnings.warn("Using non-full backward hooks on a Module that does not take as input a "
-                              "single Tensor or a tuple of Tensors is deprecated and will be removed "
-                              "in future versions. This hook will be missing some of the grad_input. "
-                              "Please use register_full_backward_hook to get the documented behavior.")
-                return
-        else:
-            inputs = (inputs,)
-
-        # At this point we are sure that inputs and result are tuples of Tensors
-        out_grad_fn = {r.grad_fn for r in result if r.grad_fn is not None}
-        if len(out_grad_fn) == 0 or (len(out_grad_fn) == 1 and grad_fn not in out_grad_fn):
-            warnings.warn("Using a non-full backward hook when outputs are nested in python data structure "
-                          "is deprecated and will be removed in future versions. This hook will be missing "
-                          "some grad_output.")
-        elif len(out_grad_fn) > 1:
-            warnings.warn("Using a non-full backward hook when outputs are generated by different autograd Nodes "
-                          "is deprecated and will be removed in future versions. This hook will be missing "
-                          "some grad_output. Please use register_full_backward_hook to get the documented behavior.")
-        else:
-            # At this point the grad_output part of the hook will most likely be correct
-            inputs_grad_fn = {i.grad_fn for i in inputs if i.grad_fn is not None}
-
-            next_functions = {n[0] for n in grad_fn.next_functions}
-
-            if inputs_grad_fn != next_functions:
-                warnings.warn("Using a non-full backward hook when the forward contains multiple autograd Nodes "
-                              "is deprecated and will be removed in future versions. This hook will be missing "
-                              "some grad_input. Please use register_full_backward_hook to get the documented "
-                              "behavior.")
-
-    def register_forward_pre_hook(
-        self,
-        hook: Union[
-            Callable[[T, Tuple[Any, ...]], Optional[Any]],
-            Callable[[T, Tuple[Any, ...], Dict[str, Any]], Optional[Tuple[Any, Dict[str, Any]]]],
-        ],
-        *,
-        prepend: bool = False,
-        with_kwargs: bool = False,
-    ) -> RemovableHandle:
-        r"""Register a forward pre-hook on the module.
-
-        The hook will be called every time before :func:`forward` is invoked.
-
-
-        If ``with_kwargs`` is ``False`` or not specified, the input contains only
-        the positional arguments given to the module. Keyword arguments won't be
-        passed to the hooks, only to the ``forward``. The hook can modify the
-        input. The user can return either a tuple or a single modified value from
-        the hook. We will wrap the value into a tuple if a single value is returned
-        (unless that value is already a tuple). The hook should have the
-        following signature::
-
-            hook(module, args) -> None or modified input
-
-        If ``with_kwargs`` is ``True``, the forward pre-hook will be passed the
-        kwargs given to the forward function. If the hook modifies the
-        input, both the args and kwargs should be returned. The hook should have
-        the following signature::
-
-            hook(module, args, kwargs) -> None or a tuple of modified input and kwargs
-
-        Args:
-            hook (Callable): The user defined hook to be registered.
-            prepend (bool): If ``True``, the provided ``hook`` will be fired before
-                all existing ``forward_pre`` hooks on this
-                :class:`torch.nn.modules.Module`. Otherwise, the provided
-                ``hook`` will be fired after all existing ``forward_pre`` hooks
-                on this :class:`torch.nn.modules.Module`. Note that global
-                ``forward_pre`` hooks registered with
-                :func:`register_module_forward_pre_hook` will fire before all
-                hooks registered by this method.
-                Default: ``False``
-            with_kwargs (bool): If ``True``, the ``hook`` will be passed the kwargs
-                given to the forward function.
-                Default: ``False``
-
-        Returns:
-            :class:`torch.utils.hooks.RemovableHandle`:
-                a handle that can be used to remove the added hook by calling
-                ``handle.remove()``
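-
-        Example (a minimal sketch; ``double_input`` is a hypothetical pre-hook
-        that rescales the positional input)::
-
-            >>> # xdoctest: +SKIP("illustrative")
-            >>> def double_input(module, args):
-            >>>     return (args[0] * 2,)
-            >>> m = nn.Linear(2, 2)
-            >>> handle = m.register_forward_pre_hook(double_input)
-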
-        """
-        handle = hooks.RemovableHandle(
-            self._forward_pre_hooks,
-            extra_dict=self._forward_pre_hooks_with_kwargs
-        )
-        self._forward_pre_hooks[handle.id] = hook
-        if with_kwargs:
-            self._forward_pre_hooks_with_kwargs[handle.id] = True
-
-        if prepend:
-            self._forward_pre_hooks.move_to_end(handle.id, last=False)  # type: ignore[attr-defined]
-        return handle
-
-    def register_forward_hook(
-        self,
-        hook: Union[
-            Callable[[T, Tuple[Any, ...], Any], Optional[Any]],
-            Callable[[T, Tuple[Any, ...], Dict[str, Any], Any], Optional[Any]],
-        ],
-        *,
-        prepend: bool = False,
-        with_kwargs: bool = False,
-        always_call: bool = False,
-    ) -> RemovableHandle:
-        r"""Register a forward hook on the module.
-
-        The hook will be called every time after :func:`forward` has computed an output.
-
-        If ``with_kwargs`` is ``False`` or not specified, the input contains only
-        the positional arguments given to the module. Keyword arguments won't be
-        passed to the hooks, only to the ``forward``. The hook can modify the
-        output. It can also modify the input in-place, but this has no effect on
-        the forward pass, since the hook runs after :func:`forward` is called. The hook
-        should have the following signature::
-
-            hook(module, args, output) -> None or modified output
-
-        If ``with_kwargs`` is ``True``, the forward hook will be passed the
-        ``kwargs`` given to the forward function and be expected to return the
-        output possibly modified. The hook should have the following signature::
-
-            hook(module, args, kwargs, output) -> None or modified output
-
-        Args:
-            hook (Callable): The user defined hook to be registered.
-            prepend (bool): If ``True``, the provided ``hook`` will be fired
-                before all existing ``forward`` hooks on this
-                :class:`torch.nn.modules.Module`. Otherwise, the provided
-                ``hook`` will be fired after all existing ``forward`` hooks on
-                this :class:`torch.nn.modules.Module`. Note that global
-                ``forward`` hooks registered with
-                :func:`register_module_forward_hook` will fire before all hooks
-                registered by this method.
-                Default: ``False``
-            with_kwargs (bool): If ``True``, the ``hook`` will be passed the
-                kwargs given to the forward function.
-                Default: ``False``
-            always_call (bool): If ``True`` the ``hook`` will be run regardless of
-                whether an exception is raised while calling the Module.
-                Default: ``False``
-
-        Returns:
-            :class:`torch.utils.hooks.RemovableHandle`:
-                a handle that can be used to remove the added hook by calling
-                ``handle.remove()``
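-
-        Example (a minimal sketch; ``save_output`` is a hypothetical hook that
-        captures the module's output)::
-
-            >>> # xdoctest: +SKIP("illustrative")
-            >>> activations = {}
-            >>> def save_output(module, args, output):
-            >>>     activations['out'] = output.detach()
-            >>> m = nn.Linear(2, 2)
-            >>> handle = m.register_forward_hook(save_output)
-            >>> _ = m(torch.randn(1, 2))
-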
-        """
-        handle = hooks.RemovableHandle(
-            self._forward_hooks,
-            extra_dict=[self._forward_hooks_with_kwargs, self._forward_hooks_always_called],
-        )
-        self._forward_hooks[handle.id] = hook
-        if with_kwargs:
-            self._forward_hooks_with_kwargs[handle.id] = True
-        if always_call:
-            self._forward_hooks_always_called[handle.id] = True
-        if prepend:
-            self._forward_hooks.move_to_end(handle.id, last=False)  # type: ignore[attr-defined]
-        return handle
-
-    def _slow_forward(self, *input, **kwargs):
-        tracing_state = torch._C._get_tracing_state()
-        if not tracing_state or isinstance(self.forward, torch._C.ScriptMethod):
-            return self.forward(*input, **kwargs)
-        recording_scopes = torch.jit._trace._trace_module_map is not None
-        if recording_scopes:
-            # type ignore was added because at this point one knows that
-            # torch.jit._trace._trace_module_map is not Optional and has type Dict[Any, Any]
-            name = torch.jit._trace._trace_module_map[self] if self in torch.jit._trace._trace_module_map else None  # type: ignore[index, operator] # noqa: B950
-            if name:
-                tracing_state.push_scope(name)
-            else:
-                recording_scopes = False
-        try:
-            result = self.forward(*input, **kwargs)
-        finally:
-            if recording_scopes:
-                tracing_state.pop_scope()
-        return result
-
-    def _wrapped_call_impl(self, *args, **kwargs):
-        if self._compiled_call_impl is not None:
-            return self._compiled_call_impl(*args, **kwargs)  # type: ignore[misc]
-        else:
-            return self._call_impl(*args, **kwargs)
-
-    def _call_impl(self, *args, **kwargs):
-        forward_call = (self._slow_forward if torch._C._get_tracing_state() else self.forward)
-        # If we don't have any hooks, we want to skip the rest of the logic in
-        # this function, and just call forward.
-        if not (self._backward_hooks or self._backward_pre_hooks or self._forward_hooks or self._forward_pre_hooks
-                or _global_backward_pre_hooks or _global_backward_hooks
-                or _global_forward_hooks or _global_forward_pre_hooks):
-            return forward_call(*args, **kwargs)
-
-        try:
-            result = None
-            called_always_called_hooks = set()
-
-            full_backward_hooks, non_full_backward_hooks = [], []
-            backward_pre_hooks = []
-            if self._backward_pre_hooks or _global_backward_pre_hooks:
-                backward_pre_hooks = self._get_backward_pre_hooks()
-
-            if self._backward_hooks or _global_backward_hooks:
-                full_backward_hooks, non_full_backward_hooks = self._get_backward_hooks()
-
-            if _global_forward_pre_hooks or self._forward_pre_hooks:
-                for hook_id, hook in (
-                    *_global_forward_pre_hooks.items(),
-                    *self._forward_pre_hooks.items(),
-                ):
-                    if hook_id in self._forward_pre_hooks_with_kwargs:
-                        args_kwargs_result = hook(self, args, kwargs)  # type: ignore[misc]
-                        if args_kwargs_result is not None:
-                            if isinstance(args_kwargs_result, tuple) and len(args_kwargs_result) == 2:
-                                args, kwargs = args_kwargs_result
-                            else:
-                                raise RuntimeError(
-                                    "forward pre-hook must return None or a tuple "
-                                    f"of (new_args, new_kwargs), but got {args_kwargs_result}."
-                                )
-                    else:
-                        args_result = hook(self, args)
-                        if args_result is not None:
-                            if not isinstance(args_result, tuple):
-                                args_result = (args_result,)
-                            args = args_result
-
-            bw_hook = None
-            if full_backward_hooks or backward_pre_hooks:
-                bw_hook = hooks.BackwardHook(self, full_backward_hooks, backward_pre_hooks)
-                args = bw_hook.setup_input_hook(args)
-
-            result = forward_call(*args, **kwargs)
-            if _global_forward_hooks or self._forward_hooks:
-                for hook_id, hook in (
-                    *_global_forward_hooks.items(),
-                    *self._forward_hooks.items(),
-                ):
-                    # mark that always called hook is run
-                    if hook_id in self._forward_hooks_always_called or hook_id in _global_forward_hooks_always_called:
-                        called_always_called_hooks.add(hook_id)
-
-                    if hook_id in self._forward_hooks_with_kwargs:
-                        hook_result = hook(self, args, kwargs, result)
-                    else:
-                        hook_result = hook(self, args, result)
-
-                    if hook_result is not None:
-                        result = hook_result
-
-            if bw_hook:
-                if not isinstance(result, (torch.Tensor, tuple)):
-                    warnings.warn("For backward hooks to be called,"
-                                  " module output should be a Tensor or a tuple of Tensors"
-                                  f" but received {type(result)}")
-                result = bw_hook.setup_output_hook(result)
-
-            # Handle the non-full backward hooks
-            if non_full_backward_hooks:
-                var = result
-                while not isinstance(var, torch.Tensor):
-                    if isinstance(var, dict):
-                        var = next(v for v in var.values() if isinstance(v, torch.Tensor))
-                    else:
-                        var = var[0]
-                grad_fn = var.grad_fn
-                if grad_fn is not None:
-                    for hook in non_full_backward_hooks:
-                        grad_fn.register_hook(_WrappedHook(hook, self))
-                    self._maybe_warn_non_full_backward_hook(args, result, grad_fn)
-
-            return result
-
-        except Exception:
-            # run always called hooks if they have not already been run
-            # For now only forward hooks have the always_call option but perhaps
-            # this functionality should be added to full backward hooks as well.
-            for hook_id, hook in _global_forward_hooks.items():
-                if hook_id in _global_forward_hooks_always_called and hook_id not in called_always_called_hooks:  # type: ignore[possibly-undefined]
-                    try:
-                        hook_result = hook(self, args, result)  # type: ignore[possibly-undefined]
-                        if hook_result is not None:
-                            result = hook_result
-                    except Exception as e:
-                        warnings.warn("global module forward hook with ``always_call=True`` raised an exception "
-                                      f"that was silenced as another error was raised in forward: {str(e)}")
-                        continue
-
-            for hook_id, hook in self._forward_hooks.items():
-                if hook_id in self._forward_hooks_always_called and hook_id not in called_always_called_hooks:  # type: ignore[possibly-undefined]
-                    try:
-                        if hook_id in self._forward_hooks_with_kwargs:
-                            hook_result = hook(self, args, kwargs, result)  # type: ignore[possibly-undefined]
-                        else:
-                            hook_result = hook(self, args, result)  # type: ignore[possibly-undefined]
-                        if hook_result is not None:
-                            result = hook_result
-                    except Exception as e:
-                        warnings.warn("module forward hook with ``always_call=True`` raised an exception "
-                                      f"that was silenced as another error was raised in forward: {str(e)}")
-                        continue
-            # raise exception raised in try block
-            raise
-
-
-    __call__ : Callable[..., Any] = _wrapped_call_impl
-
-    def __getstate__(self):
-        state = self.__dict__.copy()
-        state.pop("_compiled_call_impl", None)
-        return state
-
-    def __setstate__(self, state):
-        self.__dict__.update(state)
-
-        # Support loading old checkpoints that don't have the following attrs:
-        if '_forward_pre_hooks' not in self.__dict__:
-            self._forward_pre_hooks = OrderedDict()
-        if '_forward_pre_hooks_with_kwargs' not in self.__dict__:
-            self._forward_pre_hooks_with_kwargs = OrderedDict()
-        if '_forward_hooks_with_kwargs' not in self.__dict__:
-            self._forward_hooks_with_kwargs = OrderedDict()
-        if '_forward_hooks_always_called' not in self.__dict__:
-            self._forward_hooks_always_called = OrderedDict()
-        if '_state_dict_hooks' not in self.__dict__:
-            self._state_dict_hooks = OrderedDict()
-        if '_state_dict_pre_hooks' not in self.__dict__:
-            self._state_dict_pre_hooks = OrderedDict()
-        if '_load_state_dict_pre_hooks' not in self.__dict__:
-            self._load_state_dict_pre_hooks = OrderedDict()
-        if '_load_state_dict_post_hooks' not in self.__dict__:
-            self._load_state_dict_post_hooks = OrderedDict()
-        if '_non_persistent_buffers_set' not in self.__dict__:
-            self._non_persistent_buffers_set = set()
-        if '_is_full_backward_hook' not in self.__dict__:
-            self._is_full_backward_hook = None
-        if '_backward_pre_hooks' not in self.__dict__:
-            self._backward_pre_hooks = OrderedDict()
-
-    # On the return type:
-    # We choose to return `Any` in the `__getattr__` type signature instead of a more strict `Union[Tensor, Module]`.
-    # This is done for better interop with various type checkers for the end users.
-    # Having a stricter return type doesn't play nicely with `register_buffer()` and forces
-    # people to excessively use type-ignores, asserts, casts, etc.
-    # See full discussion on the problems with returning `Union` here
-    # https://github.com/microsoft/pyright/issues/4213
-    def __getattr__(self, name: str) -> Any:
-        if '_parameters' in self.__dict__:
-            _parameters = self.__dict__['_parameters']
-            if name in _parameters:
-                return _parameters[name]
-        if '_buffers' in self.__dict__:
-            _buffers = self.__dict__['_buffers']
-            if name in _buffers:
-                return _buffers[name]
-        if '_modules' in self.__dict__:
-            modules = self.__dict__['_modules']
-            if name in modules:
-                return modules[name]
-        raise AttributeError(f"'{type(self).__name__}' object has no attribute '{name}'")
-
-    def __setattr__(self, name: str, value: Union[Tensor, 'Module']) -> None:
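-        # Route the assignment to the matching registry: ``Parameter`` values
-        # go to ``_parameters``, ``Module`` values to ``_modules``, and Tensors
-        # assigned over an existing buffer name to ``_buffers``; everything
-        # else falls through to plain object attribute assignment.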
-        def remove_from(*dicts_or_sets):
-            for d in dicts_or_sets:
-                if name in d:
-                    if isinstance(d, dict):
-                        del d[name]
-                    else:
-                        d.discard(name)
-
-        params = self.__dict__.get('_parameters')
-        if isinstance(value, Parameter):
-            if params is None:
-                raise AttributeError(
-                    "cannot assign parameters before Module.__init__() call")
-            remove_from(self.__dict__, self._buffers, self._modules, self._non_persistent_buffers_set)
-            self.register_parameter(name, value)
-        elif params is not None and name in params:
-            if value is not None:
-                raise TypeError(f"cannot assign '{torch.typename(value)}' as parameter '{name}' "
-                                "(torch.nn.Parameter or None expected)"
-                                )
-            self.register_parameter(name, value)
-        else:
-            modules = self.__dict__.get('_modules')
-            if isinstance(value, Module):
-                if modules is None:
-                    raise AttributeError(
-                        "cannot assign module before Module.__init__() call")
-                remove_from(self.__dict__, self._parameters, self._buffers, self._non_persistent_buffers_set)
-                for hook in _global_module_registration_hooks.values():
-                    output = hook(self, name, value)
-                    if output is not None:
-                        value = output
-                modules[name] = value
-            elif modules is not None and name in modules:
-                if value is not None:
-                    raise TypeError(f"cannot assign '{torch.typename(value)}' as child module '{name}' "
-                                    "(torch.nn.Module or None expected)"
-                                    )
-                for hook in _global_module_registration_hooks.values():
-                    output = hook(self, name, value)
-                    if output is not None:
-                        value = output
-                modules[name] = value
-            else:
-                buffers = self.__dict__.get('_buffers')
-                if buffers is not None and name in buffers:
-                    if value is not None and not isinstance(value, torch.Tensor):
-                        raise TypeError(f"cannot assign '{torch.typename(value)}' as buffer '{name}' "
-                                        "(torch.Tensor or None expected)"
-                                        )
-                    for hook in _global_buffer_registration_hooks.values():
-                        output = hook(self, name, value)
-                        if output is not None:
-                            value = output
-                    buffers[name] = value
-                else:
-                    super().__setattr__(name, value)
-
-    def __delattr__(self, name):
-        if name in self._parameters:
-            del self._parameters[name]
-        elif name in self._buffers:
-            del self._buffers[name]
-            self._non_persistent_buffers_set.discard(name)
-        elif name in self._modules:
-            del self._modules[name]
-        else:
-            super().__delattr__(name)
-
-    def _register_state_dict_hook(self, hook):
-        r"""Register a state-dict hook.
-
-        These hooks will be called with arguments: `self`, `state_dict`,
-        `prefix`, `local_metadata`, after the `state_dict` of `self` is set.
-        Note that only parameters and buffers of `self` or its children are
-        guaranteed to exist in `state_dict`. The hooks may modify `state_dict`
-        inplace or return a new one.
-        """
-        handle = hooks.RemovableHandle(self._state_dict_hooks)
-        self._state_dict_hooks[handle.id] = hook
-        return handle
-
-    def register_state_dict_pre_hook(self, hook):
-        r"""Register a pre-hook for the :meth:`~torch.nn.Module.state_dict` method.
-
-        These hooks will be called with arguments: ``self``, ``prefix``,
-        and ``keep_vars`` before calling ``state_dict`` on ``self``. The registered
-        hooks can be used to perform pre-processing before the ``state_dict``
-        call is made.
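-
-        Example (a minimal sketch; ``log_save`` is a hypothetical hook)::
-
-            >>> # xdoctest: +SKIP("illustrative")
-            >>> def log_save(module, prefix, keep_vars):
-            >>>     print(f"saving state_dict with prefix={prefix!r}")
-            >>> m = nn.Linear(2, 2)
-            >>> handle = m.register_state_dict_pre_hook(log_save)
-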
-        """
-        handle = hooks.RemovableHandle(self._state_dict_pre_hooks)
-        self._state_dict_pre_hooks[handle.id] = hook
-        return handle
-
-    def _save_to_state_dict(self, destination, prefix, keep_vars):
-        r"""Save module state to the `destination` dictionary.
-
-        The `destination` dictionary will contain the state
-        of the module, but not its descendants. This is called on every
-        submodule in :meth:`~torch.nn.Module.state_dict`.
-
-        In rare cases, subclasses can achieve class-specific behavior by
-        overriding this method with custom logic.
-
-        Args:
-            destination (dict): a dict where state will be stored
-            prefix (str): the prefix for parameters and buffers used in this
-                module
-            keep_vars (bool): if ``False``, parameters and buffers are detached
-                from autograd before being stored in `destination`
-        """
-        for name, param in self._parameters.items():
-            if param is not None:
-                destination[prefix + name] = param if keep_vars else param.detach()
-        for name, buf in self._buffers.items():
-            if buf is not None and name not in self._non_persistent_buffers_set:
-                destination[prefix + name] = buf if keep_vars else buf.detach()
-        extra_state_key = prefix + _EXTRA_STATE_KEY_SUFFIX
-        if getattr(self.__class__, "get_extra_state", Module.get_extra_state) is not Module.get_extra_state:
-            destination[extra_state_key] = self.get_extra_state()
-
-    # The user can pass an optional arbitrary mappable object to `state_dict`, in which case `state_dict` returns
-    # that same object. But if they pass nothing, an `OrderedDict` is created and returned.
-    T_destination = TypeVar('T_destination', bound=Dict[str, Any])
-
-    @overload
-    def state_dict(self, *, destination: T_destination, prefix: str = ..., keep_vars: bool = ...) -> T_destination:
-        ...
-
-    @overload
-    def state_dict(self, *, prefix: str = ..., keep_vars: bool = ...) -> Dict[str, Any]:
-        ...
-
-    # TODO: Change `*args` to `*` and remove the corresponding warning in docs when BC allows.
-    # Also remove the logic for arg parsing together.
-    def state_dict(self, *args, destination=None, prefix='', keep_vars=False):
-        r"""Return a dictionary containing references to the whole state of the module.
-
-        Both parameters and persistent buffers (e.g. running averages) are
-        included. Keys are corresponding parameter and buffer names.
-        Parameters and buffers set to ``None`` are not included.
-
-        .. note::
-            The returned object is a shallow copy. It contains references
-            to the module's parameters and buffers.
-
-        .. warning::
-            Currently ``state_dict()`` also accepts positional arguments for
-            ``destination``, ``prefix`` and ``keep_vars`` in order. However,
-            this is being deprecated and keyword arguments will be enforced in
-            future releases.
-
-        .. warning::
-            Please avoid the use of argument ``destination`` as it is not
-            designed for end-users.
-
-        Args:
-            destination (dict, optional): If provided, the state of module will
-                be updated into the dict and the same object is returned.
-                Otherwise, an ``OrderedDict`` will be created and returned.
-                Default: ``None``.
-            prefix (str, optional): a prefix added to parameter and buffer
-                names to compose the keys in state_dict. Default: ``''``.
-            keep_vars (bool, optional): by default the :class:`~torch.Tensor` s
-                returned in the state dict are detached from autograd. If it's
-                set to ``True``, detaching will not be performed.
-                Default: ``False``.
-
-        Returns:
-            dict:
-                a dictionary containing a whole state of the module
-
-        Example::
-
-            >>> # xdoctest: +SKIP("undefined vars")
-            >>> module.state_dict().keys()
-            ['bias', 'weight']
-
-        """
-        # TODO: Remove `args` and the parsing logic when BC allows.
-        if len(args) > 0:
-            if destination is None:
-                destination = args[0]
-            if len(args) > 1 and prefix == '':
-                prefix = args[1]
-            if len(args) > 2 and keep_vars is False:
-                keep_vars = args[2]
-            # DeprecationWarning is ignored by default
-            warnings.warn(
-                "Positional args are being deprecated, use kwargs instead. Refer to "
-                "https://pytorch.org/docs/master/generated/torch.nn.Module.html#torch.nn.Module.state_dict"
-                " for details.")
-
-        if destination is None:
-            destination = OrderedDict()
-            destination._metadata = OrderedDict()
-
-        local_metadata = dict(version=self._version)
-        if hasattr(destination, "_metadata"):
-            destination._metadata[prefix[:-1]] = local_metadata
-
-        for hook in self._state_dict_pre_hooks.values():
-            hook(self, prefix, keep_vars)
-        self._save_to_state_dict(destination, prefix, keep_vars)
-        for name, module in self._modules.items():
-            if module is not None:
-                module.state_dict(destination=destination, prefix=prefix + name + '.', keep_vars=keep_vars)
-        for hook in self._state_dict_hooks.values():
-            hook_result = hook(self, destination, prefix, local_metadata)
-            if hook_result is not None:
-                destination = hook_result
-        return destination
-
-    def _register_load_state_dict_pre_hook(self, hook, with_module=False):
-        r"""Register a pre-hook for the :meth:`~torch.nn.Module.load_state_dict` method.
-
-        These hooks will be called with arguments: `state_dict`, `prefix`,
-        `local_metadata`, `strict`, `missing_keys`, `unexpected_keys`,
-        `error_msgs`, before loading `state_dict` into `self`. These arguments
-        are exactly the same as those of `_load_from_state_dict`.
-
-        If ``with_module`` is ``True``, then the first argument to the hook is
-        an instance of the module.
-
-        Arguments:
-            hook (Callable): Callable hook that will be invoked before
-                loading the state dict.
-            with_module (bool, optional): Whether or not to pass the module
-                instance to the hook as the first parameter.
-        """
-        handle = hooks.RemovableHandle(self._load_state_dict_pre_hooks)
-        self._load_state_dict_pre_hooks[handle.id] = _WrappedHook(hook, self if with_module else None)
-        return handle
-
-    def register_load_state_dict_post_hook(self, hook):
-        r"""Register a post hook to be run after module's ``load_state_dict`` is called.
-
-        It should have the following signature::
-
-            hook(module, incompatible_keys) -> None
-
-        The ``module`` argument is the current module that this hook is registered
-        on, and the ``incompatible_keys`` argument is a ``NamedTuple`` consisting
-        of attributes ``missing_keys`` and ``unexpected_keys``. ``missing_keys``
-        is a ``list`` of ``str`` containing the missing keys and
-        ``unexpected_keys`` is a ``list`` of ``str`` containing the unexpected keys.
-
-        The given incompatible_keys can be modified inplace if needed.
-
-        Note that the checks performed when calling :func:`load_state_dict` with
-        ``strict=True`` are affected by modifications the hook makes to
-        ``missing_keys`` or ``unexpected_keys``, as expected. Additions to either
-        set of keys will result in an error being thrown when ``strict=True``, and
-        clearing out both missing and unexpected keys will avoid an error.
-
-        Returns:
-            :class:`torch.utils.hooks.RemovableHandle`:
-                a handle that can be used to remove the added hook by calling
-                ``handle.remove()``
-        """
-        handle = hooks.RemovableHandle(self._load_state_dict_post_hooks)
-        self._load_state_dict_post_hooks[handle.id] = hook
-        return handle
-
-
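    # Editor's note: a minimal usage sketch (the `model` instance is
    # hypothetical); `incompatible_keys` carries plain mutable lists:
    #
    # >>> def report(module, incompatible_keys):
    # ...     print(incompatible_keys.missing_keys,
    # ...           incompatible_keys.unexpected_keys)
    # >>> handle = model.register_load_state_dict_post_hook(report)
    # >>> model.load_state_dict(model.state_dict())  # hook fires after loading
    # >>> handle.remove()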
-    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
-                              missing_keys, unexpected_keys, error_msgs):
-        r"""Copy parameters and buffers from :attr:`state_dict` into only this module, but not its descendants.
-
-        This is called on every submodule
-        in :meth:`~torch.nn.Module.load_state_dict`. Metadata saved for this
-        module in input :attr:`state_dict` is provided as :attr:`local_metadata`.
-        For state dicts without metadata, :attr:`local_metadata` is empty.
-        Subclasses can achieve class-specific backward compatible loading using
-        the version number at `local_metadata.get("version", None)`.
-        Additionally, :attr:`local_metadata` can also contain the key
-        `assign_to_params_buffers` that indicates whether keys should be
-        assigned their corresponding tensor in the state_dict.
-
-        .. note::
-            :attr:`state_dict` is not the same object as the input
-            :attr:`state_dict` to :meth:`~torch.nn.Module.load_state_dict`. So
-            it can be modified.
-
-        Args:
-            state_dict (dict): a dict containing parameters and
-                persistent buffers.
-            prefix (str): the prefix for parameters and buffers used in this
-                module
-            local_metadata (dict): a dict containing the metadata for this module.
-            strict (bool): whether to strictly enforce that the keys in
-                :attr:`state_dict` with :attr:`prefix` match the names of
-                parameters and buffers in this module
-            missing_keys (list of str): if ``strict=True``, add missing keys to
-                this list
-            unexpected_keys (list of str): if ``strict=True``, add unexpected
-                keys to this list
-            error_msgs (list of str): error messages should be added to this
-                list, and will be reported together in
-                :meth:`~torch.nn.Module.load_state_dict`
-        """
-        for hook in self._load_state_dict_pre_hooks.values():
-            hook(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs)
-
-        persistent_buffers = {k: v for k, v in self._buffers.items() if k not in self._non_persistent_buffers_set}
-        local_name_params = itertools.chain(self._parameters.items(), persistent_buffers.items())
-        local_state = {k: v for k, v in local_name_params if v is not None}
-        assign_to_params_buffers = local_metadata.get("assign_to_params_buffers", False)
-        use_swap_tensors = torch.__future__.get_swap_module_params_on_conversion()
-
-        for name, param in local_state.items():
-            key = prefix + name
-            if key in state_dict:
-                input_param = state_dict[key]
-                if not torch.overrides.is_tensor_like(input_param):
-                    error_msgs.append(f'While copying the parameter named "{key}", '
-                                      'expected torch.Tensor or Tensor-like object from checkpoint but '
-                                      f'received {type(input_param)}'
-                                      )
-                    continue
-
-                # This is used to avoid copying uninitialized parameters into
-                # non-lazy modules, since they don't have the hook to do these
-                # checks; in such a case, accessing the .shape attribute errors.
-                is_param_lazy = torch.nn.parameter.is_lazy(param)
-                # Backward compatibility: loading 1-dim tensor from 0.3.* to version 0.4+
-                if not is_param_lazy and len(param.shape) == 0 and len(input_param.shape) == 1:
-                    input_param = input_param[0]
-
-                if not is_param_lazy and input_param.shape != param.shape:
-                    # local shape should match the one in checkpoint
-                    error_msgs.append('size mismatch for {}: copying a param with shape {} from checkpoint, '
-                                      'the shape in current model is {}.'
-                                      .format(key, input_param.shape, param.shape))
-                    continue
-
-                if param.is_meta and not input_param.is_meta and not assign_to_params_buffers:
-                    warnings.warn(f'for {key}: copying from a non-meta parameter in the checkpoint to a meta '
-                                  'parameter in the current model, which is a no-op. (Did you mean to '
-                                  'pass `assign=True` to assign items in the state dictionary to their '
-                                  'corresponding key in the module instead of copying them in place?)')
-
-                try:
-                    with torch.no_grad():
-                        if use_swap_tensors:
-                            new_input_param = param.module_load(input_param, assign=assign_to_params_buffers)
-                            if id(new_input_param) == id(input_param) or id(new_input_param) == id(param):
-                                raise RuntimeError("module_load returned one of self or other, please .detach() "
-                                                   "the result if returning one of the inputs in module_load")
-                            if (isinstance(param, torch.nn.Parameter)):
-                                if not isinstance(new_input_param, torch.nn.Parameter):
-                                    new_input_param = torch.nn.Parameter(new_input_param, requires_grad=param.requires_grad)
-                                else:
-                                    new_input_param.requires_grad_(param.requires_grad)
-                            torch.utils.swap_tensors(param, new_input_param)
-                            del new_input_param
-                        elif assign_to_params_buffers:
-                            # Shape checks are already done above
-                            if (isinstance(param, torch.nn.Parameter)):
-                                if not isinstance(input_param, torch.nn.Parameter):
-                                    input_param = torch.nn.Parameter(input_param, requires_grad=param.requires_grad)
-                                else:
-                                    input_param.requires_grad_(param.requires_grad)
-                            setattr(self, name, input_param)
-                        else:
-                            param.copy_(input_param)
-                except Exception as ex:
-                    action = "swapping" if use_swap_tensors else "copying"
-                    error_msgs.append(f'While {action} the parameter named "{key}", '
-                                      f'whose dimensions in the model are {param.size()} and '
-                                      f'whose dimensions in the checkpoint are {input_param.size()}, '
-                                      f'an exception occurred : {ex.args}.'
-                                      )
-            elif strict:
-                missing_keys.append(key)
-
-        extra_state_key = prefix + _EXTRA_STATE_KEY_SUFFIX
-        if getattr(self.__class__, "set_extra_state", Module.set_extra_state) is not Module.set_extra_state:
-            if extra_state_key in state_dict:
-                self.set_extra_state(state_dict[extra_state_key])
-            elif strict:
-                missing_keys.append(extra_state_key)
-        elif strict and (extra_state_key in state_dict):
-            unexpected_keys.append(extra_state_key)
-
-        if strict:
-            for key in state_dict.keys():
-                if key.startswith(prefix) and key != extra_state_key:
-                    input_name = key[len(prefix):]
-                    input_name = input_name.split('.', 1)[0]  # get the name of param/buffer/child
-                    if input_name not in self._modules and input_name not in local_state:
-                        unexpected_keys.append(key)
-
-    def load_state_dict(self, state_dict: Mapping[str, Any],
-                        strict: bool = True, assign: bool = False):
-        r"""Copy parameters and buffers from :attr:`state_dict` into this module and its descendants.
-
-        If :attr:`strict` is ``True``, then
-        the keys of :attr:`state_dict` must exactly match the keys returned
-        by this module's :meth:`~torch.nn.Module.state_dict` function.
-
-        .. warning::
-            If :attr:`assign` is ``True`` the optimizer must be created after
-            the call to :attr:`load_state_dict` unless
-            :func:`~torch.__future__.get_swap_module_params_on_conversion` is ``True``.
-
-        Args:
-            state_dict (dict): a dict containing parameters and
-                persistent buffers.
-            strict (bool, optional): whether to strictly enforce that the keys
-                in :attr:`state_dict` match the keys returned by this module's
-                :meth:`~torch.nn.Module.state_dict` function. Default: ``True``
-            assign (bool, optional): When ``False``, the properties of the tensors
-                in the current module are preserved while when ``True``, the
-                properties of the Tensors in the state dict are preserved. The only
-                exception is the ``requires_grad`` field of :class:`~torch.nn.Parameter`s
-                for which the value from the module is preserved.
-                Default: ``False``
-
-        Returns:
-            ``NamedTuple`` with ``missing_keys`` and ``unexpected_keys`` fields:
-                * **missing_keys** is a list of str containing the missing keys
-                * **unexpected_keys** is a list of str containing the unexpected keys
-
-        Note:
-            If a parameter or buffer is registered as ``None`` and its corresponding key
-            exists in :attr:`state_dict`, :meth:`load_state_dict` will raise a
-            ``RuntimeError``.
-        """
-        if not isinstance(state_dict, Mapping):
-            raise TypeError(f"Expected state_dict to be dict-like, got {type(state_dict)}.")
-
-        missing_keys: List[str] = []
-        unexpected_keys: List[str] = []
-        error_msgs: List[str] = []
-
-        # copy state_dict so _load_from_state_dict can modify it
-        metadata = getattr(state_dict, '_metadata', None)
-        state_dict = OrderedDict(state_dict)
-        if metadata is not None:
-            # mypy isn't aware that "_metadata" exists in state_dict
-            state_dict._metadata = metadata  # type: ignore[attr-defined]
-
-        def load(module, local_state_dict, prefix=''):
-            local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
-            if assign:
-                local_metadata['assign_to_params_buffers'] = assign
-            module._load_from_state_dict(
-                local_state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
-            for name, child in module._modules.items():
-                if child is not None:
-                    child_prefix = prefix + name + '.'
-                    child_state_dict = {k: v for k, v in local_state_dict.items() if k.startswith(child_prefix)}
-                    load(child, child_state_dict, child_prefix)  # noqa: F821
-
-            # Note that the hook can modify missing_keys and unexpected_keys.
-            incompatible_keys = _IncompatibleKeys(missing_keys, unexpected_keys)
-            for hook in module._load_state_dict_post_hooks.values():
-                out = hook(module, incompatible_keys)
-                assert out is None, (
-                    "Hooks registered with ``register_load_state_dict_post_hook`` are not "
-                    "expected to return new values; if incompatible_keys need to be modified, "
-                    "it should be done inplace."
-                )
-
-        load(self, state_dict)
-        del load
-
-        if strict:
-            if len(unexpected_keys) > 0:
-                error_msgs.insert(
-                    0, 'Unexpected key(s) in state_dict: {}. '.format(
-                        ', '.join(f'"{k}"' for k in unexpected_keys)))
-            if len(missing_keys) > 0:
-                error_msgs.insert(
-                    0, 'Missing key(s) in state_dict: {}. '.format(
-                        ', '.join(f'"{k}"' for k in missing_keys)))
-
-        if len(error_msgs) > 0:
-            raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(
-                               self.__class__.__name__, "\n\t".join(error_msgs)))
-        return _IncompatibleKeys(missing_keys, unexpected_keys)
-
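    # Editor's note: a minimal sketch of the strict/non-strict behavior
    # documented above (`model` and the checkpoint file are hypothetical):
    #
    # >>> state = torch.load("checkpoint.pt")
    # >>> result = model.load_state_dict(state, strict=False)
    # >>> result.missing_keys     # present in the model, absent from the checkpoint
    # >>> result.unexpected_keys  # present in the checkpoint, absent from the model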
-    def _named_members(self, get_members_fn, prefix='', recurse=True, remove_duplicate: bool = True):
-        r"""Help yield various names + members of modules."""
-        memo = set()
-        modules = self.named_modules(prefix=prefix, remove_duplicate=remove_duplicate) if recurse else [(prefix, self)]
-        for module_prefix, module in modules:
-            members = get_members_fn(module)
-            for k, v in members:
-                if v is None or v in memo:
-                    continue
-                if remove_duplicate:
-                    memo.add(v)
-                name = module_prefix + ('.' if module_prefix else '') + k
-                yield name, v
-
-    def parameters(self, recurse: bool = True) -> Iterator[Parameter]:
-        r"""Return an iterator over module parameters.
-
-        This is typically passed to an optimizer.
-
-        Args:
-            recurse (bool): if True, then yields parameters of this module
-                and all submodules. Otherwise, yields only parameters that
-                are direct members of this module.
-
-        Yields:
-            Parameter: module parameter
-
-        Example::
-
-            >>> # xdoctest: +SKIP("undefined vars")
-            >>> for param in model.parameters():
-            >>>     print(type(param), param.size())
-            <class 'torch.Tensor'> (20L,)
-            <class 'torch.Tensor'> (20L, 1L, 5L, 5L)
-
-        """
-        for name, param in self.named_parameters(recurse=recurse):
-            yield param
-
-    def named_parameters(
-            self,
-            prefix: str = '',
-            recurse: bool = True,
-            remove_duplicate: bool = True
-    ) -> Iterator[Tuple[str, Parameter]]:
-        r"""Return an iterator over module parameters, yielding both the name of the parameter as well as the parameter itself.
-
-        Args:
-            prefix (str): prefix to prepend to all parameter names.
-            recurse (bool): if True, then yields parameters of this module
-                and all submodules. Otherwise, yields only parameters that
-                are direct members of this module.
-            remove_duplicate (bool, optional): whether to remove the duplicated
-                parameters in the result. Defaults to True.
-
-        Yields:
-            (str, Parameter): Tuple containing the name and parameter
-
-        Example::
-
-            >>> # xdoctest: +SKIP("undefined vars")
-            >>> for name, param in self.named_parameters():
-            >>>     if name in ['bias']:
-            >>>         print(param.size())
-
-        """
-        gen = self._named_members(
-            lambda module: module._parameters.items(),
-            prefix=prefix, recurse=recurse, remove_duplicate=remove_duplicate)
-        yield from gen
-
-    def buffers(self, recurse: bool = True) -> Iterator[Tensor]:
-        r"""Return an iterator over module buffers.
-
-        Args:
-            recurse (bool): if True, then yields buffers of this module
-                and all submodules. Otherwise, yields only buffers that
-                are direct members of this module.
-
-        Yields:
-            torch.Tensor: module buffer
-
-        Example::
-
-            >>> # xdoctest: +SKIP("undefined vars")
-            >>> for buf in model.buffers():
-            >>>     print(type(buf), buf.size())
-            <class 'torch.Tensor'> (20L,)
-            <class 'torch.Tensor'> (20L, 1L, 5L, 5L)
-
-        """
-        for _, buf in self.named_buffers(recurse=recurse):
-            yield buf
-
-    def named_buffers(self, prefix: str = '', recurse: bool = True, remove_duplicate: bool = True) -> Iterator[Tuple[str, Tensor]]:
-        r"""Return an iterator over module buffers, yielding both the name of the buffer as well as the buffer itself.
-
-        Args:
-            prefix (str): prefix to prepend to all buffer names.
-            recurse (bool, optional): if True, then yields buffers of this module
-                and all submodules. Otherwise, yields only buffers that
-                are direct members of this module. Defaults to True.
-            remove_duplicate (bool, optional): whether to remove the duplicated buffers in the result. Defaults to True.
-
-        Yields:
-            (str, torch.Tensor): Tuple containing the name and buffer
-
-        Example::
-
-            >>> # xdoctest: +SKIP("undefined vars")
-            >>> for name, buf in self.named_buffers():
-            >>>     if name in ['running_var']:
-            >>>         print(buf.size())
-
-        """
-        gen = self._named_members(
-            lambda module: module._buffers.items(),
-            prefix=prefix, recurse=recurse, remove_duplicate=remove_duplicate)
-        yield from gen
-
-    def children(self) -> Iterator['Module']:
-        r"""Return an iterator over immediate children modules.
-
-        Yields:
-            Module: a child module
-        """
-        for name, module in self.named_children():
-            yield module
-
-    def named_children(self) -> Iterator[Tuple[str, 'Module']]:
-        r"""Return an iterator over immediate children modules, yielding both the name of the module as well as the module itself.
-
-        Yields:
-            (str, Module): Tuple containing a name and child module
-
-        Example::
-
-            >>> # xdoctest: +SKIP("undefined vars")
-            >>> for name, module in model.named_children():
-            >>>     if name in ['conv4', 'conv5']:
-            >>>         print(module)
-
-        """
-        memo = set()
-        for name, module in self._modules.items():
-            if module is not None and module not in memo:
-                memo.add(module)
-                yield name, module
-
-    def modules(self) -> Iterator['Module']:
-        r"""Return an iterator over all modules in the network.
-
-        Yields:
-            Module: a module in the network
-
-        Note:
-            Duplicate modules are returned only once. In the following
-            example, ``l`` will be returned only once.
-
-        Example::
-
-            >>> l = nn.Linear(2, 2)
-            >>> net = nn.Sequential(l, l)
-            >>> for idx, m in enumerate(net.modules()):
-            ...     print(idx, '->', m)
-
-            0 -> Sequential(
-              (0): Linear(in_features=2, out_features=2, bias=True)
-              (1): Linear(in_features=2, out_features=2, bias=True)
-            )
-            1 -> Linear(in_features=2, out_features=2, bias=True)
-
-        """
-        for _, module in self.named_modules():
-            yield module
-
-    def named_modules(self, memo: Optional[Set['Module']] = None, prefix: str = '', remove_duplicate: bool = True):
-        r"""Return an iterator over all modules in the network, yielding both the name of the module as well as the module itself.
-
-        Args:
-            memo: a memo to store the set of modules already added to the result
-            prefix: a prefix that will be added to the name of the module
-            remove_duplicate: whether to remove the duplicated module instances in the result
-                or not
-
-        Yields:
-            (str, Module): Tuple of name and module
-
-        Note:
-            Duplicate modules are returned only once. In the following
-            example, ``l`` will be returned only once.
-
-        Example::
-
-            >>> l = nn.Linear(2, 2)
-            >>> net = nn.Sequential(l, l)
-            >>> for idx, m in enumerate(net.named_modules()):
-            ...     print(idx, '->', m)
-
-            0 -> ('', Sequential(
-              (0): Linear(in_features=2, out_features=2, bias=True)
-              (1): Linear(in_features=2, out_features=2, bias=True)
-            ))
-            1 -> ('0', Linear(in_features=2, out_features=2, bias=True))
-
-        """
-        if memo is None:
-            memo = set()
-        if self not in memo:
-            if remove_duplicate:
-                memo.add(self)
-            yield prefix, self
-            for name, module in self._modules.items():
-                if module is None:
-                    continue
-                submodule_prefix = prefix + ('.' if prefix else '') + name
-                yield from module.named_modules(memo, submodule_prefix, remove_duplicate)
-
-    def train(self: T, mode: bool = True) -> T:
-        r"""Set the module in training mode.
-
-        This has an effect only on certain modules. See the documentation of
-        particular modules for details of their behavior in training/evaluation
-        mode, if they are affected, e.g. :class:`Dropout`, :class:`BatchNorm`,
-        etc.
-
-        Args:
-            mode (bool): whether to set training mode (``True``) or evaluation
-                         mode (``False``). Default: ``True``.
-
-        Returns:
-            Module: self
-        """
-        if not isinstance(mode, bool):
-            raise ValueError("training mode is expected to be boolean")
-        self.training = mode
-        for module in self.children():
-            module.train(mode)
-        return self
-
-    def eval(self: T) -> T:
-        r"""Set the module in evaluation mode.
-
-        This has an effect only on certain modules. See the documentation of
-        particular modules for details of their behavior in training/evaluation
-        mode, if they are affected, e.g. :class:`Dropout`, :class:`BatchNorm`,
-        etc.
-
-        This is equivalent to :meth:`self.train(False) <torch.nn.Module.train>`.
-
-        See :ref:`locally-disable-grad-doc` for a comparison between
-        `.eval()` and several similar mechanisms that may be confused with it.
-
-        Returns:
-            Module: self
-        """
-        return self.train(False)
-
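    # Editor's note: a minimal sketch of the mode switch (the `model` instance
    # is hypothetical); Dropout and BatchNorm are the classic mode-dependent layers:
    #
    # >>> model.train()     # dropout active, batch norm updates running stats
    # >>> model.eval()      # dropout disabled, batch norm uses stored stats
    # >>> model.training    # the flag both calls toggle
    # False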
-    def requires_grad_(self: T, requires_grad: bool = True) -> T:
-        r"""Change if autograd should record operations on parameters in this module.
-
-        This method sets the parameters' :attr:`requires_grad` attributes
-        in-place.
-
-        This method is helpful for freezing part of the module for finetuning
-        or training parts of a model individually (e.g., GAN training).
-
-        See :ref:`locally-disable-grad-doc` for a comparison between
-        `.requires_grad_()` and several similar mechanisms that may be confused with it.
-
-        Args:
-            requires_grad (bool): whether autograd should record operations on
-                                  parameters in this module. Default: ``True``.
-
-        Returns:
-            Module: self
-        """
-        for p in self.parameters():
-            p.requires_grad_(requires_grad)
-        return self
-
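    # Editor's note: a minimal freezing sketch for finetuning (`model` and its
    # `head` submodule are hypothetical):
    #
    # >>> model.requires_grad_(False)      # freeze every parameter
    # >>> model.head.requires_grad_(True)  # unfreeze just the head
    # >>> trainable = [p for p in model.parameters() if p.requires_grad]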
-    def zero_grad(self, set_to_none: bool = True) -> None:
-        r"""Reset gradients of all model parameters.
-
-        See similar function under :class:`torch.optim.Optimizer` for more context.
-
-        Args:
-            set_to_none (bool): instead of setting to zero, set the grads to None.
-                See :meth:`torch.optim.Optimizer.zero_grad` for details.
-        """
-        if getattr(self, '_is_replica', False):
-            warnings.warn(
-                "Calling .zero_grad() from a module created with nn.DataParallel() has no effect. "
-                "The parameters are copied (in a differentiable manner) from the original module. "
-                "This means they are not leaf nodes in autograd and so don't accumulate gradients. "
-                "If you need gradients in your forward method, consider using autograd.grad instead.")
-
-        for p in self.parameters():
-            if p.grad is not None:
-                if set_to_none:
-                    p.grad = None
-                else:
-                    if p.grad.grad_fn is not None:
-                        p.grad.detach_()
-                    else:
-                        p.grad.requires_grad_(False)
-                    p.grad.zero_()
-
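    # Editor's note: a minimal sketch of the two reset modes (the `model`
    # instance is hypothetical):
    #
    # >>> model.zero_grad(set_to_none=True)   # p.grad becomes None (the default)
    # >>> model.zero_grad(set_to_none=False)  # p.grad becomes a zero-filled tensor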
-    def share_memory(self: T) -> T:
-        r"""See :meth:`torch.Tensor.share_memory_`."""
-        return self._apply(lambda t: t.share_memory_())
-
-    def _get_name(self):
-        return self.__class__.__name__
-
-    def extra_repr(self) -> str:
-        r"""Set the extra representation of the module.
-
-        To print customized extra information, you should re-implement
-        this method in your own modules. Both single-line and multi-line
-        strings are acceptable.
-        """
-        return ''
-
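    # Editor's note: a minimal override sketch (`Scale` is a hypothetical module):
    #
    # >>> class Scale(nn.Module):
    # ...     def __init__(self, factor):
    # ...         super().__init__()
    # ...         self.factor = factor
    # ...     def extra_repr(self) -> str:
    # ...         return f'factor={self.factor}'
    # >>> print(Scale(2.0))
    # Scale(factor=2.0)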
-    def __repr__(self):
-        # We treat the extra repr like the sub-module, one item per line
-        extra_lines = []
-        extra_repr = self.extra_repr()
-        # empty string will be split into list ['']
-        if extra_repr:
-            extra_lines = extra_repr.split('\n')
-        child_lines = []
-        for key, module in self._modules.items():
-            mod_str = repr(module)
-            mod_str = _addindent(mod_str, 2)
-            child_lines.append('(' + key + '): ' + mod_str)
-        lines = extra_lines + child_lines
-
-        main_str = self._get_name() + '('
-        if lines:
-            # simple one-liner info, which most builtin Modules will use
-            if len(extra_lines) == 1 and not child_lines:
-                main_str += extra_lines[0]
-            else:
-                main_str += '\n  ' + '\n  '.join(lines) + '\n'
-
-        main_str += ')'
-        return main_str
-
-    def __dir__(self):
-        module_attrs = dir(self.__class__)
-        attrs = list(self.__dict__.keys())
-        parameters = list(self._parameters.keys())
-        modules = list(self._modules.keys())
-        buffers = list(self._buffers.keys())
-        keys = module_attrs + attrs + parameters + modules + buffers
-
-        # Eliminate attrs that are not legal Python variable names
-        keys = [key for key in keys if not key[0].isdigit()]
-
-        return sorted(keys)
-
-    def _replicate_for_data_parallel(self):
-        replica = self.__new__(type(self))
-        replica.__dict__ = self.__dict__.copy()
-
-        # replicas do not have parameters themselves, the replicas reference the original
-        # module.
-        replica._parameters = OrderedDict()
-        replica._buffers = replica._buffers.copy()
-        replica._modules = replica._modules.copy()
-        replica._is_replica = True  # type: ignore[assignment]
-
-        return replica
-
-    def compile(self, *args, **kwargs):
-        """
-        Compile this Module's forward using :func:`torch.compile`.
-
-        This Module's `__call__` method is compiled and all arguments are passed as-is
-        to :func:`torch.compile`.
-
-        See :func:`torch.compile` for details on the arguments for this function.
-        """
-        self._compiled_call_impl = torch.compile(self._call_impl, *args, **kwargs)
-
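    # Editor's note: a minimal usage sketch; "reduce-overhead" is a standard
    # torch.compile mode, while the module itself is hypothetical:
    #
    # >>> net = nn.Linear(8, 8)
    # >>> net.compile(mode="reduce-overhead")   # wraps __call__ via torch.compile
    # >>> out = net(torch.randn(4, 8))          # first call triggers compilation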
\ No newline at end of file
diff --git a/_rst/_code.html b/_rst/_code.html
index 12ada4b1..035a238f 100644
--- a/_rst/_code.html
+++ b/_rst/_code.html
@@ -1,207 +1,572 @@
-Code Documentation — PINA 0.1.1.post2407 documentation
+Code Documentation — PINA 0.1.2 documentation

-Code Documentation
+Code Documentation#
Welcome to PINA documentation! Here you can find the modules of the package divided into different sections. The high-level structure of the package is depicted in our API.

[figure: PINA application program interface]
    The pipeline to solve differential equations with PINA follows just five steps:

      @@ -209,11 +574,11 @@

1. Define the Problem you aim to solve
2. Generate data using built-in Geometries, or load high level simulation results as LabelTensor
3. Choose or build one or more Models to solve the problem
4. Choose a solver across PINA available Solvers, or build one using the SolverInterface
-5. Train the model with the PINA Trainer, enhance the train with Callbacks_
+5. Train the model with the PINA Trainer, enhance the train with Callbacks
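A minimal sketch of these five steps is shown below (editor's addition: `Poisson` stands in for any problem class defined by the user, and import paths or argument names may differ slightly between PINA releases):

from pina import Trainer
from pina.model import FeedForward
from pina.solvers import PINN

# steps 1-2: define the problem and discretise its domain to generate data
problem = Poisson()  # assumed SpatialProblem subclass, defined elsewhere
problem.discretise_domain(n=1000, mode='random')

# step 3: build a model mapping problem inputs to outputs
model = FeedForward(
    input_dimensions=len(problem.input_variables),
    output_dimensions=len(problem.output_variables),
)

# step 4: wrap problem and model in a solver
solver = PINN(problem=problem, model=model)

# step 5: train, optionally passing Lightning-style callbacks
trainer = Trainer(solver=solver, max_epochs=1000)
trainer.train()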

-PINA Features
+PINA Features#
-Solvers
+Solvers#
-Models
+Models#
-Layers
+Layers#
-Adaptive Activation Functions
+Adaptive Activation Functions#
-Equations and Operators
+Equations and Operators#
-Problem
+Problem#
-Geometries
+Geometries#
-Geometry set operations
+Geometry set operations#
-Callbacks
+Callbacks#
-Metrics and Losses
+Metrics and Losses#
\ No newline at end of file
diff --git a/_rst/_contributing.html b/_rst/_contributing.html
index 74618ac0..e8195830 100644
--- a/_rst/_contributing.html
+++ b/_rst/_contributing.html
-How to contribute — PINA 0.1.1.post2407 documentation
+How to contribute — PINA 0.1.2 documentation
-How to contribute
+How to contribute#
We’d love to accept your patches and contributions to this project. There are just a few small guidelines you need to follow.

-Submitting a patch
+Submitting a patch#
1. It’s generally best to start by opening a new issue describing the bug or …

\ No newline at end of file
diff --git a/_rst/_installation.html b/_rst/_installation.html
index 8211f36e..84fb6857 100644
--- a/_rst/_installation.html
+++ b/_rst/_installation.html
-Installation — PINA 0.1.1.post2407 documentation
+Installation — PINA 0.1.2 documentation

-Installation
-PINA requires numpy, scipy, matplotlib, future, torch, sphinx (for the documentation) and pytest (for local tests). The code is tested for Python 3, while compatibility with Python 2 is not guaranteed anymore. It can be installed using pip or directly from the source code.
-Installing via PIP
+Installation#
+PINA requires numpy, matplotlib, torch, lightning, sphinx (for the documentation) and pytest (for local tests). The code is tested for Python 3, while compatibility with Python 2 is not guaranteed anymore. It can be installed using pip or directly from the source code.
+Installing via PIP#
Mac and Linux users can install pre-built binary packages using pip. To install the package just type:

-$ pip install git+https://github.com/mathLab/PINA.git
+$ pip install pina-mathlab

To uninstall the package:

-$ pip uninstall pina
+$ pip uninstall pina-mathlab
-Installing from source
+Installing from source#
The official distribution is on GitHub, and you can clone the repository using

$ git clone https://github.com/mathLab/PINA

To install the package just type:

$ pip install -e .
\ No newline at end of file
diff --git a/_rst/_tutorial.html b/_rst/_tutorial.html
index d8764ee6..1ac352ea 100644
--- a/_rst/_tutorial.html
+++ b/_rst/_tutorial.html
-PINA Tutorials — PINA 0.1.1.post2407 documentation
+PINA Tutorials — PINA 0.1.2 documentation
+Neural Operator Learning#
-Supervised Learning
+Supervised Learning#
\ No newline at end of file
diff --git a/_rst/adaptive_functions/AdaptiveCELU.html b/_rst/adaptive_functions/AdaptiveCELU.html
index 2c561490..2e171b09 100644
--- a/_rst/adaptive_functions/AdaptiveCELU.html
+++ b/_rst/adaptive_functions/AdaptiveCELU.html
-AdaptiveCELU — PINA 0.1.1.post2407 documentation
+AdaptiveCELU — PINA 0.1.2 documentation

-AdaptiveCELU
+AdaptiveCELU#
-class AdaptiveCELU(alpha=None, beta=None, gamma=None, fixed=None)[source]
+class AdaptiveCELU(alpha=None, beta=None, gamma=None, fixed=None)[source]#
-Bases: pina.adaptive_functions.adaptive_func_interface.AdaptiveActivationFunctionInterface
+Bases: AdaptiveActivationFunctionInterface

Adaptive trainable CELU activation function.
Given the function \(\text{CELU}:\mathbb{R}^n\rightarrow\mathbb{R}^n\), the adaptive function \(\text{CELU}_{\text{adaptive}}:\mathbb{R}^n\rightarrow\mathbb{R}^n\) is defined as \(\text{CELU}_{\text{adaptive}}(\mathbf{x}) = \alpha\,\text{CELU}(\beta\mathbf{x} + \gamma)\), where \(\alpha\), \(\beta\), \(\gamma\) are the trainable parameters described below.
-Parameters
+Parameters:
  • alpha (float | complex) – Scaling parameter alpha. Defaults to None. When None is passed, the variable is initialized to 1.
  • beta (float | complex) – Scaling parameter beta. Defaults to None. When None is passed, the variable is initialized to 1.
  • gamma (float | complex) – Shifting parameter gamma. Defaults to None. When None is passed, the variable is initialized to 1.
  • fixed (list) – List of parameters to fix during training, …
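A short usage sketch (editor's addition; the tensor shape and the fixed list are illustrative):

>>> import torch
>>> from pina.adaptive_functions import AdaptiveCELU
>>> func = AdaptiveCELU(alpha=2.0, beta=1.5, fixed=['alpha'])  # alpha frozen
>>> x = torch.rand(5, 3)
>>> y = func(x)  # elementwise CELU with trainable beta and gamma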
    -add_module(name: str, module: Optional[Module]) → None
    -

    Add a child module to the current module.

    -

    The module can be accessed as an attribute using the given name.

    -
    -
    Parameters
    -
      -
    • name (str) – name of the child module. The child module can be -accessed from this module using the given name

    • -
    • module (Module) – child module to be added to the module.

    • -
    -
    -
    -

- -
-
-property alpha
-

The alpha variable.

-
- -
-
-apply(fn: Callable[[Module], None]) → T
-

Apply fn recursively to every submodule (as returned by .children()) as well as self.

-

Typical use includes initializing the parameters of a model -(see also torch.nn.init).

-
-
Parameters
-

fn (Module -> None) – function to be applied to each submodule

-
-
Returns
-

self

-
-
Return type
-

Module

-
-
-

Example:

-
>>> @torch.no_grad()
->>> def init_weights(m):
->>>     print(m)
->>>     if type(m) == nn.Linear:
->>>         m.weight.fill_(1.0)
->>>         print(m.weight)
->>> net = nn.Sequential(nn.Linear(2, 2), nn.Linear(2, 2))
->>> net.apply(init_weights)
-Linear(in_features=2, out_features=2, bias=True)
-Parameter containing:
-tensor([[1., 1.],
-        [1., 1.]], requires_grad=True)
-Linear(in_features=2, out_features=2, bias=True)
-Parameter containing:
-tensor([[1., 1.],
-        [1., 1.]], requires_grad=True)
-Sequential(
-  (0): Linear(in_features=2, out_features=2, bias=True)
-  (1): Linear(in_features=2, out_features=2, bias=True)
-)
-
-
-
-
-property beta
-

The beta variable.

-
- -
-
-bfloat16() → T
-

Casts all floating point parameters and buffers to bfloat16 datatype.

-
-

Note

-

This method modifies the module in-place.

-
-
-
Returns
-

self

-
-
Return type
-

Module

-
-
-
+
-
-
-buffers(recurse: bool = True) → Iterator[torch.Tensor]
-

Return an iterator over module buffers.

-
-
Parameters
-

recurse (bool) – if True, then yields buffers of this module -and all submodules. Otherwise, yields only buffers that -are direct members of this module.

-
-
Yields
-

torch.Tensor – module buffer

-
-
-

Example:

-
>>> # xdoctest: +SKIP("undefined vars")
->>> for buf in model.buffers():
->>>     print(type(buf), buf.size())
-<class 'torch.Tensor'> (20L,)
-<class 'torch.Tensor'> (20L, 1L, 5L, 5L)
-
-
-
-
-
-children() → Iterator[torch.nn.modules.module.Module]
-

Return an iterator over immediate children modules.

-
-
Yields
-

Module – a child module

-
-
-
-
-float() → T
-

Casts all floating point parameters and buffers to float datatype.

-
-

Note

-

This method modifies the module in-place.

+
+ -
-
Returns
-

self

-
-
Return type
-

Module

-
-
-
- -
-
-forward(x)
-

Define the computation performed at every call. Applies the activation function to the input elementwise.

-
-
Parameters
-

x (torch.Tensor | LabelTensor) – The input tensor to evaluate the activation function.

-
-
-
- -
-
-property func
-

The callable activation function.

-
- -
-
-property gamma
-

The gamma variable.

-
- -
-
-get_buffer(target: str) → torch.Tensor
-

Return the buffer given by target if it exists, otherwise throw an error.

-

See the docstring for get_submodule for a more detailed -explanation of this method’s functionality as well as how to -correctly specify target.

-
-
Parameters
-

target – The fully-qualified string name of the buffer -to look for. (See get_submodule for how to specify a -fully-qualified string.)

-
-
Returns
-

The buffer referenced by target

-
-
Return type
-

torch.Tensor

-
-
Raises
-

AttributeError – If the target string references an invalid - path or resolves to something that is not a - buffer

-
-
-
- -
-
-get_extra_state() → Any
-

Return any extra state to include in the module’s state_dict.

-

Implement this and a corresponding set_extra_state() for your module -if you need to store extra state. This function is called when building the -module’s state_dict().

-

Note that extra state should be picklable to ensure working serialization of the state_dict. We only provide backwards compatibility guarantees for serializing Tensors; other objects may break backwards compatibility if their serialized pickled form changes.

-
-
Returns
-

Any extra state to store in the module’s state_dict

-
-
Return type
-

object

-
-
-
- -
-
-get_parameter(target: str) → torch.nn.parameter.Parameter
-

Return the parameter given by target if it exists, otherwise throw an error.

-

See the docstring for get_submodule for a more detailed -explanation of this method’s functionality as well as how to -correctly specify target.

-
-
Parameters
-

target – The fully-qualified string name of the Parameter -to look for. (See get_submodule for how to specify a -fully-qualified string.)

-
-
Returns
-

The Parameter referenced by target

-
-
Return type
-

torch.nn.Parameter

-
-
Raises
-

AttributeError – If the target string references an invalid - path or resolves to something that is not an - nn.Parameter

-
-
-
- -
-
-get_submodule(target: str) → torch.nn.modules.module.Module
-

Return the submodule given by target if it exists, otherwise throw an error.

-

For example, let’s say you have an nn.Module A that -looks like this:

-
A(
-    (net_b): Module(
-        (net_c): Module(
-            (conv): Conv2d(16, 33, kernel_size=(3, 3), stride=(2, 2))
-        )
-        (linear): Linear(in_features=100, out_features=200, bias=True)
-    )
-)
-
-
-

(The diagram shows an nn.Module A. A has a nested -submodule net_b, which itself has two submodules net_c -and linear. net_c then has a submodule conv.)

-

To check whether or not we have the linear submodule, we -would call get_submodule("net_b.linear"). To check whether -we have the conv submodule, we would call -get_submodule("net_b.net_c.conv").

-

The runtime of get_submodule is bounded by the degree -of module nesting in target. A query against -named_modules achieves the same result, but it is O(N) in -the number of transitive modules. So, for a simple check to see -if some submodule exists, get_submodule should always be -used.

-
-
Parameters
-

target – The fully-qualified string name of the submodule -to look for. (See above example for how to specify a -fully-qualified string.)

-
-
Returns
-

The submodule referenced by target

-
-
Return type
-

torch.nn.Module

-
-
Raises
-

AttributeError – If the target string references an invalid - path or resolves to something that is not an - nn.Module

-
-
-
- -
-
-half() → T
-

Casts all floating point parameters and buffers to half datatype.

-
-

Note

-

This method modifies the module in-place.

-
-
-
Returns
-

self

-
-
Return type
-

Module

-
-
-
- -
-
-ipu(device: Union[int, torch.device, None] = None) → T
-

Move all model parameters and buffers to the IPU.

-

This also makes associated parameters and buffers different objects. So -it should be called before constructing optimizer if the module will -live on IPU while being optimized.

-
-

Note

-

This method modifies the module in-place.

-
-
-
Parameters
-

device (int, optional) – if specified, all parameters will be -copied to that device

-
-
Returns
-

self

-
-
Return type
-

Module

-
-
-
- -
-
-load_state_dict(state_dict: Mapping[str, Any], strict: bool = True, assign: bool = False)
-

Copy parameters and buffers from state_dict into this module and its descendants.

-

If strict is True, then -the keys of state_dict must exactly match the keys returned -by this module’s state_dict() function.

-
-

Warning

-

If assign is True the optimizer must be created after -the call to load_state_dict unless -get_swap_module_params_on_conversion() is True.

-
-
-
Parameters
-
    -
  • state_dict (dict) – a dict containing parameters and -persistent buffers.

  • -
  • strict (bool, optional) – whether to strictly enforce that the keys -in state_dict match the keys returned by this module’s -state_dict() function. Default: True

  • -
• assign (bool, optional) – When False, the properties of the tensors in the current module are preserved, while when True, the properties of the Tensors in the state dict are preserved. The only exception is the requires_grad field of Parameters, for which the value from the module is preserved. Default: False

  • -
-
-
Returns
-

    -
  • missing_keys is a list of str containing the missing keys

  • -
  • unexpected_keys is a list of str containing the unexpected keys

  • -
-

-
-
Return type
-

NamedTuple with missing_keys and unexpected_keys fields

-
-
-
-

Note

-

If a parameter or buffer is registered as None and its corresponding key -exists in state_dict, load_state_dict() will raise a -RuntimeError.

-
-
- -
-
-modules() → Iterator[torch.nn.modules.module.Module]
-

Return an iterator over all modules in the network.

-
-
Yields
-

Module – a module in the network

-
-
-
-

Note

-

Duplicate modules are returned only once. In the following -example, l will be returned only once.

-
-

Example:

-
>>> l = nn.Linear(2, 2)
->>> net = nn.Sequential(l, l)
->>> for idx, m in enumerate(net.modules()):
-...     print(idx, '->', m)
-
-0 -> Sequential(
-  (0): Linear(in_features=2, out_features=2, bias=True)
-  (1): Linear(in_features=2, out_features=2, bias=True)
-)
-1 -> Linear(in_features=2, out_features=2, bias=True)
-
-
-
- -
-
-named_buffers(prefix: str = '', recurse: bool = True, remove_duplicate: bool = True) → Iterator[Tuple[str, torch.Tensor]]
-

Return an iterator over module buffers, yielding both the name of the buffer as well as the buffer itself.

-
-
Parameters
-
    -
  • prefix (str) – prefix to prepend to all buffer names.

  • -
  • recurse (bool, optional) – if True, then yields buffers of this module -and all submodules. Otherwise, yields only buffers that -are direct members of this module. Defaults to True.

  • -
  • remove_duplicate (bool, optional) – whether to remove the duplicated buffers in the result. Defaults to True.

  • -
-
-
Yields
-

(str, torch.Tensor) – Tuple containing the name and buffer

-
-
-

Example:

-
>>> # xdoctest: +SKIP("undefined vars")
->>> for name, buf in self.named_buffers():
->>>     if name in ['running_var']:
->>>         print(buf.size())
-
-
-
- -
-
-named_children() → Iterator[Tuple[str, torch.nn.modules.module.Module]]
-

Return an iterator over immediate children modules, yielding both the name of the module as well as the module itself.

-
-
Yields
-

(str, Module) – Tuple containing a name and child module

-
-
-

Example:

-
>>> # xdoctest: +SKIP("undefined vars")
->>> for name, module in model.named_children():
->>>     if name in ['conv4', 'conv5']:
->>>         print(module)
-
-
-
- -
-
-named_modules(memo: Optional[Set[Module]] = None, prefix: str = '', remove_duplicate: bool = True)
-

Return an iterator over all modules in the network, yielding both the name of the module as well as the module itself.

-
-
Parameters
-
    -
  • memo – a memo to store the set of modules already added to the result

  • -
  • prefix – a prefix that will be added to the name of the module

  • -
  • remove_duplicate – whether to remove the duplicated module instances in the result -or not

  • -
-
-
Yields
-

(str, Module) – Tuple of name and module

-
-
-
-

Note

-

Duplicate modules are returned only once. In the following -example, l will be returned only once.

-
-

Example:

-
>>> l = nn.Linear(2, 2)
->>> net = nn.Sequential(l, l)
->>> for idx, m in enumerate(net.named_modules()):
-...     print(idx, '->', m)
-
-0 -> ('', Sequential(
-  (0): Linear(in_features=2, out_features=2, bias=True)
-  (1): Linear(in_features=2, out_features=2, bias=True)
-))
-1 -> ('0', Linear(in_features=2, out_features=2, bias=True))
-
-
-
- -
-
-named_parameters(prefix: str = '', recurse: bool = True, remove_duplicate: bool = True) → Iterator[Tuple[str, torch.nn.parameter.Parameter]]
-

Return an iterator over module parameters, yielding both the name of the parameter as well as the parameter itself.

-
-
Parameters
-
    -
  • prefix (str) – prefix to prepend to all parameter names.

  • -
  • recurse (bool) – if True, then yields parameters of this module -and all submodules. Otherwise, yields only parameters that -are direct members of this module.

  • -
  • remove_duplicate (bool, optional) – whether to remove the duplicated -parameters in the result. Defaults to True.

  • -
-
-
Yields
-

(str, Parameter) – Tuple containing the name and parameter

-
-
-

Example:

-
>>> # xdoctest: +SKIP("undefined vars")
->>> for name, param in self.named_parameters():
->>>     if name in ['bias']:
->>>         print(param.size())
-
-
-
- -
-
-parameters(recurse: bool = True) → Iterator[torch.nn.parameter.Parameter]
-

Return an iterator over module parameters.

-

This is typically passed to an optimizer.

-
-
Parameters
-

recurse (bool) – if True, then yields parameters of this module -and all submodules. Otherwise, yields only parameters that -are direct members of this module.

-
-
Yields
-

Parameter – module parameter

-
-
-

Example:

-
>>> # xdoctest: +SKIP("undefined vars")
->>> for param in model.parameters():
->>>     print(type(param), param.size())
-<class 'torch.Tensor'> (20L,)
-<class 'torch.Tensor'> (20L, 1L, 5L, 5L)
-
-
-
- -
-
-register_backward_hook(hook: Callable[[Module, Union[Tuple[torch.Tensor, ...], torch.Tensor], Union[Tuple[torch.Tensor, ...], torch.Tensor]], Union[None, Tuple[torch.Tensor, ...], torch.Tensor]]) → torch.utils.hooks.RemovableHandle
-

Register a backward hook on the module.

-

This function is deprecated in favor of register_full_backward_hook() and -the behavior of this function will change in future versions.

-
-
Returns
-

a handle that can be used to remove the added hook by calling -handle.remove()

-
-
Return type
-

torch.utils.hooks.RemovableHandle

-
-
-
- -
-
-register_buffer(name: str, tensor: Optional[torch.Tensor], persistent: bool = True) → None
-

Add a buffer to the module.

-

This is typically used to register a buffer that should not to be -considered a model parameter. For example, BatchNorm’s running_mean -is not a parameter, but is part of the module’s state. Buffers, by -default, are persistent and will be saved alongside parameters. This -behavior can be changed by setting persistent to False. The -only difference between a persistent buffer and a non-persistent buffer -is that the latter will not be a part of this module’s -state_dict.

-

Buffers can be accessed as attributes using given names.

-
-
Parameters
-
    -
  • name (str) – name of the buffer. The buffer can be accessed -from this module using the given name

  • -
  • tensor (Tensor or None) – buffer to be registered. If None, then operations -that run on buffers, such as cuda, are ignored. If None, -the buffer is not included in the module’s state_dict.

  • -
  • persistent (bool) – whether the buffer is part of this module’s -state_dict.

  • -
-
-
-

Example:

-
>>> # xdoctest: +SKIP("undefined vars")
->>> self.register_buffer('running_mean', torch.zeros(num_features))
-
-
-
- -
-
register_forward_hook(hook: Union[Callable[[T, Tuple[Any, ...], Any], Optional[Any]], Callable[[T, Tuple[Any, ...], Dict[str, Any], Any], Optional[Any]]], *, prepend: bool = False, with_kwargs: bool = False, always_call: bool = False) → torch.utils.hooks.RemovableHandle

Register a forward hook on the module.

The hook will be called every time after forward() has computed an output.

If with_kwargs is False or not specified, the input contains only the positional arguments given to the module; keyword arguments are passed only to the forward, not to the hooks. The hook can modify the output. It can also modify the input in place, but this has no effect on forward, since the hook runs after forward() is called. The hook should have the following signature:

hook(module, args, output) -> None or modified output

If with_kwargs is True, the forward hook will be passed the kwargs given to the forward function and is expected to return the output, possibly modified. The hook should have the following signature:

hook(module, args, kwargs, output) -> None or modified output

Parameters
  • hook (Callable) – The user-defined hook to be registered.
  • prepend (bool) – If True, the provided hook will be fired before all existing forward hooks on this torch.nn.modules.Module. Otherwise, the provided hook will be fired after all existing forward hooks on this torch.nn.modules.Module. Note that global forward hooks registered with register_module_forward_hook() will fire before all hooks registered by this method. Default: False
  • with_kwargs (bool) – If True, the hook will be passed the kwargs given to the forward function. Default: False
  • always_call (bool) – If True, the hook will be run regardless of whether an exception is raised while calling the Module. Default: False

Returns
  a handle that can be used to remove the added hook by calling handle.remove()

Return type
  torch.utils.hooks.RemovableHandle
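A minimal sketch of attaching a forward hook (the Linear module and tensor shapes are illustrative, not part of the upstream reference):

>>> import torch
>>> from torch import nn
>>> net = nn.Linear(4, 2)  # illustrative module
>>> def log_output(module, args, output):
...     # (module, args, output) signature described above
...     print(tuple(output.shape))
>>> handle = net.register_forward_hook(log_output)
>>> _ = net(torch.randn(3, 4))
(3, 2)
>>> handle.remove()  # detach the hook when it is no longer needed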
register_forward_pre_hook(hook: Union[Callable[[T, Tuple[Any, ...]], Optional[Any]], Callable[[T, Tuple[Any, ...], Dict[str, Any]], Optional[Tuple[Any, Dict[str, Any]]]]], *, prepend: bool = False, with_kwargs: bool = False) → torch.utils.hooks.RemovableHandle

Register a forward pre-hook on the module.

The hook will be called every time before forward() is invoked.

If with_kwargs is false or not specified, the input contains only the positional arguments given to the module; keyword arguments are passed only to the forward, not to the hooks. The hook can modify the input. The user can return either a tuple or a single modified value from the hook; a single returned value will be wrapped into a tuple (unless it is already a tuple). The hook should have the following signature:

hook(module, args) -> None or modified input

If with_kwargs is true, the forward pre-hook will be passed the kwargs given to the forward function, and if the hook modifies the input, both the args and kwargs should be returned. The hook should have the following signature:

hook(module, args, kwargs) -> None or a tuple of modified input and kwargs

Parameters
  • hook (Callable) – The user-defined hook to be registered.
  • prepend (bool) – If true, the provided hook will be fired before all existing forward_pre hooks on this torch.nn.modules.Module. Otherwise, the provided hook will be fired after all existing forward_pre hooks on this torch.nn.modules.Module. Note that global forward_pre hooks registered with register_module_forward_pre_hook() will fire before all hooks registered by this method. Default: False
  • with_kwargs (bool) – If true, the hook will be passed the kwargs given to the forward function. Default: False

Returns
  a handle that can be used to remove the added hook by calling handle.remove()

Return type
  torch.utils.hooks.RemovableHandle
register_full_backward_hook(hook: Callable[[Module, Union[Tuple[torch.Tensor, ...], torch.Tensor], Union[Tuple[torch.Tensor, ...], torch.Tensor]], Union[None, Tuple[torch.Tensor, ...], torch.Tensor]], prepend: bool = False) → torch.utils.hooks.RemovableHandle

Register a backward hook on the module.

The hook will be called every time the gradients with respect to a module are computed, i.e. the hook will execute if and only if the gradients with respect to module outputs are computed. The hook should have the following signature:

hook(module, grad_input, grad_output) -> tuple(Tensor) or None

The grad_input and grad_output are tuples that contain the gradients with respect to the inputs and outputs respectively. The hook should not modify its arguments, but it can optionally return a new gradient with respect to the input that will be used in place of grad_input in subsequent computations. grad_input will only correspond to the inputs given as positional arguments; all kwarg arguments are ignored. Entries in grad_input and grad_output will be None for all non-Tensor arguments.

For technical reasons, when this hook is applied to a Module, its forward function will receive a view of each Tensor passed to the Module. Similarly, the caller will receive a view of each Tensor returned by the Module's forward function.

Warning
Modifying inputs or outputs in place is not allowed when using backward hooks and will raise an error.

Parameters
  • hook (Callable) – The user-defined hook to be registered.
  • prepend (bool) – If true, the provided hook will be fired before all existing backward hooks on this torch.nn.modules.Module. Otherwise, the provided hook will be fired after all existing backward hooks on this torch.nn.modules.Module. Note that global backward hooks registered with register_module_full_backward_hook() will fire before all hooks registered by this method.

Returns
  a handle that can be used to remove the added hook by calling handle.remove()

Return type
  torch.utils.hooks.RemovableHandle
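A minimal sketch of a full backward hook that inspects the gradient tuple (module and shapes are illustrative, not part of the upstream reference):

>>> import torch
>>> from torch import nn
>>> net = nn.Linear(4, 2)  # illustrative module
>>> def inspect_grads(module, grad_input, grad_output):
...     # (module, grad_input, grad_output) signature described above;
...     # returning None leaves grad_input unchanged
...     print(len(grad_output))  # one gradient per module output
>>> handle = net.register_full_backward_hook(inspect_grads)
>>> net(torch.randn(3, 4)).sum().backward()
1
>>> handle.remove()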
register_full_backward_pre_hook(hook: Callable[[Module, Union[Tuple[torch.Tensor, ...], torch.Tensor]], Union[None, Tuple[torch.Tensor, ...], torch.Tensor]], prepend: bool = False) → torch.utils.hooks.RemovableHandle

Register a backward pre-hook on the module.

The hook will be called every time the gradients for the module are computed. The hook should have the following signature:

hook(module, grad_output) -> tuple[Tensor] or None

The grad_output is a tuple. The hook should not modify its arguments, but it can optionally return a new gradient with respect to the output that will be used in place of grad_output in subsequent computations. Entries in grad_output will be None for all non-Tensor arguments.

For technical reasons, when this hook is applied to a Module, its forward function will receive a view of each Tensor passed to the Module. Similarly, the caller will receive a view of each Tensor returned by the Module's forward function.

Warning
Modifying inputs in place is not allowed when using backward hooks and will raise an error.

Parameters
  • hook (Callable) – The user-defined hook to be registered.
  • prepend (bool) – If true, the provided hook will be fired before all existing backward_pre hooks on this torch.nn.modules.Module. Otherwise, the provided hook will be fired after all existing backward_pre hooks on this torch.nn.modules.Module. Note that global backward_pre hooks registered with register_module_full_backward_pre_hook() will fire before all hooks registered by this method.

Returns
  a handle that can be used to remove the added hook by calling handle.remove()

Return type
  torch.utils.hooks.RemovableHandle
register_load_state_dict_post_hook(hook)

Register a post hook to be run after the module's load_state_dict is called.

It should have the following signature:

hook(module, incompatible_keys) -> None

The module argument is the current module that this hook is registered on, and the incompatible_keys argument is a NamedTuple consisting of attributes missing_keys and unexpected_keys. missing_keys is a list of str containing the missing keys and unexpected_keys is a list of str containing the unexpected keys.

The given incompatible_keys can be modified in place if needed.

Note that the checks performed when calling load_state_dict() with strict=True are affected by modifications the hook makes to missing_keys or unexpected_keys, as expected. Additions to either set of keys will result in an error being thrown when strict=True, and clearing out both missing and unexpected keys will avoid an error.

Returns
  a handle that can be used to remove the added hook by calling handle.remove()

Return type
  torch.utils.hooks.RemovableHandle
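A small sketch of a post hook that reports incompatible keys (the module and round-trip load are illustrative, not part of the upstream reference):

>>> from torch import nn
>>> model = nn.Linear(4, 2)  # illustrative module
>>> def report(module, incompatible_keys):
...     # incompatible_keys carries .missing_keys and .unexpected_keys
...     print(incompatible_keys.missing_keys, incompatible_keys.unexpected_keys)
>>> handle = model.register_load_state_dict_post_hook(report)
>>> _ = model.load_state_dict(model.state_dict())  # round trip: both lists empty
[] []
>>> handle.remove()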
register_module(name: str, module: Optional[Module]) → None

Alias for add_module().

register_parameter(name: str, param: Optional[torch.nn.parameter.Parameter]) → None

Add a parameter to the module.

The parameter can be accessed as an attribute using the given name.

Parameters
  • name (str) – name of the parameter. The parameter can be accessed from this module using the given name.
  • param (Parameter or None) – parameter to be added to the module. If None, then operations that run on parameters, such as cuda, are ignored, and the parameter is not included in the module's state_dict.
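A short sketch of registering a custom trainable parameter (the ScaledIdentity module is an illustrative assumption):

>>> import torch
>>> from torch import nn
>>> class ScaledIdentity(nn.Module):  # illustrative module
...     def __init__(self):
...         super().__init__()
...         # registers self.scale and includes it in parameters()/state_dict()
...         self.register_parameter('scale', nn.Parameter(torch.ones(1)))
...     def forward(self, x):
...         return self.scale * x
>>> m = ScaledIdentity()
>>> list(dict(m.named_parameters()))
['scale']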
register_state_dict_pre_hook(hook)

Register a pre-hook for the state_dict() method.

These hooks will be called with the arguments self, prefix, and keep_vars before calling state_dict on self. The registered hooks can be used to perform pre-processing before the state_dict call is made.

requires_grad_(requires_grad: bool = True) → T

Change whether autograd should record operations on parameters in this module.

This method sets the parameters' requires_grad attributes in-place.

This method is helpful for freezing part of the module for finetuning or training parts of a model individually (e.g., GAN training).

See Locally disabling gradient computation for a comparison between .requires_grad_() and several similar mechanisms that may be confused with it.

Parameters
  requires_grad (bool) – whether autograd should record operations on parameters in this module. Default: True.

Returns
  self

Return type
  Module
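A brief sketch of freezing part of a model with requires_grad_() (the architecture is illustrative, not part of the upstream reference):

>>> from torch import nn
>>> model = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))
>>> _ = model[0].requires_grad_(False)  # freeze the first layer; returns self
>>> sum(p.requires_grad for p in model.parameters())  # only the last Linear trains
2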
set_extra_state(state: Any) → None

Set extra state contained in the loaded state_dict.

This function is called from load_state_dict() to handle any extra state found within the state_dict. Implement this function and a corresponding get_extra_state() for your module if you need to store extra state within its state_dict.

Parameters
  state (dict) – Extra state from the state_dict
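A minimal sketch of the get_extra_state()/set_extra_state() pair (the CounterModule and its steps field are illustrative assumptions):

>>> from torch import nn
>>> class CounterModule(nn.Module):  # illustrative module
...     def __init__(self):
...         super().__init__()
...         self.steps = 0  # plain Python state, not a tensor
...     def get_extra_state(self):
...         return {'steps': self.steps}  # must be picklable
...     def set_extra_state(self, state):
...         self.steps = state['steps']
>>> src, dst = CounterModule(), CounterModule()
>>> src.steps = 7
>>> _ = dst.load_state_dict(src.state_dict())  # calls set_extra_state on dst
>>> dst.steps
7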
share_memory() → T

See torch.Tensor.share_memory_().

state_dict(*args, destination=None, prefix='', keep_vars=False)

Return a dictionary containing references to the whole state of the module.

Both parameters and persistent buffers (e.g. running averages) are included. Keys are the corresponding parameter and buffer names. Parameters and buffers set to None are not included.

Note
The returned object is a shallow copy. It contains references to the module's parameters and buffers.

Warning
Currently state_dict() also accepts positional arguments for destination, prefix and keep_vars, in that order. However, this is being deprecated and keyword arguments will be enforced in future releases.

Warning
Please avoid the use of the destination argument as it is not designed for end-users.

Parameters
  • destination (dict, optional) – If provided, the state of the module will be updated into the dict and the same object is returned. Otherwise, an OrderedDict will be created and returned. Default: None.
  • prefix (str, optional) – a prefix added to parameter and buffer names to compose the keys in state_dict. Default: ''.
  • keep_vars (bool, optional) – by default the Tensors returned in the state dict are detached from autograd. If set to True, detaching will not be performed. Default: False.

Returns
  a dictionary containing the whole state of the module

Return type
  dict

Example:

>>> # xdoctest: +SKIP("undefined vars")
>>> module.state_dict().keys()
['bias', 'weight']

to(*args, **kwargs)

Move and/or cast the parameters and buffers.

This can be called as

to(device=None, dtype=None, non_blocking=False)
to(dtype, non_blocking=False)
to(tensor, non_blocking=False)
to(memory_format=torch.channels_last)

Its signature is similar to torch.Tensor.to(), but only accepts floating point or complex dtypes. In addition, this method will only cast the floating point or complex parameters and buffers to dtype (if given). The integral parameters and buffers will be moved to device, if that is given, but with dtypes unchanged. When non_blocking is set, it tries to convert/move asynchronously with respect to the host if possible, e.g., moving CPU Tensors with pinned memory to CUDA devices.

See below for examples.

Note
This method modifies the module in-place.

Parameters
  • device (torch.device) – the desired device of the parameters and buffers in this module
  • dtype (torch.dtype) – the desired floating point or complex dtype of the parameters and buffers in this module
  • tensor (torch.Tensor) – Tensor whose dtype and device are the desired dtype and device for all parameters and buffers in this module
  • memory_format (torch.memory_format) – the desired memory format for 4D parameters and buffers in this module (keyword only argument)

Returns
  self

Return type
  Module

Examples:

>>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> linear = nn.Linear(2, 2)
>>> linear.weight
Parameter containing:
tensor([[ 0.1913, -0.3420],
        [-0.5113, -0.2325]])
>>> linear.to(torch.double)
Linear(in_features=2, out_features=2, bias=True)
>>> linear.weight
Parameter containing:
tensor([[ 0.1913, -0.3420],
        [-0.5113, -0.2325]], dtype=torch.float64)
>>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA1)
>>> gpu1 = torch.device("cuda:1")
>>> linear.to(gpu1, dtype=torch.half, non_blocking=True)
Linear(in_features=2, out_features=2, bias=True)
>>> linear.weight
Parameter containing:
tensor([[ 0.1914, -0.3420],
        [-0.5112, -0.2324]], dtype=torch.float16, device='cuda:1')
>>> cpu = torch.device("cpu")
>>> linear.to(cpu)
Linear(in_features=2, out_features=2, bias=True)
>>> linear.weight
Parameter containing:
tensor([[ 0.1914, -0.3420],
        [-0.5112, -0.2324]], dtype=torch.float16)

>>> linear = nn.Linear(2, 2, bias=None).to(torch.cdouble)
>>> linear.weight
Parameter containing:
tensor([[ 0.3741+0.j,  0.2382+0.j],
        [ 0.5593+0.j, -0.4443+0.j]], dtype=torch.complex128)
>>> linear(torch.ones(3, 2, dtype=torch.cdouble))
tensor([[0.6122+0.j, 0.1150+0.j],
        [0.6122+0.j, 0.1150+0.j],
        [0.6122+0.j, 0.1150+0.j]], dtype=torch.complex128)

to_empty(*, device: Union[int, str, torch.device, None], recurse: bool = True) → T

Move the parameters and buffers to the specified device without copying storage.

Parameters
  • device (torch.device) – The desired device of the parameters and buffers in this module.
  • recurse (bool) – Whether parameters and buffers of submodules should be recursively moved to the specified device.

Returns
  self

Return type
  Module

train(mode: bool = True) → T

Set the module in training mode.

This has an effect only on certain modules. See the documentation of particular modules for details of their behavior in training/evaluation mode, if they are affected, e.g. Dropout, BatchNorm, etc.

Parameters
  mode (bool) – whether to set training mode (True) or evaluation mode (False). Default: True.

Returns
  self

Return type
  Module

type(dst_type: Union[torch.dtype, str]) → T

Casts all parameters and buffers to dst_type.

Note
This method modifies the module in-place.

Parameters
  dst_type (type or string) – the desired type

Returns
  self

Return type
  Module

xpu(device: Union[int, torch.device, None] = None) → T

Move all model parameters and buffers to the XPU.

This also makes associated parameters and buffers different objects. So it should be called before constructing the optimizer if the module will live on XPU while being optimized.

Note
This method modifies the module in-place.

Parameters
  device (int, optional) – if specified, all parameters will be copied to that device

Returns
  self

Return type
  Module

zero_grad(set_to_none: bool = True) → None

Reset gradients of all model parameters.

See the similar function under torch.optim.Optimizer for more context.

Parameters
  set_to_none (bool) – instead of setting to zero, set the grads to None. See torch.optim.Optimizer.zero_grad() for details.
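A short sketch contrasting accumulated and cleared gradients (module and input shapes are illustrative, not part of the upstream reference):

>>> import torch
>>> from torch import nn
>>> model = nn.Linear(4, 2)  # illustrative module
>>> model(torch.randn(3, 4)).sum().backward()
>>> model.weight.grad is None
False
>>> model.zero_grad()  # default set_to_none=True releases the grads
>>> model.weight.grad is None
True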
diff --git a/_rst/adaptive_functions/AdaptiveELU.html b/_rst/adaptive_functions/AdaptiveELU.html
index c84c6aff..328232ba 100644
AdaptiveELU

class AdaptiveELU(alpha=None, beta=None, gamma=None, fixed=None)[source]

Bases: AdaptiveActivationFunctionInterface

Adaptive trainable ELU activation function.

Given the function \(\text{ELU}:\mathbb{R}^n\rightarrow\mathbb{R}^n\), the adaptive function \(\text{ELU}_{\text{adaptive}}:\mathbb{R}^n\rightarrow\mathbb{R}^n\) is defined as

\[\text{ELU}_{\text{adaptive}}(\mathbf{x}) = \alpha\,\text{ELU}(\beta\mathbf{x} + \gamma),\]

where \(\alpha\), \(\beta\) and \(\gamma\) are trainable parameters.

Parameters
  • alpha (float | complex) – Scaling parameter alpha. Defaults to None. When None is passed, the variable is initialized to 1.
  • beta (float | complex) – Scaling parameter beta. Defaults to None. When None is passed, the variable is initialized to 1.
  • gamma (float | complex) – Shifting parameter gamma. Defaults to None. When None is passed, the variable is initialized to 1.
  • fixed (list) – List of parameters to fix during training, i.e. not optimized. Defaults to None.
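A minimal usage sketch; the import path is assumed from this module's location, and the input shape is illustrative:

>>> import torch
>>> from pina.adaptive_functions import AdaptiveELU  # assumed import path
>>> act = AdaptiveELU()            # alpha, beta, gamma default to trainable 1.0
>>> act(torch.randn(5, 3)).shape   # applied elementwise, shape is preserved
torch.Size([5, 3])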
add_module(name: str, module: Optional[Module]) → None

Add a child module to the current module.

The module can be accessed as an attribute using the given name.

Parameters
  • name (str) – name of the child module. The child module can be accessed from this module using the given name.
  • module (Module) – child module to be added to the module.

property alpha

The alpha variable.

apply(fn: Callable[[Module], None]) → T

Apply fn recursively to every submodule (as returned by .children()) as well as self.

Typical use includes initializing the parameters of a model (see also torch.nn.init).

Parameters
  fn (Module -> None) – function to be applied to each submodule

Returns
  self

Return type
  Module

Example:

>>> @torch.no_grad()
>>> def init_weights(m):
>>>     print(m)
>>>     if type(m) == nn.Linear:
>>>         m.weight.fill_(1.0)
>>>         print(m.weight)
>>> net = nn.Sequential(nn.Linear(2, 2), nn.Linear(2, 2))
>>> net.apply(init_weights)
Linear(in_features=2, out_features=2, bias=True)
Parameter containing:
tensor([[1., 1.],
        [1., 1.]], requires_grad=True)
Linear(in_features=2, out_features=2, bias=True)
Parameter containing:
tensor([[1., 1.],
        [1., 1.]], requires_grad=True)
Sequential(
  (0): Linear(in_features=2, out_features=2, bias=True)
  (1): Linear(in_features=2, out_features=2, bias=True)
)

property beta

The beta variable.

bfloat16() → T

Casts all floating point parameters and buffers to bfloat16 datatype.

Note
This method modifies the module in-place.

Returns
  self

Return type
  Module

buffers(recurse: bool = True) → Iterator[torch.Tensor]

Return an iterator over module buffers.

Parameters
  recurse (bool) – if True, then yields buffers of this module and all submodules. Otherwise, yields only buffers that are direct members of this module.

Yields
  torch.Tensor – module buffer

Example:

>>> # xdoctest: +SKIP("undefined vars")
>>> for buf in model.buffers():
>>>     print(type(buf), buf.size())
<class 'torch.Tensor'> (20L,)
<class 'torch.Tensor'> (20L, 1L, 5L, 5L)

children() → Iterator[torch.nn.modules.module.Module]

Return an iterator over immediate children modules.

Yields
  Module – a child module
float() → T

Casts all floating point parameters and buffers to float datatype.

Note
This method modifies the module in-place.

Returns
  self

Return type
  Module

forward(x)

Define the computation performed at every call. The activation function is applied to the input elementwise.

Parameters
  x (torch.Tensor | LabelTensor) – The input tensor on which to evaluate the activation function.

property func

The callable activation function.

property gamma

The gamma variable.

get_buffer(target: str) → torch.Tensor

Return the buffer given by target if it exists, otherwise throw an error.

See the docstring for get_submodule for a more detailed explanation of this method's functionality as well as how to correctly specify target.

Parameters
  target – The fully-qualified string name of the buffer to look for. (See get_submodule for how to specify a fully-qualified string.)

Returns
  The buffer referenced by target

Return type
  torch.Tensor

Raises
  AttributeError – If the target string references an invalid path or resolves to something that is not a buffer

get_extra_state() → Any

Return any extra state to include in the module's state_dict.

Implement this and a corresponding set_extra_state() for your module if you need to store extra state. This function is called when building the module's state_dict().

Note that extra state should be picklable to ensure working serialization of the state_dict. We only provide backwards-compatibility guarantees for serializing Tensors; other objects may break backwards compatibility if their serialized pickled form changes.

Returns
  Any extra state to store in the module's state_dict

Return type
  object

get_parameter(target: str) → torch.nn.parameter.Parameter

Return the parameter given by target if it exists, otherwise throw an error.

See the docstring for get_submodule for a more detailed explanation of this method's functionality as well as how to correctly specify target.

Parameters
  target – The fully-qualified string name of the Parameter to look for. (See get_submodule for how to specify a fully-qualified string.)

Returns
  The Parameter referenced by target

Return type
  torch.nn.Parameter

Raises
  AttributeError – If the target string references an invalid path or resolves to something that is not an nn.Parameter

get_submodule(target: str) → torch.nn.modules.module.Module

Return the submodule given by target if it exists, otherwise throw an error.

For example, let's say you have an nn.Module A that looks like this:

A(
    (net_b): Module(
        (net_c): Module(
            (conv): Conv2d(16, 33, kernel_size=(3, 3), stride=(2, 2))
        )
        (linear): Linear(in_features=100, out_features=200, bias=True)
    )
)

(The diagram shows an nn.Module A. A has a nested submodule net_b, which itself has two submodules net_c and linear. net_c then has a submodule conv.)

To check whether or not we have the linear submodule, we would call get_submodule("net_b.linear"). To check whether we have the conv submodule, we would call get_submodule("net_b.net_c.conv").

The runtime of get_submodule is bounded by the degree of module nesting in target. A query against named_modules achieves the same result, but it is O(N) in the number of transitive modules. So, for a simple check to see if some submodule exists, get_submodule should always be used.

Parameters
  target – The fully-qualified string name of the submodule to look for. (See above example for how to specify a fully-qualified string.)

Returns
  The submodule referenced by target

Return type
  torch.nn.Module

Raises
  AttributeError – If the target string references an invalid path or resolves to something that is not an nn.Module
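A runnable companion to the diagram above (the nesting is illustrative; nn.Sequential names its children "0", "1", and so on):

>>> from torch import nn
>>> net = nn.Sequential(nn.Linear(4, 8), nn.Sequential(nn.ReLU(), nn.Linear(8, 2)))
>>> net.get_submodule("1.1")  # dotted path follows attribute nesting
Linear(in_features=8, out_features=2, bias=True)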
half() → T

Casts all floating point parameters and buffers to half datatype.

Note
This method modifies the module in-place.

Returns
  self

Return type
  Module

ipu(device: Union[int, torch.device, None] = None) → T

Move all model parameters and buffers to the IPU.

This also makes associated parameters and buffers different objects. So it should be called before constructing the optimizer if the module will live on IPU while being optimized.

Note
This method modifies the module in-place.

Parameters
  device (int, optional) – if specified, all parameters will be copied to that device

Returns
  self

Return type
  Module

load_state_dict(state_dict: Mapping[str, Any], strict: bool = True, assign: bool = False)

Copy parameters and buffers from state_dict into this module and its descendants.

If strict is True, then the keys of state_dict must exactly match the keys returned by this module's state_dict() function.

Warning
If assign is True, the optimizer must be created after the call to load_state_dict unless get_swap_module_params_on_conversion() is True.

Parameters
  • state_dict (dict) – a dict containing parameters and persistent buffers.
  • strict (bool, optional) – whether to strictly enforce that the keys in state_dict match the keys returned by this module's state_dict() function. Default: True
  • assign (bool, optional) – When False, the properties of the tensors in the current module are preserved, while when True, the properties of the Tensors in the state dict are preserved. The only exception is the requires_grad field, for which the value from the module is preserved. Default: False

Returns
  • missing_keys is a list of str containing the missing keys
  • unexpected_keys is a list of str containing the unexpected keys

Return type
  NamedTuple with missing_keys and unexpected_keys fields

Note
If a parameter or buffer is registered as None and its corresponding key exists in state_dict, load_state_dict() will raise a RuntimeError.
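A small round-trip sketch showing the returned NamedTuple (the modules are illustrative, not part of the upstream reference):

>>> from torch import nn
>>> src, dst = nn.Linear(4, 2), nn.Linear(4, 2)
>>> result = dst.load_state_dict(src.state_dict())
>>> result.missing_keys, result.unexpected_keys  # empty when keys match exactly
([], [])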
modules() → Iterator[torch.nn.modules.module.Module]

Return an iterator over all modules in the network.

Yields
  Module – a module in the network

Note
Duplicate modules are returned only once. In the following example, l will be returned only once.

Example:

>>> l = nn.Linear(2, 2)
>>> net = nn.Sequential(l, l)
>>> for idx, m in enumerate(net.modules()):
...     print(idx, '->', m)

0 -> Sequential(
  (0): Linear(in_features=2, out_features=2, bias=True)
  (1): Linear(in_features=2, out_features=2, bias=True)
)
1 -> Linear(in_features=2, out_features=2, bias=True)

named_buffers(prefix: str = '', recurse: bool = True, remove_duplicate: bool = True) → Iterator[Tuple[str, torch.Tensor]]

Return an iterator over module buffers, yielding both the name of the buffer as well as the buffer itself.

Parameters
  • prefix (str) – prefix to prepend to all buffer names.
  • recurse (bool, optional) – if True, then yields buffers of this module and all submodules. Otherwise, yields only buffers that are direct members of this module. Defaults to True.
  • remove_duplicate (bool, optional) – whether to remove the duplicated buffers in the result. Defaults to True.

Yields
  (str, torch.Tensor) – Tuple containing the name and buffer

Example:

>>> # xdoctest: +SKIP("undefined vars")
>>> for name, buf in self.named_buffers():
>>>     if name in ['running_var']:
>>>         print(buf.size())

named_children() → Iterator[Tuple[str, torch.nn.modules.module.Module]]

Return an iterator over immediate children modules, yielding both the name of the module as well as the module itself.

Yields
  (str, Module) – Tuple containing a name and child module

Example:

>>> # xdoctest: +SKIP("undefined vars")
>>> for name, module in model.named_children():
>>>     if name in ['conv4', 'conv5']:
>>>         print(module)

named_modules(memo: Optional[Set[Module]] = None, prefix: str = '', remove_duplicate: bool = True)

Return an iterator over all modules in the network, yielding both the name of the module as well as the module itself.

Parameters
  • memo – a memo to store the set of modules already added to the result
  • prefix – a prefix that will be added to the name of the module
  • remove_duplicate – whether to remove the duplicated module instances in the result or not

Yields
  (str, Module) – Tuple of name and module

Note
Duplicate modules are returned only once. In the following example, l will be returned only once.

Example:

>>> l = nn.Linear(2, 2)
>>> net = nn.Sequential(l, l)
>>> for idx, m in enumerate(net.named_modules()):
...     print(idx, '->', m)

0 -> ('', Sequential(
  (0): Linear(in_features=2, out_features=2, bias=True)
  (1): Linear(in_features=2, out_features=2, bias=True)
))
1 -> ('0', Linear(in_features=2, out_features=2, bias=True))
(The remaining inherited torch.nn.Module members, named_parameters through zero_grad, are identical to those documented above and are not repeated here.)
diff --git a/_rst/adaptive_functions/AdaptiveExp.html b/_rst/adaptive_functions/AdaptiveExp.html
index 26d505ca..e07dd317 100644
AdaptiveExp

class AdaptiveExp(alpha=None, beta=None, fixed=None)[source]

Bases: AdaptiveActivationFunctionInterface

Adaptive trainable exp function.

Given the function \(\text{exp}:\mathbb{R}^n\rightarrow\mathbb{R}^n\), the adaptive function \(\text{exp}_{\text{adaptive}}:\mathbb{R}^n\rightarrow\mathbb{R}^n\) is defined as

\[\text{exp}_{\text{adaptive}}(\mathbf{x}) = \alpha\,\exp(\beta\mathbf{x}),\]

where \(\alpha\) and \(\beta\) are trainable parameters.

Parameters
  • alpha (float | complex) – Scaling parameter alpha. Defaults to None. When None is passed, the variable is initialized to 1.
  • beta (float | complex) – Scaling parameter beta. Defaults to None. When None is passed, the variable is initialized to 1.
  • fixed (list) – List of parameters to fix during training, i.e. not optimized. Defaults to None.
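A minimal usage sketch; the import path is assumed from this module's location:

>>> import torch
>>> from pina.adaptive_functions import AdaptiveExp  # assumed import path
>>> act = AdaptiveExp()           # alpha and beta default to trainable 1.0
>>> act(torch.zeros(3)).shape     # applied elementwise, shape is preserved
torch.Size([3])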
    -add_module(name: str, module: Optional[Module]) → None
    -

    Add a child module to the current module.

    -

    The module can be accessed as an attribute using the given name.

    -
    -
    Parameters
    -
      -
    • name (str) – name of the child module. The child module can be -accessed from this module using the given name

    • -
    • module (Module) – child module to be added to the module.

    • -
    -
    -
    -

- -
-
-property alpha
-

The alpha variable.

-
- -
-
-apply(fn: Callable[[Module], None]) → T
-

Apply fn recursively to every submodule (as returned by .children()) as well as self.

-

Typical use includes initializing the parameters of a model -(see also torch.nn.init).

-
-
Parameters
-

fn (Module -> None) – function to be applied to each submodule

-
-
Returns
-

self

-
-
Return type
-

Module

-
-
-

Example:

-
>>> @torch.no_grad()
->>> def init_weights(m):
->>>     print(m)
->>>     if type(m) == nn.Linear:
->>>         m.weight.fill_(1.0)
->>>         print(m.weight)
->>> net = nn.Sequential(nn.Linear(2, 2), nn.Linear(2, 2))
->>> net.apply(init_weights)
-Linear(in_features=2, out_features=2, bias=True)
-Parameter containing:
-tensor([[1., 1.],
-        [1., 1.]], requires_grad=True)
-Linear(in_features=2, out_features=2, bias=True)
-Parameter containing:
-tensor([[1., 1.],
-        [1., 1.]], requires_grad=True)
-Sequential(
-  (0): Linear(in_features=2, out_features=2, bias=True)
-  (1): Linear(in_features=2, out_features=2, bias=True)
-)
-
-
-
-
-property beta
-

The beta variable.

-
- -
-
-bfloat16() → T
-

Casts all floating point parameters and buffers to bfloat16 datatype.

-
-

Note

-

This method modifies the module in-place.

-
-
-
Returns
-

self

-
-
Return type
-

Module

-
-
-
+
-
-
-buffers(recurse: bool = True) → Iterator[torch.Tensor]
-

Return an iterator over module buffers.

-
-
Parameters
-

recurse (bool) – if True, then yields buffers of this module -and all submodules. Otherwise, yields only buffers that -are direct members of this module.

-
-
Yields
-

torch.Tensor – module buffer

-
-
-

Example:

-
>>> # xdoctest: +SKIP("undefined vars")
->>> for buf in model.buffers():
->>>     print(type(buf), buf.size())
-<class 'torch.Tensor'> (20L,)
-<class 'torch.Tensor'> (20L, 1L, 5L, 5L)
-
-
-
-
-
-children() → Iterator[torch.nn.modules.module.Module]
-

Return an iterator over immediate children modules.

-
-
Yields
-

Module – a child module

-
-
-
+
+ + + + + +
+ + + +
+ + +
+
+ +
+ +
+
+
+ + + + -
-
-float() → T
-

Casts all floating point parameters and buffers to float datatype.

-
-

Note

-

This method modifies the module in-place.

+
+ -
-
Returns
-

self

-
-
Return type
-

Module

-
-
-
- -
-
-forward(x)
-

Define the computation performed at every call. -The function to the input elementwise.

-
-
Parameters
-

x (torch.Tensor | LabelTensor) – The input tensor to evaluate the activation function.

-
-
-
- -
-
-property func
-

The callable activation function.

-
- -
-
-property gamma
-

The gamma variable.

-
- -
-
-get_buffer(target: str) → torch.Tensor
-

Return the buffer given by target if it exists, otherwise throw an error.

-

See the docstring for get_submodule for a more detailed -explanation of this method’s functionality as well as how to -correctly specify target.

-
-
Parameters
-

target – The fully-qualified string name of the buffer -to look for. (See get_submodule for how to specify a -fully-qualified string.)

-
-
Returns
-

The buffer referenced by target

-
-
Return type
-

torch.Tensor

-
-
Raises
-

AttributeError – If the target string references an invalid - path or resolves to something that is not a - buffer

-
-
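Example (a small sketch with a standard layer; nothing here is PINA-specific):

>>> import torch.nn as nn
>>> model = nn.Sequential(nn.BatchNorm1d(8))
>>> model.get_buffer('0.running_mean').shape  # child '0', buffer 'running_mean'
torch.Size([8])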
-
- -
-
-get_extra_state() → Any
-

Return any extra state to include in the module’s state_dict.

-

Implement this and a corresponding set_extra_state() for your module -if you need to store extra state. This function is called when building the -module’s state_dict().

-

Note that extra state should be picklable to ensure working serialization -of the state_dict. We only provide backwards compatibility guarantees -for serializing Tensors; other objects may break backwards compatibility if -their serialized pickled form changes.

-
-
Returns
-

Any extra state to store in the module’s state_dict

-
-
Return type
-

object

-
-
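Example (a hedged sketch of the extra-state protocol; the Counter class and its steps field are illustrative only):

>>> import torch.nn as nn
>>> class Counter(nn.Module):
...     def __init__(self):
...         super().__init__()
...         self.steps = 0  # plain, picklable Python state
...     def get_extra_state(self):
...         return {'steps': self.steps}
...     def set_extra_state(self, state):
...         self.steps = state['steps']
>>> m = Counter()
>>> m.steps = 7
>>> m2 = Counter()
>>> m2.load_state_dict(m.state_dict())  # round-trips via the '_extra_state' key
<All keys matched successfully>
>>> m2.steps
7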
-
- -
-
-get_parameter(target: str) → torch.nn.parameter.Parameter
-

Return the parameter given by target if it exists, otherwise throw an error.

-

See the docstring for get_submodule for a more detailed -explanation of this method’s functionality as well as how to -correctly specify target.

-
-
Parameters
-

target – The fully-qualified string name of the Parameter -to look for. (See get_submodule for how to specify a -fully-qualified string.)

-
-
Returns
-

The Parameter referenced by target

-
-
Return type
-

torch.nn.Parameter

-
-
Raises
-

AttributeError – If the target string references an invalid - path or resolves to something that is not an - nn.Parameter

-
-
-
- -
-
-get_submodule(target: str) → torch.nn.modules.module.Module
-

Return the submodule given by target if it exists, otherwise throw an error.

-

For example, let’s say you have an nn.Module A that -looks like this:

-
A(
-    (net_b): Module(
-        (net_c): Module(
-            (conv): Conv2d(16, 33, kernel_size=(3, 3), stride=(2, 2))
-        )
-        (linear): Linear(in_features=100, out_features=200, bias=True)
-    )
-)
-
-
-

(The diagram shows an nn.Module A. A has a nested -submodule net_b, which itself has two submodules net_c -and linear. net_c then has a submodule conv.)

-

To check whether or not we have the linear submodule, we -would call get_submodule("net_b.linear"). To check whether -we have the conv submodule, we would call -get_submodule("net_b.net_c.conv").

-

The runtime of get_submodule is bounded by the degree -of module nesting in target. A query against -named_modules achieves the same result, but it is O(N) in -the number of transitive modules. So, for a simple check to see -if some submodule exists, get_submodule should always be -used.

-
-
Parameters
-

target – The fully-qualified string name of the submodule -to look for. (See above example for how to specify a -fully-qualified string.)

-
-
Returns
-

The submodule referenced by target

-
-
Return type
-

torch.nn.Module

-
-
Raises
-

AttributeError – If the target string references an invalid - path or resolves to something that is not an - nn.Module

-
-
-
- -
-
-half() → T
-

Casts all floating point parameters and buffers to half datatype.

-
-

Note

-

This method modifies the module in-place.

-
-
-
Returns
-

self

-
-
Return type
-

Module

-
-
-
- -
-
-ipu(device: Union[int, torch.device, None] = None) → T
-

Move all model parameters and buffers to the IPU.

-

This also makes associated parameters and buffers different objects. So -it should be called before constructing optimizer if the module will -live on IPU while being optimized.

-
-

Note

-

This method modifies the module in-place.

-
-
-
Parameters
-

device (int, optional) – if specified, all parameters will be -copied to that device

-
-
Returns
-

self

-
-
Return type
-

Module

-
-
-
- -
-
-load_state_dict(state_dict: Mapping[str, Any], strict: bool = True, assign: bool = False)
-

Copy parameters and buffers from state_dict into this module and its descendants.

-

If strict is True, then -the keys of state_dict must exactly match the keys returned -by this module’s state_dict() function.

-
-

Warning

-

If assign is True the optimizer must be created after -the call to load_state_dict unless -get_swap_module_params_on_conversion() is True.

-
-
-
Parameters
-
    -
  • state_dict (dict) – a dict containing parameters and -persistent buffers.

  • -
  • strict (bool, optional) – whether to strictly enforce that the keys -in state_dict match the keys returned by this module’s -state_dict() function. Default: True

  • -
• assign (bool, optional) – When False, the properties of the tensors -in the current module are preserved while when True, the -properties of the Tensors in the state dict are preserved. The only -exception is the requires_grad field of Parameters, for which the -value from the module is preserved. Default: ``False``

  • -
-
-
Returns
-

    -
  • missing_keys is a list of str containing the missing keys

  • -
  • unexpected_keys is a list of str containing the unexpected keys

  • -
-

-
-
Return type
-

NamedTuple with missing_keys and unexpected_keys fields

-
-
-
-

Note

-

If a parameter or buffer is registered as None and its corresponding key -exists in state_dict, load_state_dict() will raise a -RuntimeError.

-
-
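Example (a hedged sketch; 'checkpoint.pt' is a hypothetical file path):

>>> # xdoctest: +SKIP("hypothetical checkpoint file")
>>> import torch
>>> import torch.nn as nn
>>> model = nn.Linear(4, 2)
>>> state = torch.load('checkpoint.pt')
>>> result = model.load_state_dict(state, strict=False)
>>> print(result.missing_keys, result.unexpected_keys)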
- -
-
-modules() → Iterator[torch.nn.modules.module.Module]
-

Return an iterator over all modules in the network.

-
-
Yields
-

Module – a module in the network

-
-
-
-

Note

-

Duplicate modules are returned only once. In the following -example, l will be returned only once.

-
-

Example:

-
>>> l = nn.Linear(2, 2)
->>> net = nn.Sequential(l, l)
->>> for idx, m in enumerate(net.modules()):
-...     print(idx, '->', m)
-
-0 -> Sequential(
-  (0): Linear(in_features=2, out_features=2, bias=True)
-  (1): Linear(in_features=2, out_features=2, bias=True)
-)
-1 -> Linear(in_features=2, out_features=2, bias=True)
-
-
-
- -
-
-named_buffers(prefix: str = '', recurse: bool = True, remove_duplicate: bool = True) → Iterator[Tuple[str, torch.Tensor]]
-

Return an iterator over module buffers, yielding both the name of the buffer as well as the buffer itself.

-
-
Parameters
-
    -
  • prefix (str) – prefix to prepend to all buffer names.

  • -
  • recurse (bool, optional) – if True, then yields buffers of this module -and all submodules. Otherwise, yields only buffers that -are direct members of this module. Defaults to True.

  • -
  • remove_duplicate (bool, optional) – whether to remove the duplicated buffers in the result. Defaults to True.

  • -
-
-
Yields
-

(str, torch.Tensor) – Tuple containing the name and buffer

-
-
-

Example:

-
>>> # xdoctest: +SKIP("undefined vars")
->>> for name, buf in self.named_buffers():
->>>     if name in ['running_var']:
->>>         print(buf.size())
-
-
-
- -
-
-named_children() → Iterator[Tuple[str, torch.nn.modules.module.Module]]
-

Return an iterator over immediate children modules, yielding both the name of the module as well as the module itself.

-
-
Yields
-

(str, Module) – Tuple containing a name and child module

-
-
-

Example:

-
>>> # xdoctest: +SKIP("undefined vars")
->>> for name, module in model.named_children():
->>>     if name in ['conv4', 'conv5']:
->>>         print(module)
-
-
-
- -
-
-named_modules(memo: Optional[Set[Module]] = None, prefix: str = '', remove_duplicate: bool = True)
-

Return an iterator over all modules in the network, yielding both the name of the module as well as the module itself.

-
-
Parameters
-
    -
  • memo – a memo to store the set of modules already added to the result

  • -
  • prefix – a prefix that will be added to the name of the module

  • -
  • remove_duplicate – whether to remove the duplicated module instances in the result -or not

  • -
-
-
Yields
-

(str, Module) – Tuple of name and module

-
-
-
-

Note

-

Duplicate modules are returned only once. In the following -example, l will be returned only once.

-
-

Example:

-
>>> l = nn.Linear(2, 2)
->>> net = nn.Sequential(l, l)
->>> for idx, m in enumerate(net.named_modules()):
-...     print(idx, '->', m)
-
-0 -> ('', Sequential(
-  (0): Linear(in_features=2, out_features=2, bias=True)
-  (1): Linear(in_features=2, out_features=2, bias=True)
-))
-1 -> ('0', Linear(in_features=2, out_features=2, bias=True))
-
-
-
- -
-
-named_parameters(prefix: str = '', recurse: bool = True, remove_duplicate: bool = True) → Iterator[Tuple[str, torch.nn.parameter.Parameter]]
-

Return an iterator over module parameters, yielding both the name of the parameter as well as the parameter itself.

-
-
Parameters
-
    -
  • prefix (str) – prefix to prepend to all parameter names.

  • -
  • recurse (bool) – if True, then yields parameters of this module -and all submodules. Otherwise, yields only parameters that -are direct members of this module.

  • -
  • remove_duplicate (bool, optional) – whether to remove the duplicated -parameters in the result. Defaults to True.

  • -
-
-
Yields
-

(str, Parameter) – Tuple containing the name and parameter

-
-
-

Example:

-
>>> # xdoctest: +SKIP("undefined vars")
->>> for name, param in self.named_parameters():
->>>     if name in ['bias']:
->>>         print(param.size())
-
-
-
- -
-
-parameters(recurse: bool = True) → Iterator[torch.nn.parameter.Parameter]
-

Return an iterator over module parameters.

-

This is typically passed to an optimizer.

-
-
Parameters
-

recurse (bool) – if True, then yields parameters of this module -and all submodules. Otherwise, yields only parameters that -are direct members of this module.

-
-
Yields
-

Parameter – module parameter

-
-
-

Example:

-
>>> # xdoctest: +SKIP("undefined vars")
->>> for param in model.parameters():
->>>     print(type(param), param.size())
-<class 'torch.Tensor'> (20L,)
-<class 'torch.Tensor'> (20L, 1L, 5L, 5L)
-
-
-
- -
-
-register_backward_hook(hook: Callable[[Module, Union[Tuple[torch.Tensor, ...], torch.Tensor], Union[Tuple[torch.Tensor, ...], torch.Tensor]], Union[None, Tuple[torch.Tensor, ...], torch.Tensor]]) → torch.utils.hooks.RemovableHandle
-

Register a backward hook on the module.

-

This function is deprecated in favor of register_full_backward_hook() and -the behavior of this function will change in future versions.

-
-
Returns
-

a handle that can be used to remove the added hook by calling -handle.remove()

-
-
Return type
-

torch.utils.hooks.RemovableHandle

-
-
-
- -
-
-register_buffer(name: str, tensor: Optional[torch.Tensor], persistent: bool = True) → None
-

Add a buffer to the module.

-

This is typically used to register a buffer that should not be -considered a model parameter. For example, BatchNorm’s running_mean -is not a parameter, but is part of the module’s state. Buffers, by -default, are persistent and will be saved alongside parameters. This -behavior can be changed by setting persistent to False. The -only difference between a persistent buffer and a non-persistent buffer -is that the latter will not be a part of this module’s -state_dict.

-

Buffers can be accessed as attributes using given names.

-
-
Parameters
-
    -
  • name (str) – name of the buffer. The buffer can be accessed -from this module using the given name

  • -
  • tensor (Tensor or None) – buffer to be registered. If None, then operations -that run on buffers, such as cuda, are ignored. If None, -the buffer is not included in the module’s state_dict.

  • -
  • persistent (bool) – whether the buffer is part of this module’s -state_dict.

  • -
-
-
-

Example:

-
>>> # xdoctest: +SKIP("undefined vars")
->>> self.register_buffer('running_mean', torch.zeros(num_features))
-
-
-
- -
-
-register_forward_hook(hook: Union[Callable[[T, Tuple[Any, ...], Any], Optional[Any]], Callable[[T, Tuple[Any, ...], Dict[str, Any], Any], Optional[Any]]], *, prepend: bool = False, with_kwargs: bool = False, always_call: bool = False) → torch.utils.hooks.RemovableHandle
-

Register a forward hook on the module.

-

The hook will be called every time after forward() has computed an output.

-

If with_kwargs is False or not specified, the input contains only -the positional arguments given to the module. Keyword arguments won’t be -passed to the hooks and only to the forward. The hook can modify the -output. It can modify the input inplace but it will not have effect on -forward since this is called after forward() is called. The hook -should have the following signature:

-
hook(module, args, output) -> None or modified output
-
-
-

If with_kwargs is True, the forward hook will be passed the -kwargs given to the forward function and be expected to return the -output possibly modified. The hook should have the following signature:

-
hook(module, args, kwargs, output) -> None or modified output
-
-
-
-
Parameters
-
    -
  • hook (Callable) – The user defined hook to be registered.

  • -
  • prepend (bool) – If True, the provided hook will be fired -before all existing forward hooks on this -torch.nn.modules.Module. Otherwise, the provided -hook will be fired after all existing forward hooks on -this torch.nn.modules.Module. Note that global -forward hooks registered with -register_module_forward_hook() will fire before all hooks -registered by this method. -Default: False

  • -
  • with_kwargs (bool) – If True, the hook will be passed the -kwargs given to the forward function. -Default: False

  • -
  • always_call (bool) – If True the hook will be run regardless of -whether an exception is raised while calling the Module. -Default: False

  • -
-
-
Returns
-

a handle that can be used to remove the added hook by calling -handle.remove()

-
-
Return type
-

torch.utils.hooks.RemovableHandle

-
-
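Example (a hedged sketch: a hook that logs output shapes, removed via its handle when no longer needed):

>>> import torch
>>> import torch.nn as nn
>>> def log_shape(module, args, output):
...     print(type(module).__name__, tuple(output.shape))
>>> layer = nn.Linear(4, 2)
>>> handle = layer.register_forward_hook(log_shape)
>>> _ = layer(torch.randn(8, 4))
Linear (8, 2)
>>> handle.remove()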
-
- -
-
-register_forward_pre_hook(hook: Union[Callable[[T, Tuple[Any, ...]], Optional[Any]], Callable[[T, Tuple[Any, ...], Dict[str, Any]], Optional[Tuple[Any, Dict[str, Any]]]]], *, prepend: bool = False, with_kwargs: bool = False) → torch.utils.hooks.RemovableHandle
-

Register a forward pre-hook on the module.

-

The hook will be called every time before forward() is invoked.

-

If with_kwargs is false or not specified, the input contains only -the positional arguments given to the module. Keyword arguments won’t be -passed to the hooks and only to the forward. The hook can modify the -input. User can either return a tuple or a single modified value in the -hook. We will wrap the value into a tuple if a single value is returned -(unless that value is already a tuple). The hook should have the -following signature:

-
hook(module, args) -> None or modified input
-
-
-

If with_kwargs is true, the forward pre-hook will be passed the -kwargs given to the forward function. And if the hook modifies the -input, both the args and kwargs should be returned. The hook should have -the following signature:

-
hook(module, args, kwargs) -> None or a tuple of modified input and kwargs
-
-
-
-
Parameters
-
    -
  • hook (Callable) – The user defined hook to be registered.

  • -
  • prepend (bool) – If true, the provided hook will be fired before -all existing forward_pre hooks on this -torch.nn.modules.Module. Otherwise, the provided -hook will be fired after all existing forward_pre hooks -on this torch.nn.modules.Module. Note that global -forward_pre hooks registered with -register_module_forward_pre_hook() will fire before all -hooks registered by this method. -Default: False

  • -
  • with_kwargs (bool) – If true, the hook will be passed the kwargs -given to the forward function. -Default: False

  • -
-
-
Returns
-

a handle that can be used to remove the added hook by calling -handle.remove()

-
-
Return type
-

torch.utils.hooks.RemovableHandle

-
-
-
- -
-
-register_full_backward_hook(hook: Callable[[Module, Union[Tuple[torch.Tensor, ...], torch.Tensor], Union[Tuple[torch.Tensor, ...], torch.Tensor]], Union[None, Tuple[torch.Tensor, ...], torch.Tensor]], prepend: bool = False) → torch.utils.hooks.RemovableHandle
-

Register a backward hook on the module.

-

The hook will be called every time the gradients with respect to a module -are computed, i.e. the hook will execute if and only if the gradients with -respect to module outputs are computed. The hook should have the following -signature:

-
hook(module, grad_input, grad_output) -> tuple(Tensor) or None
-
-
-

The grad_input and grad_output are tuples that contain the gradients -with respect to the inputs and outputs respectively. The hook should -not modify its arguments, but it can optionally return a new gradient with -respect to the input that will be used in place of grad_input in -subsequent computations. grad_input will only correspond to the inputs given -as positional arguments and all kwarg arguments are ignored. Entries -in grad_input and grad_output will be None for all non-Tensor -arguments.

-

For technical reasons, when this hook is applied to a Module, its forward function will -receive a view of each Tensor passed to the Module. Similarly the caller will receive a view -of each Tensor returned by the Module’s forward function.

-
-

Warning

-

Modifying inputs or outputs inplace is not allowed when using backward hooks and -will raise an error.

-
-
-
Parameters
-
    -
  • hook (Callable) – The user-defined hook to be registered.

  • -
  • prepend (bool) – If true, the provided hook will be fired before -all existing backward hooks on this -torch.nn.modules.Module. Otherwise, the provided -hook will be fired after all existing backward hooks on -this torch.nn.modules.Module. Note that global -backward hooks registered with -register_module_full_backward_hook() will fire before -all hooks registered by this method.

  • -
-
-
Returns
-

a handle that can be used to remove the added hook by calling -handle.remove()

-
-
Return type
-

torch.utils.hooks.RemovableHandle

-
-
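Example (a hedged sketch that prints gradient norms as they flow back through a layer):

>>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> import torch
>>> import torch.nn as nn
>>> def grad_norms(module, grad_input, grad_output):
...     print([g.norm().item() for g in grad_output if g is not None])
>>> layer = nn.Linear(4, 2)
>>> _ = layer.register_full_backward_hook(grad_norms)
>>> layer(torch.randn(8, 4)).sum().backward()  # the hook fires during backward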
-
- -
-
-register_full_backward_pre_hook(hook: Callable[[Module, Union[Tuple[torch.Tensor, ...], torch.Tensor]], Union[None, Tuple[torch.Tensor, ...], torch.Tensor]], prepend: bool = False) → torch.utils.hooks.RemovableHandle
-

Register a backward pre-hook on the module.

-

The hook will be called every time the gradients for the module are computed. -The hook should have the following signature:

-
hook(module, grad_output) -> tuple[Tensor] or None
-
-
-

The grad_output is a tuple. The hook should -not modify its arguments, but it can optionally return a new gradient with -respect to the output that will be used in place of grad_output in -subsequent computations. Entries in grad_output will be None for -all non-Tensor arguments.

-

For technical reasons, when this hook is applied to a Module, its forward function will -receive a view of each Tensor passed to the Module. Similarly the caller will receive a view -of each Tensor returned by the Module’s forward function.

-
-

Warning

-

Modifying inputs inplace is not allowed when using backward hooks and -will raise an error.

-
-
-
Parameters
-
    -
  • hook (Callable) – The user-defined hook to be registered.

  • -
  • prepend (bool) – If true, the provided hook will be fired before -all existing backward_pre hooks on this -torch.nn.modules.Module. Otherwise, the provided -hook will be fired after all existing backward_pre hooks -on this torch.nn.modules.Module. Note that global -backward_pre hooks registered with -register_module_full_backward_pre_hook() will fire before -all hooks registered by this method.

  • -
-
-
Returns
-

a handle that can be used to remove the added hook by calling -handle.remove()

-
-
Return type
-

torch.utils.hooks.RemovableHandle

-
-
-
- -
-
-register_load_state_dict_post_hook(hook)
-

Register a post hook to be run after module’s load_state_dict is called.

-
-
It should have the following signature::

hook(module, incompatible_keys) -> None

-
-
-

The module argument is the current module that this hook is registered -on, and the incompatible_keys argument is a NamedTuple consisting -of attributes missing_keys and unexpected_keys. missing_keys -is a list of str containing the missing keys and -unexpected_keys is a list of str containing the unexpected keys.

-

The given incompatible_keys can be modified inplace if needed.

-

Note that the checks performed when calling load_state_dict() with -strict=True are affected by modifications the hook makes to -missing_keys or unexpected_keys, as expected. Additions to either -set of keys will result in an error being thrown when strict=True, and -clearing out both missing and unexpected keys will avoid an error.

-
-
Returns
-

a handle that can be used to remove the added hook by calling -handle.remove()

-
-
Return type
-

torch.utils.hooks.RemovableHandle

-
-
-
- -
-
-register_module(name: str, module: Optional[Module]) → None
-

Alias for add_module().

-
- -
-
-register_parameter(name: str, param: Optional[torch.nn.parameter.Parameter]) → None
-

Add a parameter to the module.

-

The parameter can be accessed as an attribute using given name.

-
-
Parameters
-
    -
  • name (str) – name of the parameter. The parameter can be accessed -from this module using the given name

  • -
  • param (Parameter or None) – parameter to be added to the module. If -None, then operations that run on parameters, such as cuda, -are ignored. If None, the parameter is not included in the -module’s state_dict.

  • -
-
-
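Example (a minimal sketch registering a trainable scalar by name):

>>> import torch
>>> import torch.nn as nn
>>> m = nn.Module()
>>> m.register_parameter('scale', nn.Parameter(torch.tensor(1.0)))
>>> m.scale  # accessible as an attribute
Parameter containing:
tensor(1., requires_grad=True)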
-
- -
-
-register_state_dict_pre_hook(hook)
-

Register a pre-hook for the state_dict() method.

-

These hooks will be called with arguments: self, prefix, -and keep_vars before calling state_dict on self. The registered -hooks can be used to perform pre-processing before the state_dict -call is made.

-
- -
-
-requires_grad_(requires_grad: bool = True) → T
-

Change if autograd should record operations on parameters in this module.

-

This method sets the parameters’ requires_grad attributes -in-place.

-

This method is helpful for freezing part of the module for finetuning -or training parts of a model individually (e.g., GAN training).

-

See Locally disabling gradient computation for a comparison between -.requires_grad_() and several similar mechanisms that may be confused with it.

-
-
Parameters
-

requires_grad (bool) – whether autograd should record operations on -parameters in this module. Default: True.

-
-
Returns
-

self

-
-
Return type
-

Module

-
-
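Example (a hedged sketch of the freezing use case mentioned above):

>>> import torch.nn as nn
>>> backbone, head = nn.Linear(8, 8), nn.Linear(8, 2)
>>> _ = backbone.requires_grad_(False)  # freeze in-place; returns self
>>> any(p.requires_grad for p in backbone.parameters())
False
>>> all(p.requires_grad for p in head.parameters())  # head stays trainable
True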
-
- -
-
-set_extra_state(state: Any) → None
-

Set extra state contained in the loaded state_dict.

-

This function is called from load_state_dict() to handle any extra state -found within the state_dict. Implement this function and a corresponding -get_extra_state() for your module if you need to store extra state within its -state_dict.

-
-
Parameters
-

state (dict) – Extra state from the state_dict

-
-
-
- -
-
-share_memory() → T
-

See torch.Tensor.share_memory_().

-
- -
-
-state_dict(*args, destination=None, prefix='', keep_vars=False)
-

Return a dictionary containing references to the whole state of the module.

-

Both parameters and persistent buffers (e.g. running averages) are -included. Keys are corresponding parameter and buffer names. -Parameters and buffers set to None are not included.

-
-

Note

-

The returned object is a shallow copy. It contains references -to the module’s parameters and buffers.

-
-
-

Warning

-

Currently state_dict() also accepts positional arguments for -destination, prefix and keep_vars in order. However, -this is being deprecated and keyword arguments will be enforced in -future releases.

-
-
-

Warning

-

Please avoid the use of argument destination as it is not -designed for end-users.

-
-
-
Parameters
-
    -
  • destination (dict, optional) – If provided, the state of module will -be updated into the dict and the same object is returned. -Otherwise, an OrderedDict will be created and returned. -Default: None.

  • -
  • prefix (str, optional) – a prefix added to parameter and buffer -names to compose the keys in state_dict. Default: ''.

  • -
  • keep_vars (bool, optional) – by default the Tensor s -returned in the state dict are detached from autograd. If it’s -set to True, detaching will not be performed. -Default: False.

  • -
-
-
Returns
-

a dictionary containing a whole state of the module

-
-
Return type
-

dict

-
-
-

Example:

-
>>> # xdoctest: +SKIP("undefined vars")
->>> module.state_dict().keys()
-['bias', 'weight']
-
-
-
- -
-
-to(*args, **kwargs)
-

Move and/or cast the parameters and buffers.

-

This can be called as

-
-
-to(device=None, dtype=None, non_blocking=False)
-
- -
-
-to(dtype, non_blocking=False)
-
- -
-
-to(tensor, non_blocking=False)
-
- -
-
-to(memory_format=torch.channels_last)
-
- -

Its signature is similar to torch.Tensor.to(), but only accepts -floating point or complex dtypes. In addition, this method will -only cast the floating point or complex parameters and buffers to dtype -(if given). The integral parameters and buffers will be moved -device, if that is given, but with dtypes unchanged. When -non_blocking is set, it tries to convert/move asynchronously -with respect to the host if possible, e.g., moving CPU Tensors with -pinned memory to CUDA devices.

-

See below for examples.

-
-

Note

-

This method modifies the module in-place.

-
-
-
Parameters
-
    -
  • device (torch.device) – the desired device of the parameters -and buffers in this module

  • -
  • dtype (torch.dtype) – the desired floating point or complex dtype of -the parameters and buffers in this module

  • -
  • tensor (torch.Tensor) – Tensor whose dtype and device are the desired -dtype and device for all parameters and buffers in this module

  • -
  • memory_format (torch.memory_format) – the desired memory -format for 4D parameters and buffers in this module (keyword -only argument)

  • -
-
-
Returns
-

self

-
-
Return type
-

Module

-
-
-

Examples:

-
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
->>> linear = nn.Linear(2, 2)
->>> linear.weight
-Parameter containing:
-tensor([[ 0.1913, -0.3420],
-        [-0.5113, -0.2325]])
->>> linear.to(torch.double)
-Linear(in_features=2, out_features=2, bias=True)
->>> linear.weight
-Parameter containing:
-tensor([[ 0.1913, -0.3420],
-        [-0.5113, -0.2325]], dtype=torch.float64)
->>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA1)
->>> gpu1 = torch.device("cuda:1")
->>> linear.to(gpu1, dtype=torch.half, non_blocking=True)
-Linear(in_features=2, out_features=2, bias=True)
->>> linear.weight
-Parameter containing:
-tensor([[ 0.1914, -0.3420],
-        [-0.5112, -0.2324]], dtype=torch.float16, device='cuda:1')
->>> cpu = torch.device("cpu")
->>> linear.to(cpu)
-Linear(in_features=2, out_features=2, bias=True)
->>> linear.weight
-Parameter containing:
-tensor([[ 0.1914, -0.3420],
-        [-0.5112, -0.2324]], dtype=torch.float16)
-
->>> linear = nn.Linear(2, 2, bias=None).to(torch.cdouble)
->>> linear.weight
-Parameter containing:
-tensor([[ 0.3741+0.j,  0.2382+0.j],
-        [ 0.5593+0.j, -0.4443+0.j]], dtype=torch.complex128)
->>> linear(torch.ones(3, 2, dtype=torch.cdouble))
-tensor([[0.6122+0.j, 0.1150+0.j],
-        [0.6122+0.j, 0.1150+0.j],
-        [0.6122+0.j, 0.1150+0.j]], dtype=torch.complex128)
-
-
-
- -
-
-to_empty(*, device: Union[int, str, torch.device, None], recurse: bool = True) → T
-

Move the parameters and buffers to the specified device without copying storage.

-
-
Parameters
-
    -
  • device (torch.device) – The desired device of the parameters -and buffers in this module.

  • -
  • recurse (bool) – Whether parameters and buffers of submodules should -be recursively moved to the specified device.

  • -
-
-
Returns
-

self

-
-
Return type
-

Module

-
-
-
- -
-
-train(mode: bool = True) → T
-

Set the module in training mode.

-

This has any effect only on certain modules. See documentations of -particular modules for details of their behaviors in training/evaluation -mode, if they are affected, e.g. Dropout, BatchNorm, -etc.

-
-
Parameters
-

mode (bool) – whether to set training mode (True) or evaluation -mode (False). Default: True.

-
-
Returns
-

self

-
-
Return type
-

Module

-
-
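Example (a short sketch; only modules such as Dropout or BatchNorm change behaviour):

>>> import torch.nn as nn
>>> net = nn.Sequential(nn.Linear(4, 4), nn.Dropout(p=0.5))
>>> _ = net.train()       # training mode: dropout active
>>> _ = net.train(False)  # evaluation mode, same as net.eval()
>>> net.training
False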
-
- -
-
-type(dst_type: Union[torch.dtype, str]) → T
-

Casts all parameters and buffers to dst_type.

-
-

Note

-

This method modifies the module in-place.

-
-
-
Parameters
-

dst_type (type or string) – the desired type

-
-
Returns
-

self

-
-
Return type
-

Module

-
-
-
- -
-
-xpu(device: Union[int, torch.device, None] = None) → T
-

Move all model parameters and buffers to the XPU.

-

This also makes associated parameters and buffers different objects. So -it should be called before constructing optimizer if the module will -live on XPU while being optimized.

-
-

Note

-

This method modifies the module in-place.

-
-
-
Parameters
-

device (int, optional) – if specified, all parameters will be -copied to that device

-
-
Returns
-

self

-
-
Return type
-

Module

-
-
-
- -
-
-zero_grad(set_to_none: bool = True) → None
-

Reset gradients of all model parameters.

-

See similar function under torch.optim.Optimizer for more context.

-
-
Parameters
-

set_to_none (bool) – instead of setting to zero, set the grads to None. -See torch.optim.Optimizer.zero_grad() for details.

-
-
-
- - - -
- - - - - - - - - - - + + \ No newline at end of file diff --git a/_rst/adaptive_functions/AdaptiveFunctionInterface.html b/_rst/adaptive_functions/AdaptiveFunctionInterface.html index ac4388d9..c50da24e 100644 --- a/_rst/adaptive_functions/AdaptiveFunctionInterface.html +++ b/_rst/adaptive_functions/AdaptiveFunctionInterface.html @@ -1,134 +1,578 @@ + - - - - - AdaptiveActivationFunctionInterface — PINA 0.1.1.post2407 documentation - - - - - - - - - - - - - - + + + + + + + + + AdaptiveActivationFunctionInterface — PINA 0.1.2 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + + + + + + + + + + + + +

    AdaptiveActivationFunctionInterface


    AdaptiveActivationFunctionInterface#

    Module for adaptive functions.

    -
    -
    -class AdaptiveActivationFunctionInterface(alpha=None, beta=None, gamma=None, fixed=None)[source]
    -

    Bases: torch.nn.modules.module.Module

    +
    +
    +class AdaptiveActivationFunctionInterface(alpha=None, beta=None, gamma=None, fixed=None)[source]#
    +

    Bases: Module

The AdaptiveActivationFunctionInterface -class makes a torch.nn.Module activation function into an adaptive +trainable activation function. If one wants to create an adaptive activation function, this class must be used as the base class.

    Given a function \(f:\mathbb{R}^n\rightarrow\mathbb{R}^m\), the adaptive @@ -153,15 +597,15 @@

    Initializes the Adaptive Function.

    -
    Parameters
    +
    Parameters:
      -

    • alpha (float | complex) – Scaling parameter alpha. Defaults to None. When None is passed, the variable is initialized to 1.


    • beta (float | complex) – Scaling parameter beta. Defaults to None. When None is passed, the variable is initialized to 1.


    • gamma (float | complex) – Shifting parameter gamma. Defaults to None. When None is passed, the variable is initialized to 1.

    • fixed (list) – List of parameters to fix during training, @@ -170,77 +614,142 @@
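A hedged subclassing sketch, not taken from this page: the import path follows the Bases reference above, and the assumption that the wrapped callable is stored on self._func after calling super().__init__ is ours, not the library's documented contract.

>>> import torch
>>> from pina.adaptive_functions.adaptive_func_interface import (
...     AdaptiveActivationFunctionInterface)
>>> class AdaptiveSiLU(AdaptiveActivationFunctionInterface):
...     def __init__(self, alpha=None, beta=None, gamma=None, fixed=None):
...         super().__init__(alpha, beta, gamma, fixed)
...         self._func = torch.nn.SiLU()  # the plain function f made adaptive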

    -
    -
    -forward(x)[source]
    +
    +
    +forward(x)[source]#

Define the computation performed at every call. The function is applied to the input elementwise.

    -
    Parameters
    -

    x (torch.Tensor | LabelTensor) – The input tensor to evaluate the activation function.

    +
    Parameters:
    +

    x (torch.Tensor | LabelTensor) – The input tensor to evaluate the activation function.

    -
    -
    -property alpha
    +
    +
    +property alpha#

    The alpha variable.

    -
    -
    -property beta
    +
    +
    +property beta#

    The beta variable.

    -
    -
    -property gamma
    +
    +
    +property gamma#

    The gamma variable.

    -
    -
    -property func
    +
    +
    +property func#

    The callable activation function.

    -
    +
-
- - - - - + - + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/_rst/adaptive_functions/AdaptiveGELU.html b/_rst/adaptive_functions/AdaptiveGELU.html index bf31154a..1f4b3626 100644 --- a/_rst/adaptive_functions/AdaptiveGELU.html +++ b/_rst/adaptive_functions/AdaptiveGELU.html @@ -1,131 +1,575 @@ + - - - - - AdaptiveGELU — PINA 0.1.1.post2407 documentation - - + + + + + + + + AdaptiveGELU — PINA 0.1.2 documentation + + + + + + + + + + + + + + + + + + + + - - - - - - - - - - + + + + + + + + + + + + + - - + + + + + + + + + + + + + +

AdaptiveGELU

-
-
-class AdaptiveGELU(alpha=None, beta=None, gamma=None, fixed=None)[source]
-

Bases: pina.adaptive_functions.adaptive_func_interface.AdaptiveActivationFunctionInterface

-

Adaptive trainable GELU activation function.


AdaptiveGELU#

+
+
+class AdaptiveGELU(alpha=None, beta=None, gamma=None, fixed=None)[source]#
+

Bases: AdaptiveActivationFunctionInterface

+

Adaptive trainable GELU activation function.

Given the function \(\text{GELU}:\mathbb{R}^n\rightarrow\mathbb{R}^n\), the adaptive function \(\text{GELU}_{\text{adaptive}}:\mathbb{R}^n\rightarrow\mathbb{R}^n\) @@ -152,15 +596,15 @@

AdaptiveGELU -
Parameters
+
Parameters:
    -

  • alpha (float | complex) – Scaling parameter alpha. Defaults to None. When None is passed, the variable is initialized to 1.


  • beta (float | complex) – Scaling parameter beta. Defaults to None. When None is passed, the variable is initialized to 1.


  • gamma (float | complex) – Shifting parameter gamma. Defaults to None. When None is passed, the variable is initialized to 1.

  • fixed (list) – List of parameters to fix during training, @@ -169,1300 +613,99 @@
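A short usage sketch, not part of the original page; it assumes fixed takes parameter names as strings.

>>> import torch
>>> from pina.adaptive_functions import AdaptiveGELU
>>> act = AdaptiveGELU(alpha=1.0, beta=1.0, gamma=0.0, fixed=['gamma'])  # gamma frozen
>>> act(torch.randn(5, 3)).shape  # adaptive GELU applied elementwise
torch.Size([5, 3])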

    AdaptiveGELU -
    -add_module(name: str, module: Optional[Module]) → None
    -

    Add a child module to the current module.

    -

    The module can be accessed as an attribute using the given name.

    -
    -
    Parameters
    -
      -
    • name (str) – name of the child module. The child module can be -accessed from this module using the given name

    • -
    • module (Module) – child module to be added to the module.

    • -
    -
    -
    -

- -
-
-property alpha
-

The alpha variable.

-
- -
-
-apply(fn: Callable[[Module], None]) → T
-

Apply fn recursively to every submodule (as returned by .children()) as well as self.

-

Typical use includes initializing the parameters of a model -(see also torch.nn.init).

-
-
Parameters
-

fn (Module -> None) – function to be applied to each submodule

-
-
Returns
-

self

-
-
Return type
-

Module

-
-
-

Example:

-
>>> @torch.no_grad()
->>> def init_weights(m):
->>>     print(m)
->>>     if type(m) == nn.Linear:
->>>         m.weight.fill_(1.0)
->>>         print(m.weight)
->>> net = nn.Sequential(nn.Linear(2, 2), nn.Linear(2, 2))
->>> net.apply(init_weights)
-Linear(in_features=2, out_features=2, bias=True)
-Parameter containing:
-tensor([[1., 1.],
-        [1., 1.]], requires_grad=True)
-Linear(in_features=2, out_features=2, bias=True)
-Parameter containing:
-tensor([[1., 1.],
-        [1., 1.]], requires_grad=True)
-Sequential(
-  (0): Linear(in_features=2, out_features=2, bias=True)
-  (1): Linear(in_features=2, out_features=2, bias=True)
-)
-
-
-
-
-property beta
-

The beta variable.

-
- -
-
-bfloat16() → T
-

Casts all floating point parameters and buffers to bfloat16 datatype.

-
-

Note

-

This method modifies the module in-place.

-
-
-
Returns
-

self

-
-
Return type
-

Module

-
-
-
+
-
-
-buffers(recurse: bool = True) → Iterator[torch.Tensor]
-

Return an iterator over module buffers.

-
-
Parameters
-

recurse (bool) – if True, then yields buffers of this module -and all submodules. Otherwise, yields only buffers that -are direct members of this module.

-
-
Yields
-

torch.Tensor – module buffer

-
-
-

Example:

-
>>> # xdoctest: +SKIP("undefined vars")
->>> for buf in model.buffers():
->>>     print(type(buf), buf.size())
-<class 'torch.Tensor'> (20L,)
-<class 'torch.Tensor'> (20L, 1L, 5L, 5L)
-
-
-
-
-
-children() → Iterator[torch.nn.modules.module.Module]
-

Return an iterator over immediate children modules.

-
-
Yields
-

Module – a child module

-
-
-
+
-
-float() → T
-

Casts all floating point parameters and buffers to float datatype.

-
-

Note

-

This method modifies the module in-place.

-
Returns
-

self

-
-
Return type
-

Module

-
-
-
- -
-
-forward(x)
-

Define the computation performed at every call. -The function is applied to the input elementwise.

-
-
Parameters
-

x (torch.Tensor | LabelTensor) – The input tensor to evaluate the activation function.

-
-
-
- -
-
-property func
-

The callable activation function.

-
- -
-
-property gamma
-

The gamma variable.

-
- -
-
-get_buffer(target: str) → torch.Tensor
-

Return the buffer given by target if it exists, otherwise throw an error.

-

See the docstring for get_submodule for a more detailed -explanation of this method’s functionality as well as how to -correctly specify target.

-
-
Parameters
-

target – The fully-qualified string name of the buffer -to look for. (See get_submodule for how to specify a -fully-qualified string.)

-
-
Returns
-

The buffer referenced by target

-
-
Return type
-

torch.Tensor

-
-
Raises
-

AttributeError – If the target string references an invalid - path or resolves to something that is not a - buffer

-
-
-
- -
-
-get_extra_state() → Any
-

Return any extra state to include in the module’s state_dict.

-

Implement this and a corresponding set_extra_state() for your module -if you need to store extra state. This function is called when building the -module’s state_dict().

-

Note that extra state should be picklable to ensure working serialization -of the state_dict. We only provide backwards compatibility guarantees -for serializing Tensors; other objects may break backwards compatibility if -their serialized pickled form changes.

-
-
Returns
-

Any extra state to store in the module’s state_dict

-
-
Return type
-

object

-
-
-
- -
-
-get_parameter(target: str) → torch.nn.parameter.Parameter
-

Return the parameter given by target if it exists, otherwise throw an error.

-

See the docstring for get_submodule for a more detailed -explanation of this method’s functionality as well as how to -correctly specify target.

-
-
Parameters
-

target – The fully-qualified string name of the Parameter -to look for. (See get_submodule for how to specify a -fully-qualified string.)

-
-
Returns
-

The Parameter referenced by target

-
-
Return type
-

torch.nn.Parameter

-
-
Raises
-

AttributeError – If the target string references an invalid - path or resolves to something that is not an - nn.Parameter

-
-
-
- -
-
-get_submodule(target: str) → torch.nn.modules.module.Module
-

Return the submodule given by target if it exists, otherwise throw an error.

-

For example, let’s say you have an nn.Module A that -looks like this:

-
A(
-    (net_b): Module(
-        (net_c): Module(
-            (conv): Conv2d(16, 33, kernel_size=(3, 3), stride=(2, 2))
-        )
-        (linear): Linear(in_features=100, out_features=200, bias=True)
-    )
-)
-
-
-

(The diagram shows an nn.Module A. A has a nested -submodule net_b, which itself has two submodules net_c -and linear. net_c then has a submodule conv.)

-

To check whether or not we have the linear submodule, we -would call get_submodule("net_b.linear"). To check whether -we have the conv submodule, we would call -get_submodule("net_b.net_c.conv").

-

The runtime of get_submodule is bounded by the degree -of module nesting in target. A query against -named_modules achieves the same result, but it is O(N) in -the number of transitive modules. So, for a simple check to see -if some submodule exists, get_submodule should always be -used.

-
-
Parameters
-

target – The fully-qualified string name of the submodule -to look for. (See above example for how to specify a -fully-qualified string.)

-
-
Returns
-

The submodule referenced by target

-
-
Return type
-

torch.nn.Module

-
-
Raises
-

AttributeError – If the target string references an invalid - path or resolves to something that is not an - nn.Module

-
-
-
- -
-
-half() → T
-

Casts all floating point parameters and buffers to half datatype.

-
-

Note

-

This method modifies the module in-place.

-
-
-
Returns
-

self

-
-
Return type
-

Module

-
-
-
- -
-
-ipu(device: Union[int, torch.device, None] = None) → T
-

Move all model parameters and buffers to the IPU.

-

This also makes associated parameters and buffers different objects. So -it should be called before constructing optimizer if the module will -live on IPU while being optimized.

-
-

Note

-

This method modifies the module in-place.

-
-
-
Parameters
-

device (int, optional) – if specified, all parameters will be -copied to that device

-
-
Returns
-

self

-
-
Return type
-

Module

-
-
-
- -
-
-load_state_dict(state_dict: Mapping[str, Any], strict: bool = True, assign: bool = False)
-

Copy parameters and buffers from state_dict into this module and its descendants.

-

If strict is True, then -the keys of state_dict must exactly match the keys returned -by this module’s state_dict() function.

-
-

Warning

-

If assign is True the optimizer must be created after -the call to load_state_dict unless -get_swap_module_params_on_conversion() is True.

-
-
-
Parameters
-
    -
  • state_dict (dict) – a dict containing parameters and -persistent buffers.

  • -
  • strict (bool, optional) – whether to strictly enforce that the keys -in state_dict match the keys returned by this module’s -state_dict() function. Default: True

  • -
• assign (bool, optional) – When False, the properties of the tensors -in the current module are preserved while when True, the -properties of the Tensors in the state dict are preserved. The only -exception is the requires_grad field of Parameters, for which the -value from the module is preserved. Default: ``False``

  • -
-
-
Returns
-

    -
  • missing_keys is a list of str containing the missing keys

  • -
  • unexpected_keys is a list of str containing the unexpected keys

  • -
-

-
-
Return type
-

NamedTuple with missing_keys and unexpected_keys fields

-
-
-
-

Note

-

If a parameter or buffer is registered as None and its corresponding key -exists in state_dict, load_state_dict() will raise a -RuntimeError.

-
-
- -
-
-modules() → Iterator[torch.nn.modules.module.Module]
-

Return an iterator over all modules in the network.

-
-
Yields
-

Module – a module in the network

-
-
-
-

Note

-

Duplicate modules are returned only once. In the following -example, l will be returned only once.

-
-

Example:

-
>>> l = nn.Linear(2, 2)
->>> net = nn.Sequential(l, l)
->>> for idx, m in enumerate(net.modules()):
-...     print(idx, '->', m)
-
-0 -> Sequential(
-  (0): Linear(in_features=2, out_features=2, bias=True)
-  (1): Linear(in_features=2, out_features=2, bias=True)
-)
-1 -> Linear(in_features=2, out_features=2, bias=True)
-
-
-
- -
-
-named_buffers(prefix: str = '', recurse: bool = True, remove_duplicate: bool = True) → Iterator[Tuple[str, torch.Tensor]]
-

Return an iterator over module buffers, yielding both the name of the buffer as well as the buffer itself.

-
-
Parameters
-
    -
  • prefix (str) – prefix to prepend to all buffer names.

  • -
  • recurse (bool, optional) – if True, then yields buffers of this module -and all submodules. Otherwise, yields only buffers that -are direct members of this module. Defaults to True.

  • -
  • remove_duplicate (bool, optional) – whether to remove the duplicated buffers in the result. Defaults to True.

  • -
-
-
Yields
-

(str, torch.Tensor) – Tuple containing the name and buffer

-
-
-

Example:

-
>>> # xdoctest: +SKIP("undefined vars")
->>> for name, buf in self.named_buffers():
->>>     if name in ['running_var']:
->>>         print(buf.size())
-
-
-
- -
-
-named_children() → Iterator[Tuple[str, torch.nn.modules.module.Module]]
-

Return an iterator over immediate children modules, yielding both the name of the module as well as the module itself.

-
-
Yields
-

(str, Module) – Tuple containing a name and child module

-
-
-

Example:

-
>>> # xdoctest: +SKIP("undefined vars")
->>> for name, module in model.named_children():
->>>     if name in ['conv4', 'conv5']:
->>>         print(module)
-
-
-
- -
-
-named_modules(memo: Optional[Set[Module]] = None, prefix: str = '', remove_duplicate: bool = True)
-

Return an iterator over all modules in the network, yielding both the name of the module as well as the module itself.

-
-
Parameters
-
    -
  • memo – a memo to store the set of modules already added to the result

  • -
  • prefix – a prefix that will be added to the name of the module

  • -
  • remove_duplicate – whether to remove the duplicated module instances in the result -or not

  • -
-
-
Yields
-

(str, Module) – Tuple of name and module

-
-
-
-

Note

-

Duplicate modules are returned only once. In the following -example, l will be returned only once.

-
-

Example:

-
>>> l = nn.Linear(2, 2)
->>> net = nn.Sequential(l, l)
->>> for idx, m in enumerate(net.named_modules()):
-...     print(idx, '->', m)
-
-0 -> ('', Sequential(
-  (0): Linear(in_features=2, out_features=2, bias=True)
-  (1): Linear(in_features=2, out_features=2, bias=True)
-))
-1 -> ('0', Linear(in_features=2, out_features=2, bias=True))
-
-
-
- -
-
-named_parameters(prefix: str = '', recurse: bool = True, remove_duplicate: bool = True) → Iterator[Tuple[str, torch.nn.parameter.Parameter]]
-

Return an iterator over module parameters, yielding both the name of the parameter as well as the parameter itself.

-
-
Parameters
-
    -
  • prefix (str) – prefix to prepend to all parameter names.

  • -
  • recurse (bool) – if True, then yields parameters of this module -and all submodules. Otherwise, yields only parameters that -are direct members of this module.

  • -
  • remove_duplicate (bool, optional) – whether to remove the duplicated -parameters in the result. Defaults to True.

  • -
-
-
Yields
-

(str, Parameter) – Tuple containing the name and parameter

-
-
-

Example:

-
>>> # xdoctest: +SKIP("undefined vars")
->>> for name, param in self.named_parameters():
->>>     if name in ['bias']:
->>>         print(param.size())
-
-
-
- -
-
-parameters(recurse: bool = True) → Iterator[torch.nn.parameter.Parameter]
-

Return an iterator over module parameters.

-

This is typically passed to an optimizer.

-
-
Parameters
-

recurse (bool) – if True, then yields parameters of this module -and all submodules. Otherwise, yields only parameters that -are direct members of this module.

-
-
Yields
-

Parameter – module parameter

-
-
-

Example:

-
>>> # xdoctest: +SKIP("undefined vars")
->>> for param in model.parameters():
->>>     print(type(param), param.size())
-<class 'torch.Tensor'> (20L,)
-<class 'torch.Tensor'> (20L, 1L, 5L, 5L)
-
-
-
- -
-
-register_backward_hook(hook: Callable[[Module, Union[Tuple[torch.Tensor, ...], torch.Tensor], Union[Tuple[torch.Tensor, ...], torch.Tensor]], Union[None, Tuple[torch.Tensor, ...], torch.Tensor]]) → torch.utils.hooks.RemovableHandle
-

Register a backward hook on the module.

-

This function is deprecated in favor of register_full_backward_hook() and -the behavior of this function will change in future versions.

-
-
Returns
-

a handle that can be used to remove the added hook by calling -handle.remove()

-
-
Return type
-

torch.utils.hooks.RemovableHandle

-
-
-
- -
-
-register_buffer(name: str, tensor: Optional[torch.Tensor], persistent: bool = True) → None
-

Add a buffer to the module.

-

This is typically used to register a buffer that should not be -considered a model parameter. For example, BatchNorm’s running_mean -is not a parameter, but is part of the module’s state. Buffers, by -default, are persistent and will be saved alongside parameters. This -behavior can be changed by setting persistent to False. The -only difference between a persistent buffer and a non-persistent buffer -is that the latter will not be a part of this module’s -state_dict.

-

Buffers can be accessed as attributes using given names.

-
-
Parameters
-
    -
  • name (str) – name of the buffer. The buffer can be accessed -from this module using the given name

  • -
  • tensor (Tensor or None) – buffer to be registered. If None, then operations -that run on buffers, such as cuda, are ignored. If None, -the buffer is not included in the module’s state_dict.

  • -
  • persistent (bool) – whether the buffer is part of this module’s -state_dict.

  • -
-
-
-

Example:

-
>>> # xdoctest: +SKIP("undefined vars")
->>> self.register_buffer('running_mean', torch.zeros(num_features))
-
-
-
- -
-
-register_forward_hook(hook: Union[Callable[[T, Tuple[Any, ...], Any], Optional[Any]], Callable[[T, Tuple[Any, ...], Dict[str, Any], Any], Optional[Any]]], *, prepend: bool = False, with_kwargs: bool = False, always_call: bool = False) → torch.utils.hooks.RemovableHandle
-

Register a forward hook on the module.

-

The hook will be called every time after forward() has computed an output.

-

If with_kwargs is False or not specified, the input contains only -the positional arguments given to the module. Keyword arguments won’t be -passed to the hooks and only to the forward. The hook can modify the -output. It can modify the input inplace but it will not have effect on -forward since this is called after forward() is called. The hook -should have the following signature:

-
hook(module, args, output) -> None or modified output
-
-
-

If with_kwargs is True, the forward hook will be passed the -kwargs given to the forward function and be expected to return the -output possibly modified. The hook should have the following signature:

-
hook(module, args, kwargs, output) -> None or modified output
-
-
-
-
Parameters
-
    -
  • hook (Callable) – The user defined hook to be registered.

  • -
  • prepend (bool) – If True, the provided hook will be fired -before all existing forward hooks on this -torch.nn.modules.Module. Otherwise, the provided -hook will be fired after all existing forward hooks on -this torch.nn.modules.Module. Note that global -forward hooks registered with -register_module_forward_hook() will fire before all hooks -registered by this method. -Default: False

  • -
  • with_kwargs (bool) – If True, the hook will be passed the -kwargs given to the forward function. -Default: False

  • -
  • always_call (bool) – If True the hook will be run regardless of -whether an exception is raised while calling the Module. -Default: False

  • -
-
-
Returns
-

a handle that can be used to remove the added hook by calling -handle.remove()

-
-
Return type
-

torch.utils.hooks.RemovableHandle

-
-
-
- -
-
-register_forward_pre_hook(hook: Union[Callable[[T, Tuple[Any, ...]], Optional[Any]], Callable[[T, Tuple[Any, ...], Dict[str, Any]], Optional[Tuple[Any, Dict[str, Any]]]]], *, prepend: bool = False, with_kwargs: bool = False) → torch.utils.hooks.RemovableHandle
-

Register a forward pre-hook on the module.

-

The hook will be called every time before forward() is invoked.

-

If with_kwargs is false or not specified, the input contains only -the positional arguments given to the module. Keyword arguments won’t be -passed to the hooks and only to the forward. The hook can modify the -input. User can either return a tuple or a single modified value in the -hook. We will wrap the value into a tuple if a single value is returned -(unless that value is already a tuple). The hook should have the -following signature:

-
hook(module, args) -> None or modified input
-
-
-

If with_kwargs is true, the forward pre-hook will be passed the -kwargs given to the forward function. And if the hook modifies the -input, both the args and kwargs should be returned. The hook should have -the following signature:

-
hook(module, args, kwargs) -> None or a tuple of modified input and kwargs
-
-
-
-
Parameters
-
    -
  • hook (Callable) – The user defined hook to be registered.

  • -
  • prepend (bool) – If true, the provided hook will be fired before -all existing forward_pre hooks on this -torch.nn.modules.Module. Otherwise, the provided -hook will be fired after all existing forward_pre hooks -on this torch.nn.modules.Module. Note that global -forward_pre hooks registered with -register_module_forward_pre_hook() will fire before all -hooks registered by this method. -Default: False

  • -
  • with_kwargs (bool) – If true, the hook will be passed the kwargs -given to the forward function. -Default: False

  • -
-
-
Returns
-

a handle that can be used to remove the added hook by calling -handle.remove()

-
-
Return type
-

torch.utils.hooks.RemovableHandle

-
-
-
- -
-
-register_full_backward_hook(hook: Callable[[Module, Union[Tuple[torch.Tensor, ...], torch.Tensor], Union[Tuple[torch.Tensor, ...], torch.Tensor]], Union[None, Tuple[torch.Tensor, ...], torch.Tensor]], prepend: bool = False) → torch.utils.hooks.RemovableHandle
-

Register a backward hook on the module.

-

The hook will be called every time the gradients with respect to a module -are computed, i.e. the hook will execute if and only if the gradients with -respect to module outputs are computed. The hook should have the following -signature:

-
hook(module, grad_input, grad_output) -> tuple(Tensor) or None
-
-
-

The grad_input and grad_output are tuples that contain the gradients -with respect to the inputs and outputs respectively. The hook should -not modify its arguments, but it can optionally return a new gradient with -respect to the input that will be used in place of grad_input in -subsequent computations. grad_input will only correspond to the inputs given -as positional arguments and all kwarg arguments are ignored. Entries -in grad_input and grad_output will be None for all non-Tensor -arguments.

-

For technical reasons, when this hook is applied to a Module, its forward function will -receive a view of each Tensor passed to the Module. Similarly the caller will receive a view -of each Tensor returned by the Module’s forward function.

-
-

Warning

-

Modifying inputs or outputs inplace is not allowed when using backward hooks and -will raise an error.

-
-
-
Parameters
-
    -
  • hook (Callable) – The user-defined hook to be registered.

  • -
  • prepend (bool) – If true, the provided hook will be fired before -all existing backward hooks on this -torch.nn.modules.Module. Otherwise, the provided -hook will be fired after all existing backward hooks on -this torch.nn.modules.Module. Note that global -backward hooks registered with -register_module_full_backward_hook() will fire before -all hooks registered by this method.

  • -
-
-
Returns
-

a handle that can be used to remove the added hook by calling -handle.remove()

-
-
Return type
-

torch.utils.hooks.RemovableHandle

-
-
-
- -
-
register_full_backward_pre_hook(hook: Callable[[Module, Union[Tuple[torch.Tensor, ...], torch.Tensor]], Union[None, Tuple[torch.Tensor, ...], torch.Tensor]], prepend: bool = False) → torch.utils.hooks.RemovableHandle

Register a backward pre-hook on the module.

The hook will be called every time the gradients for the module are computed. The hook should have the following signature:

  hook(module, grad_output) -> tuple[Tensor] or None

The grad_output is a tuple. The hook should not modify its arguments, but it can optionally return a new gradient with respect to the output that will be used in place of grad_output in subsequent computations. Entries in grad_output will be None for all non-Tensor arguments.

For technical reasons, when this hook is applied to a Module, its forward function will receive a view of each Tensor passed to the Module. Similarly, the caller will receive a view of each Tensor returned by the Module’s forward function.

Warning
  Modifying inputs inplace is not allowed when using backward hooks and will raise an error.

Parameters
  • hook (Callable) – The user-defined hook to be registered.
  • prepend (bool) – If true, the provided hook will be fired before all existing backward_pre hooks on this torch.nn.modules.Module. Otherwise, the provided hook will be fired after all existing backward_pre hooks on this torch.nn.modules.Module. Note that global backward_pre hooks registered with register_module_full_backward_pre_hook() will fire before all hooks registered by this method.

Returns
  a handle that can be used to remove the added hook by calling handle.remove()

Return type
  torch.utils.hooks.RemovableHandle
register_load_state_dict_post_hook(hook)

Register a post-hook to be run after the module’s load_state_dict is called.

It should have the following signature:

  hook(module, incompatible_keys) -> None

The module argument is the current module that this hook is registered on, and the incompatible_keys argument is a NamedTuple consisting of attributes missing_keys and unexpected_keys. missing_keys is a list of str containing the missing keys and unexpected_keys is a list of str containing the unexpected keys.

The given incompatible_keys can be modified inplace if needed.

Note that the checks performed when calling load_state_dict() with strict=True are affected by modifications the hook makes to missing_keys or unexpected_keys, as expected. Additions to either set of keys will result in an error being thrown when strict=True, and clearing out both missing and unexpected keys will avoid an error.

Returns
  a handle that can be used to remove the added hook by calling handle.remove()

Return type
  torch.utils.hooks.RemovableHandle
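A hedged sketch of a load_state_dict post-hook that tolerates one known missing key (the key name is purely illustrative):

>>> from torch import nn
>>> def ignore_known_key(module, incompatible_keys):
...     # Removing the entry also relaxes the strict=True check
...     if 'legacy_scale' in incompatible_keys.missing_keys:
...         incompatible_keys.missing_keys.remove('legacy_scale')
>>> model = nn.Linear(2, 2)
>>> handle = model.register_load_state_dict_post_hook(ignore_known_key)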
register_module(name: str, module: Optional[Module]) → None

Alias for add_module().

register_parameter(name: str, param: Optional[torch.nn.parameter.Parameter]) → None

Add a parameter to the module.

The parameter can be accessed as an attribute using the given name.

Parameters
  • name (str) – name of the parameter. The parameter can be accessed from this module using the given name.
  • param (Parameter or None) – parameter to be added to the module. If None, then operations that run on parameters, such as cuda, are ignored, and the parameter is not included in the module’s state_dict.
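Example (a minimal sketch registering an extra trainable scalar; the name 'scale' is illustrative):

>>> import torch
>>> from torch import nn
>>> layer = nn.Linear(2, 2)
>>> layer.register_parameter('scale', nn.Parameter(torch.ones(1)))
>>> sorted(dict(layer.named_parameters()))
['bias', 'scale', 'weight']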
register_state_dict_pre_hook(hook)

Register a pre-hook for the state_dict() method.

These hooks will be called with arguments self, prefix, and keep_vars before calling state_dict on self. The registered hooks can be used to perform pre-processing before the state_dict call is made.

requires_grad_(requires_grad: bool = True) → T

Change if autograd should record operations on parameters in this module.

This method sets the parameters’ requires_grad attributes in-place.

This method is helpful for freezing part of the module for finetuning or for training parts of a model individually (e.g., GAN training).

See Locally disabling gradient computation for a comparison between .requires_grad_() and several similar mechanisms that may be confused with it.

Parameters
  requires_grad (bool) – whether autograd should record operations on parameters in this module. Default: True.

Returns
  self

Return type
  Module
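Example (a minimal freezing sketch; the two-layer model is illustrative):

>>> import torch
>>> from torch import nn
>>> model = nn.Sequential(nn.Linear(4, 4), nn.Linear(4, 2))
>>> _ = model[0].requires_grad_(False)  # freeze the first layer only
>>> [p.requires_grad for p in model.parameters()]
[False, False, True, True]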
set_extra_state(state: Any) → None

Set extra state contained in the loaded state_dict.

This function is called from load_state_dict() to handle any extra state found within the state_dict. Implement this function and a corresponding get_extra_state() for your module if you need to store extra state within its state_dict.

Parameters
  state (dict) – Extra state from the state_dict.
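A hedged sketch of the set_extra_state()/get_extra_state() pair (the version field is illustrative):

>>> from torch import nn
>>> class Versioned(nn.Module):
...     def __init__(self):
...         super().__init__()
...         self.version = 1
...     def get_extra_state(self):
...         # Must be picklable; round-trips through state_dict()
...         return {'version': self.version}
...     def set_extra_state(self, state):
...         self.version = state['version']
>>> m = Versioned()
>>> m.load_state_dict(m.state_dict())
<All keys matched successfully>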
share_memory() → T

See torch.Tensor.share_memory_().
state_dict(*args, destination=None, prefix='', keep_vars=False)

Return a dictionary containing references to the whole state of the module.

Both parameters and persistent buffers (e.g. running averages) are included. Keys are the corresponding parameter and buffer names. Parameters and buffers set to None are not included.

Note
  The returned object is a shallow copy. It contains references to the module’s parameters and buffers.

Warning
  Currently state_dict() also accepts positional arguments for destination, prefix and keep_vars in order. However, this is being deprecated and keyword arguments will be enforced in future releases.

Warning
  Please avoid the use of the destination argument as it is not designed for end-users.

Parameters
  • destination (dict, optional) – If provided, the state of the module will be updated into the dict and the same object is returned. Otherwise, an OrderedDict will be created and returned. Default: None.
  • prefix (str, optional) – a prefix added to parameter and buffer names to compose the keys in state_dict. Default: ''.
  • keep_vars (bool, optional) – by default the Tensors returned in the state dict are detached from autograd. If set to True, detaching will not be performed. Default: False.

Returns
  a dictionary containing the whole state of the module

Return type
  dict

Example:

>>> # xdoctest: +SKIP("undefined vars")
>>> module.state_dict().keys()
['bias', 'weight']
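A common save/restore round-trip built on state_dict() (a hedged sketch; the file name is illustrative):

>>> import torch
>>> from torch import nn
>>> model = nn.Linear(2, 2)
>>> torch.save(model.state_dict(), 'checkpoint.pt')
>>> model.load_state_dict(torch.load('checkpoint.pt'))
<All keys matched successfully>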
to(*args, **kwargs)

Move and/or cast the parameters and buffers.

This can be called as

  to(device=None, dtype=None, non_blocking=False)
  to(dtype, non_blocking=False)
  to(tensor, non_blocking=False)
  to(memory_format=torch.channels_last)

Its signature is similar to torch.Tensor.to(), but only accepts floating point or complex dtypes. In addition, this method will only cast the floating point or complex parameters and buffers to dtype (if given). The integral parameters and buffers will be moved to device, if that is given, but with dtypes unchanged. When non_blocking is set, it tries to convert/move asynchronously with respect to the host if possible, e.g., moving CPU Tensors with pinned memory to CUDA devices.

See below for examples.

Note
  This method modifies the module in-place.

Parameters
  • device (torch.device) – the desired device of the parameters and buffers in this module
  • dtype (torch.dtype) – the desired floating point or complex dtype of the parameters and buffers in this module
  • tensor (torch.Tensor) – Tensor whose dtype and device are the desired dtype and device for all parameters and buffers in this module
  • memory_format (torch.memory_format) – the desired memory format for 4D parameters and buffers in this module (keyword only argument)

Returns
  self

Return type
  Module

Examples:

>>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> linear = nn.Linear(2, 2)
>>> linear.weight
Parameter containing:
tensor([[ 0.1913, -0.3420],
        [-0.5113, -0.2325]])
>>> linear.to(torch.double)
Linear(in_features=2, out_features=2, bias=True)
>>> linear.weight
Parameter containing:
tensor([[ 0.1913, -0.3420],
        [-0.5113, -0.2325]], dtype=torch.float64)
>>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA1)
>>> gpu1 = torch.device("cuda:1")
>>> linear.to(gpu1, dtype=torch.half, non_blocking=True)
Linear(in_features=2, out_features=2, bias=True)
>>> linear.weight
Parameter containing:
tensor([[ 0.1914, -0.3420],
        [-0.5112, -0.2324]], dtype=torch.float16, device='cuda:1')
>>> cpu = torch.device("cpu")
>>> linear.to(cpu)
Linear(in_features=2, out_features=2, bias=True)
>>> linear.weight
Parameter containing:
tensor([[ 0.1914, -0.3420],
        [-0.5112, -0.2324]], dtype=torch.float16)

>>> linear = nn.Linear(2, 2, bias=None).to(torch.cdouble)
>>> linear.weight
Parameter containing:
tensor([[ 0.3741+0.j,  0.2382+0.j],
        [ 0.5593+0.j, -0.4443+0.j]], dtype=torch.complex128)
>>> linear(torch.ones(3, 2, dtype=torch.cdouble))
tensor([[0.6122+0.j, 0.1150+0.j],
        [0.6122+0.j, 0.1150+0.j],
        [0.6122+0.j, 0.1150+0.j]], dtype=torch.complex128)
to_empty(*, device: Union[int, str, torch.device, None], recurse: bool = True) → T

Move the parameters and buffers to the specified device without copying storage.

Parameters
  • device (torch.device) – The desired device of the parameters and buffers in this module.
  • recurse (bool) – Whether parameters and buffers of submodules should be recursively moved to the specified device.

Returns
  self

Return type
  Module
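A typical use of to_empty() is materializing a module built on the meta device (a hedged sketch, assuming a PyTorch version where torch.device works as a context manager):

>>> import torch
>>> from torch import nn
>>> with torch.device('meta'):
...     big = nn.Linear(1024, 1024)      # no real storage allocated yet
>>> big = big.to_empty(device='cpu')     # allocate uninitialized storage on CPU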
train(mode: bool = True) → T

Set the module in training mode.

This has an effect only on certain modules. See the documentation of particular modules for details of their behaviors in training/evaluation mode, if they are affected, e.g. Dropout, BatchNorm, etc.

Parameters
  mode (bool) – whether to set training mode (True) or evaluation mode (False). Default: True.

Returns
  self

Return type
  Module
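Example (a minimal sketch showing the switch on Dropout):

>>> import torch
>>> from torch import nn
>>> drop = nn.Dropout(p=0.5)
>>> x = torch.ones(8)
>>> _ = drop.train()  # stochastic: entries are zeroed at random, survivors scaled by 2
>>> _ = drop.eval()   # deterministic: identity mapping
>>> torch.equal(drop(x), x)
True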
type(dst_type: Union[torch.dtype, str]) → T

Casts all parameters and buffers to dst_type.

Note
  This method modifies the module in-place.

Parameters
  dst_type (type or string) – the desired type

Returns
  self

Return type
  Module

xpu(device: Union[int, torch.device, None] = None) → T

Move all model parameters and buffers to the XPU.

This also makes associated parameters and buffers different objects. So it should be called before constructing the optimizer if the module will live on XPU while being optimized.

Note
  This method modifies the module in-place.

Parameters
  device (int, optional) – if specified, all parameters will be copied to that device

Returns
  self

Return type
  Module
zero_grad(set_to_none: bool = True) → None

Reset gradients of all model parameters.

See the similar function under torch.optim.Optimizer for more context.

Parameters
  set_to_none (bool) – instead of setting to zero, set the grads to None. See torch.optim.Optimizer.zero_grad() for details.
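Example (a sketch of where zero_grad() sits in a manual training step; the quadratic loss is illustrative):

>>> import torch
>>> from torch import nn
>>> model = nn.Linear(2, 1)
>>> opt = torch.optim.SGD(model.parameters(), lr=0.1)
>>> for _ in range(3):
...     model.zero_grad()  # clear stale gradients (here equivalent to opt.zero_grad())
...     loss = model(torch.randn(8, 2)).pow(2).mean()
...     loss.backward()
...     opt.step()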
diff --git a/_rst/adaptive_functions/AdaptiveMish.html b/_rst/adaptive_functions/AdaptiveMish.html
index 098cd705..680347dd 100644
--- a/_rst/adaptive_functions/AdaptiveMish.html
+++ b/_rst/adaptive_functions/AdaptiveMish.html
AdaptiveMish#

class AdaptiveMish(alpha=None, beta=None, gamma=None, fixed=None)[source]#

Bases: AdaptiveActivationFunctionInterface

Adaptive trainable Mish activation function.

Given the function \(\text{Mish}:\mathbb{R}^n\rightarrow\mathbb{R}^n\), the adaptive function \(\text{Mish}_{\text{adaptive}}:\mathbb{R}^n\rightarrow\mathbb{R}^n\) is defined as

\[\text{Mish}_{\text{adaptive}}(\mathbf{x}) = \alpha\,\text{Mish}(\beta\mathbf{x} + \gamma),\]

where \(\alpha\) and \(\beta\) are trainable scaling parameters and \(\gamma\) is a trainable shifting parameter.

Parameters:
  • alpha (float | complex) – Scaling parameter alpha. Defaults to None. When None is passed, the variable is initialized to 1.
  • beta (float | complex) – Scaling parameter beta. Defaults to None. When None is passed, the variable is initialized to 1.
  • gamma (float | complex) – Shifting parameter gamma. Defaults to None. When None is passed, the variable is initialized to 1.
  • fixed (list) – List of parameters to fix during training, i.e. not optimized. Defaults to None.
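A short usage sketch (assuming AdaptiveMish is imported from pina.adaptive_functions; fixing gamma is purely illustrative):

>>> import torch
>>> from pina.adaptive_functions import AdaptiveMish
>>> act = AdaptiveMish(fixed=['gamma'])    # alpha and beta stay trainable
>>> y = act(torch.linspace(-2, 2, 5))      # elementwise adaptive Mish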
add_module(name: str, module: Optional[Module]) → None

Add a child module to the current module.

The module can be accessed as an attribute using the given name.

Parameters
  • name (str) – name of the child module. The child module can be accessed from this module using the given name.
  • module (Module) – child module to be added to the module.

property alpha

The alpha variable.

apply(fn: Callable[[Module], None]) → T

Apply fn recursively to every submodule (as returned by .children()) as well as self.

Typical use includes initializing the parameters of a model (see also torch.nn.init).

Parameters
  fn (Module -> None) – function to be applied to each submodule

Returns
  self

Return type
  Module

Example:

>>> @torch.no_grad()
>>> def init_weights(m):
>>>     print(m)
>>>     if type(m) == nn.Linear:
>>>         m.weight.fill_(1.0)
>>>         print(m.weight)
>>> net = nn.Sequential(nn.Linear(2, 2), nn.Linear(2, 2))
>>> net.apply(init_weights)
Linear(in_features=2, out_features=2, bias=True)
Parameter containing:
tensor([[1., 1.],
        [1., 1.]], requires_grad=True)
Linear(in_features=2, out_features=2, bias=True)
Parameter containing:
tensor([[1., 1.],
        [1., 1.]], requires_grad=True)
Sequential(
  (0): Linear(in_features=2, out_features=2, bias=True)
  (1): Linear(in_features=2, out_features=2, bias=True)
)

property beta

The beta variable.

bfloat16() → T

Casts all floating point parameters and buffers to bfloat16 datatype.

Note
  This method modifies the module in-place.

Returns
  self

Return type
  Module

buffers(recurse: bool = True) → Iterator[torch.Tensor]

Return an iterator over module buffers.

Parameters
  recurse (bool) – if True, then yields buffers of this module and all submodules. Otherwise, yields only buffers that are direct members of this module.

Yields
  torch.Tensor – module buffer

Example:

>>> # xdoctest: +SKIP("undefined vars")
>>> for buf in model.buffers():
>>>     print(type(buf), buf.size())
<class 'torch.Tensor'> (20L,)
<class 'torch.Tensor'> (20L, 1L, 5L, 5L)

children() → Iterator[torch.nn.modules.module.Module]

Return an iterator over immediate children modules.

Yields
  Module – a child module
float() → T

Casts all floating point parameters and buffers to float datatype.

Note
  This method modifies the module in-place.

Returns
  self

Return type
  Module

forward(x)

Define the computation performed at every call. Applies the activation function to the input elementwise.

Parameters
  x (torch.Tensor | LabelTensor) – The input tensor on which the activation function is evaluated.

property func

The callable activation function.

property gamma

The gamma variable.

get_buffer(target: str) → torch.Tensor

Return the buffer given by target if it exists, otherwise throw an error.

See the docstring for get_submodule for a more detailed explanation of this method’s functionality as well as how to correctly specify target.

Parameters
  target – The fully-qualified string name of the buffer to look for. (See get_submodule for how to specify a fully-qualified string.)

Returns
  The buffer referenced by target

Return type
  torch.Tensor

Raises
  AttributeError – If the target string references an invalid path or resolves to something that is not a buffer
get_extra_state() → Any

Return any extra state to include in the module’s state_dict.

Implement this and a corresponding set_extra_state() for your module if you need to store extra state. This function is called when building the module’s state_dict().

Note that extra state should be picklable to ensure working serialization of the state_dict. We only provide backwards compatibility guarantees for serializing Tensors; other objects may break backwards compatibility if their serialized pickled form changes.

Returns
  Any extra state to store in the module’s state_dict

Return type
  object

get_parameter(target: str) → torch.nn.parameter.Parameter

Return the parameter given by target if it exists, otherwise throw an error.

See the docstring for get_submodule for a more detailed explanation of this method’s functionality as well as how to correctly specify target.

Parameters
  target – The fully-qualified string name of the Parameter to look for. (See get_submodule for how to specify a fully-qualified string.)

Returns
  The Parameter referenced by target

Return type
  torch.nn.Parameter

Raises
  AttributeError – If the target string references an invalid path or resolves to something that is not an nn.Parameter

get_submodule(target: str) → torch.nn.modules.module.Module

Return the submodule given by target if it exists, otherwise throw an error.

For example, let’s say you have an nn.Module A that looks like this:

A(
    (net_b): Module(
        (net_c): Module(
            (conv): Conv2d(16, 33, kernel_size=(3, 3), stride=(2, 2))
        )
        (linear): Linear(in_features=100, out_features=200, bias=True)
    )
)

(The diagram shows an nn.Module A. A has a nested submodule net_b, which itself has two submodules net_c and linear. net_c then has a submodule conv.)

To check whether or not we have the linear submodule, we would call get_submodule("net_b.linear"). To check whether we have the conv submodule, we would call get_submodule("net_b.net_c.conv").

The runtime of get_submodule is bounded by the degree of module nesting in target. A query against named_modules achieves the same result, but it is O(N) in the number of transitive modules. So, for a simple check to see if some submodule exists, get_submodule should always be used.

Parameters
  target – The fully-qualified string name of the submodule to look for. (See above example for how to specify a fully-qualified string.)

Returns
  The submodule referenced by target

Return type
  torch.nn.Module

Raises
  AttributeError – If the target string references an invalid path or resolves to something that is not an nn.Module
half() → T

Casts all floating point parameters and buffers to half datatype.

Note
  This method modifies the module in-place.

Returns
  self

Return type
  Module

ipu(device: Union[int, torch.device, None] = None) → T

Move all model parameters and buffers to the IPU.

This also makes associated parameters and buffers different objects. So it should be called before constructing the optimizer if the module will live on IPU while being optimized.

Note
  This method modifies the module in-place.

Parameters
  device (int, optional) – if specified, all parameters will be copied to that device

Returns
  self

Return type
  Module
load_state_dict(state_dict: Mapping[str, Any], strict: bool = True, assign: bool = False)

Copy parameters and buffers from state_dict into this module and its descendants.

If strict is True, then the keys of state_dict must exactly match the keys returned by this module’s state_dict() function.

Warning
  If assign is True the optimizer must be created after the call to load_state_dict unless get_swap_module_params_on_conversion() is True.

Parameters
  • state_dict (dict) – a dict containing parameters and persistent buffers.
  • strict (bool, optional) – whether to strictly enforce that the keys in state_dict match the keys returned by this module’s state_dict() function. Default: True
  • assign (bool, optional) – When False, the properties of the tensors in the current module are preserved, while when True, the properties of the Tensors in the state dict are preserved. The only exception is the requires_grad field, for which the value from the module is preserved. Default: False

Returns
  • missing_keys is a list of str containing the missing keys
  • unexpected_keys is a list of str containing the unexpected keys

Return type
  NamedTuple with missing_keys and unexpected_keys fields

Note
  If a parameter or buffer is registered as None and its corresponding key exists in state_dict, load_state_dict() will raise a RuntimeError.
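Example (a hedged sketch of a non-strict load; the stale key is illustrative):

>>> import torch
>>> from torch import nn
>>> model = nn.Linear(2, 2)
>>> ckpt = model.state_dict()
>>> ckpt['stale_key'] = torch.zeros(1)     # simulate an outdated checkpoint entry
>>> result = model.load_state_dict(ckpt, strict=False)
>>> result.unexpected_keys
['stale_key']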
modules() → Iterator[torch.nn.modules.module.Module]

Return an iterator over all modules in the network.

Yields
  Module – a module in the network

Note
  Duplicate modules are returned only once. In the following example, l will be returned only once.

Example:

>>> l = nn.Linear(2, 2)
>>> net = nn.Sequential(l, l)
>>> for idx, m in enumerate(net.modules()):
...     print(idx, '->', m)

0 -> Sequential(
  (0): Linear(in_features=2, out_features=2, bias=True)
  (1): Linear(in_features=2, out_features=2, bias=True)
)
1 -> Linear(in_features=2, out_features=2, bias=True)

named_buffers(prefix: str = '', recurse: bool = True, remove_duplicate: bool = True) → Iterator[Tuple[str, torch.Tensor]]

Return an iterator over module buffers, yielding both the name of the buffer as well as the buffer itself.

Parameters
  • prefix (str) – prefix to prepend to all buffer names.
  • recurse (bool, optional) – if True, then yields buffers of this module and all submodules. Otherwise, yields only buffers that are direct members of this module. Defaults to True.
  • remove_duplicate (bool, optional) – whether to remove the duplicated buffers in the result. Defaults to True.

Yields
  (str, torch.Tensor) – Tuple containing the name and buffer

Example:

>>> # xdoctest: +SKIP("undefined vars")
>>> for name, buf in self.named_buffers():
>>>     if name in ['running_var']:
>>>         print(buf.size())

named_children() → Iterator[Tuple[str, torch.nn.modules.module.Module]]

Return an iterator over immediate children modules, yielding both the name of the module as well as the module itself.

Yields
  (str, Module) – Tuple containing a name and child module

Example:

>>> # xdoctest: +SKIP("undefined vars")
>>> for name, module in model.named_children():
>>>     if name in ['conv4', 'conv5']:
>>>         print(module)

named_modules(memo: Optional[Set[Module]] = None, prefix: str = '', remove_duplicate: bool = True)

Return an iterator over all modules in the network, yielding both the name of the module as well as the module itself.

Parameters
  • memo – a memo to store the set of modules already added to the result
  • prefix – a prefix that will be added to the name of the module
  • remove_duplicate – whether to remove the duplicated module instances in the result or not

Yields
  (str, Module) – Tuple of name and module

Note
  Duplicate modules are returned only once. In the following example, l will be returned only once.

Example:

>>> l = nn.Linear(2, 2)
>>> net = nn.Sequential(l, l)
>>> for idx, m in enumerate(net.named_modules()):
...     print(idx, '->', m)

0 -> ('', Sequential(
  (0): Linear(in_features=2, out_features=2, bias=True)
  (1): Linear(in_features=2, out_features=2, bias=True)
))
1 -> ('0', Linear(in_features=2, out_features=2, bias=True))

named_parameters(prefix: str = '', recurse: bool = True, remove_duplicate: bool = True) → Iterator[Tuple[str, torch.nn.parameter.Parameter]]

Return an iterator over module parameters, yielding both the name of the parameter as well as the parameter itself.

Parameters
  • prefix (str) – prefix to prepend to all parameter names.
  • recurse (bool) – if True, then yields parameters of this module and all submodules. Otherwise, yields only parameters that are direct members of this module.
  • remove_duplicate (bool, optional) – whether to remove the duplicated parameters in the result. Defaults to True.

Yields
  (str, Parameter) – Tuple containing the name and parameter

Example:

>>> # xdoctest: +SKIP("undefined vars")
>>> for name, param in self.named_parameters():
>>>     if name in ['bias']:
>>>         print(param.size())
parameters(recurse: bool = True) → Iterator[torch.nn.parameter.Parameter]

Return an iterator over module parameters.

This is typically passed to an optimizer.

Parameters
  recurse (bool) – if True, then yields parameters of this module and all submodules. Otherwise, yields only parameters that are direct members of this module.

Yields
  Parameter – module parameter

Example:

>>> # xdoctest: +SKIP("undefined vars")
>>> for param in model.parameters():
>>>     print(type(param), param.size())
<class 'torch.Tensor'> (20L,)
<class 'torch.Tensor'> (20L, 1L, 5L, 5L)

register_backward_hook(hook: Callable[[Module, Union[Tuple[torch.Tensor, ...], torch.Tensor], Union[Tuple[torch.Tensor, ...], torch.Tensor]], Union[None, Tuple[torch.Tensor, ...], torch.Tensor]]) → torch.utils.hooks.RemovableHandle

Register a backward hook on the module.

This function is deprecated in favor of register_full_backward_hook() and the behavior of this function will change in future versions.

Returns
  a handle that can be used to remove the added hook by calling handle.remove()

Return type
  torch.utils.hooks.RemovableHandle

register_buffer(name: str, tensor: Optional[torch.Tensor], persistent: bool = True) → None

Add a buffer to the module.

This is typically used to register a buffer that should not be considered a model parameter. For example, BatchNorm’s running_mean is not a parameter, but is part of the module’s state. Buffers, by default, are persistent and will be saved alongside parameters. This behavior can be changed by setting persistent to False. The only difference between a persistent buffer and a non-persistent buffer is that the latter will not be a part of this module’s state_dict.

Buffers can be accessed as attributes using given names.

Parameters
  • name (str) – name of the buffer. The buffer can be accessed from this module using the given name.
  • tensor (Tensor or None) – buffer to be registered. If None, then operations that run on buffers, such as cuda, are ignored, and the buffer is not included in the module’s state_dict.
  • persistent (bool) – whether the buffer is part of this module’s state_dict.

Example:

>>> # xdoctest: +SKIP("undefined vars")
>>> self.register_buffer('running_mean', torch.zeros(num_features))
register_forward_hook(hook: Union[Callable[[T, Tuple[Any, ...], Any], Optional[Any]], Callable[[T, Tuple[Any, ...], Dict[str, Any], Any], Optional[Any]]], *, prepend: bool = False, with_kwargs: bool = False, always_call: bool = False) → torch.utils.hooks.RemovableHandle

Register a forward hook on the module.

The hook will be called every time after forward() has computed an output.

If with_kwargs is False or not specified, the input contains only the positional arguments given to the module. Keyword arguments won’t be passed to the hooks and only to the forward. The hook can modify the output. It can modify the input inplace, but that will have no effect on forward, since this is called after forward() is called. The hook should have the following signature:

  hook(module, args, output) -> None or modified output

If with_kwargs is True, the forward hook will be passed the kwargs given to the forward function and be expected to return the output, possibly modified. The hook should have the following signature:

  hook(module, args, kwargs, output) -> None or modified output

Parameters
  • hook (Callable) – The user defined hook to be registered.
  • prepend (bool) – If True, the provided hook will be fired before all existing forward hooks on this torch.nn.modules.Module. Otherwise, the provided hook will be fired after all existing forward hooks on this torch.nn.modules.Module. Note that global forward hooks registered with register_module_forward_hook() will fire before all hooks registered by this method. Default: False
  • with_kwargs (bool) – If True, the hook will be passed the kwargs given to the forward function. Default: False
  • always_call (bool) – If True the hook will be run regardless of whether an exception is raised while calling the Module. Default: False

Returns
  a handle that can be used to remove the added hook by calling handle.remove()

Return type
  torch.utils.hooks.RemovableHandle
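Example (a minimal sketch capturing an intermediate activation; the dict-based storage is illustrative):

>>> import torch
>>> from torch import nn
>>> activations = {}
>>> def save_output(module, args, output):
...     activations['hidden'] = output.detach()
>>> net = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 1))
>>> handle = net[0].register_forward_hook(save_output)
>>> _ = net(torch.randn(2, 4))
>>> activations['hidden'].shape
torch.Size([2, 8])
>>> handle.remove()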
diff --git a/_rst/adaptive_functions/AdaptiveReLU.html b/_rst/adaptive_functions/AdaptiveReLU.html
index dccf6a82..38c8189c 100644
--- a/_rst/adaptive_functions/AdaptiveReLU.html
+++ b/_rst/adaptive_functions/AdaptiveReLU.html
AdaptiveReLU#

class AdaptiveReLU(alpha=None, beta=None, gamma=None, fixed=None)[source]#

Bases: AdaptiveActivationFunctionInterface

Adaptive trainable ReLU activation function.

Given the function \(\text{ReLU}:\mathbb{R}^n\rightarrow\mathbb{R}^n\), the adaptive function \(\text{ReLU}_{\text{adaptive}}:\mathbb{R}^n\rightarrow\mathbb{R}^n\) is defined as

\[\text{ReLU}_{\text{adaptive}}(\mathbf{x}) = \alpha\,\text{ReLU}(\beta\mathbf{x} + \gamma),\]

where \(\alpha\) and \(\beta\) are trainable scaling parameters and \(\gamma\) is a trainable shifting parameter.

Parameters:
  • alpha (float | complex) – Scaling parameter alpha. Defaults to None. When None is passed, the variable is initialized to 1.
  • beta (float | complex) – Scaling parameter beta. Defaults to None. When None is passed, the variable is initialized to 1.
  • gamma (float | complex) – Shifting parameter gamma. Defaults to None. When None is passed, the variable is initialized to 1.
  • fixed (list) – List of parameters to fix during training, i.e. not optimized. Defaults to None.
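As with AdaptiveMish above, a brief usage sketch (assuming the class is imported from pina.adaptive_functions):

>>> import torch
>>> from pina.adaptive_functions import AdaptiveReLU
>>> act = AdaptiveReLU()                 # alpha, beta and gamma all trainable
>>> y = act(torch.linspace(-2, 2, 5))    # elementwise adaptive ReLU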
    -add_module(name: str, module: Optional[Module]) → None
    -

    Add a child module to the current module.

    -

    The module can be accessed as an attribute using the given name.

    -
    -
    Parameters
    -
      -
    • name (str) – name of the child module. The child module can be -accessed from this module using the given name

    • -
    • module (Module) – child module to be added to the module.

    • -
    -
    -
    -

- -
-
-property alpha
-

The alpha variable.

-
- -
-
-apply(fn: Callable[[Module], None]) → T
-

Apply fn recursively to every submodule (as returned by .children()) as well as self.

-

Typical use includes initializing the parameters of a model -(see also torch.nn.init).

-
-
Parameters
-

fn (Module -> None) – function to be applied to each submodule

-
-
Returns
-

self

-
-
Return type
-

Module

-
-
-

Example:

-
>>> @torch.no_grad()
->>> def init_weights(m):
->>>     print(m)
->>>     if type(m) == nn.Linear:
->>>         m.weight.fill_(1.0)
->>>         print(m.weight)
->>> net = nn.Sequential(nn.Linear(2, 2), nn.Linear(2, 2))
->>> net.apply(init_weights)
-Linear(in_features=2, out_features=2, bias=True)
-Parameter containing:
-tensor([[1., 1.],
-        [1., 1.]], requires_grad=True)
-Linear(in_features=2, out_features=2, bias=True)
-Parameter containing:
-tensor([[1., 1.],
-        [1., 1.]], requires_grad=True)
-Sequential(
-  (0): Linear(in_features=2, out_features=2, bias=True)
-  (1): Linear(in_features=2, out_features=2, bias=True)
-)
-
-
-
-
-property beta
-

The beta variable.

-
- -
-
-bfloat16() → T
-

Casts all floating point parameters and buffers to bfloat16 datatype.

-
-

Note

-

This method modifies the module in-place.

-
-
-
Returns
-

self

-
-
Return type
-

Module

-
-
-
+
-
-
-buffers(recurse: bool = True) → Iterator[torch.Tensor]
-

Return an iterator over module buffers.

-
-
Parameters
-

recurse (bool) – if True, then yields buffers of this module -and all submodules. Otherwise, yields only buffers that -are direct members of this module.

-
-
Yields
-

torch.Tensor – module buffer

-
-
-

Example:

-
>>> # xdoctest: +SKIP("undefined vars")
->>> for buf in model.buffers():
->>>     print(type(buf), buf.size())
-<class 'torch.Tensor'> (20L,)
-<class 'torch.Tensor'> (20L, 1L, 5L, 5L)
-
-
-
-
-
-children() → Iterator[torch.nn.modules.module.Module]
-

Return an iterator over immediate children modules.

-
-
Yields
-

Module – a child module

-
-
-
+
+ + + + + +
+ + + +
+ + +
+
+ +
+ +
+
+
+ + + + -
-
-float() → T
-

Casts all floating point parameters and buffers to float datatype.

-
-

Note

-

This method modifies the module in-place.

+
+ -
-
Returns
-

self

-
-
Return type
-

Module

-
-
-
- -
-
-forward(x)
-

Define the computation performed at every call. -The function to the input elementwise.

-
-
Parameters
-

x (torch.Tensor | LabelTensor) – The input tensor to evaluate the activation function.

-
-
-
- -
-
-property func
-

The callable activation function.

-
- -
-
-property gamma
-

The gamma variable.

-
- -
-
-get_buffer(target: str) → torch.Tensor
-

Return the buffer given by target if it exists, otherwise throw an error.

-

See the docstring for get_submodule for a more detailed -explanation of this method’s functionality as well as how to -correctly specify target.

-
-
Parameters
-

target – The fully-qualified string name of the buffer -to look for. (See get_submodule for how to specify a -fully-qualified string.)

-
-
Returns
-

The buffer referenced by target

-
-
Return type
-

torch.Tensor

-
-
Raises
-

AttributeError – If the target string references an invalid - path or resolves to something that is not a - buffer

-
-
-
- -
-
-get_extra_state() → Any
-

Return any extra state to include in the module’s state_dict.

-

Implement this and a corresponding set_extra_state() for your module -if you need to store extra state. This function is called when building the -module’s state_dict().

-

Note that extra state should be picklable to ensure working serialization -of the state_dict. We only provide provide backwards compatibility guarantees -for serializing Tensors; other objects may break backwards compatibility if -their serialized pickled form changes.

-
-
Returns
-

Any extra state to store in the module’s state_dict

-
-
Return type
-

object

-
-
-
- -
-
-get_parameter(target: str) → torch.nn.parameter.Parameter
-

Return the parameter given by target if it exists, otherwise throw an error.

-

See the docstring for get_submodule for a more detailed -explanation of this method’s functionality as well as how to -correctly specify target.

-
-
Parameters
-

target – The fully-qualified string name of the Parameter -to look for. (See get_submodule for how to specify a -fully-qualified string.)

-
-
Returns
-

The Parameter referenced by target

-
-
Return type
-

torch.nn.Parameter

-
-
Raises
-

AttributeError – If the target string references an invalid - path or resolves to something that is not an - nn.Parameter

-
-
-
- -
-
-get_submodule(target: str) → torch.nn.modules.module.Module
-

Return the submodule given by target if it exists, otherwise throw an error.

-

For example, let’s say you have an nn.Module A that -looks like this:

-
A(
-    (net_b): Module(
-        (net_c): Module(
-            (conv): Conv2d(16, 33, kernel_size=(3, 3), stride=(2, 2))
-        )
-        (linear): Linear(in_features=100, out_features=200, bias=True)
-    )
-)
-
-
-

(The diagram shows an nn.Module A. A has a nested -submodule net_b, which itself has two submodules net_c -and linear. net_c then has a submodule conv.)

-

To check whether or not we have the linear submodule, we -would call get_submodule("net_b.linear"). To check whether -we have the conv submodule, we would call -get_submodule("net_b.net_c.conv").

-

The runtime of get_submodule is bounded by the degree -of module nesting in target. A query against -named_modules achieves the same result, but it is O(N) in -the number of transitive modules. So, for a simple check to see -if some submodule exists, get_submodule should always be -used.

-
-
Parameters
-

target – The fully-qualified string name of the submodule -to look for. (See above example for how to specify a -fully-qualified string.)

-
-
Returns
-

The submodule referenced by target

-
-
Return type
-

torch.nn.Module

-
-
Raises
-

AttributeError – If the target string references an invalid - path or resolves to something that is not an - nn.Module

-
-
-
- -
-
-half() → T
-

Casts all floating point parameters and buffers to half datatype.

-
-

Note

-

This method modifies the module in-place.

-
-
-
Returns
-

self

-
-
Return type
-

Module

-
-
-
- -
-
-ipu(device: Union[int, torch.device, None] = None) → T
-

Move all model parameters and buffers to the IPU.

-

This also makes associated parameters and buffers different objects. So -it should be called before constructing optimizer if the module will -live on IPU while being optimized.

-
-

Note

-

This method modifies the module in-place.

-
-
-
Parameters
-

device (int, optional) – if specified, all parameters will be -copied to that device

-
-
Returns
-

self

-
-
Return type
-

Module

-
-
-
- -
-
-load_state_dict(state_dict: Mapping[str, Any], strict: bool = True, assign: bool = False)
-

Copy parameters and buffers from state_dict into this module and its descendants.

-

If strict is True, then -the keys of state_dict must exactly match the keys returned -by this module’s state_dict() function.

-
-

Warning

-

If assign is True the optimizer must be created after -the call to load_state_dict unless -get_swap_module_params_on_conversion() is True.

-
-
-
Parameters
-
    -
  • state_dict (dict) – a dict containing parameters and -persistent buffers.

  • -
  • strict (bool, optional) – whether to strictly enforce that the keys -in state_dict match the keys returned by this module’s -state_dict() function. Default: True

  • -
  • assign (bool, optional) – When False, the properties of the tensors -in the current module are preserved while when True, the -properties of the Tensors in the state dict are preserved. The only -exception is the requires_grad field of -Default: ``False`

  • -
-
-
Returns
-

    -
  • missing_keys is a list of str containing the missing keys

  • -
  • unexpected_keys is a list of str containing the unexpected keys

  • -
-

-
-
Return type
-

NamedTuple with missing_keys and unexpected_keys fields

-
-
-
-

Note

-

If a parameter or buffer is registered as None and its corresponding key -exists in state_dict, load_state_dict() will raise a -RuntimeError.

-
-
- -
-
-modules() → Iterator[torch.nn.modules.module.Module]
-

Return an iterator over all modules in the network.

-
-
Yields
-

Module – a module in the network

-
-
-
-

Note

-

Duplicate modules are returned only once. In the following -example, l will be returned only once.

-
-

Example:

-
>>> l = nn.Linear(2, 2)
->>> net = nn.Sequential(l, l)
->>> for idx, m in enumerate(net.modules()):
-...     print(idx, '->', m)
-
-0 -> Sequential(
-  (0): Linear(in_features=2, out_features=2, bias=True)
-  (1): Linear(in_features=2, out_features=2, bias=True)
-)
-1 -> Linear(in_features=2, out_features=2, bias=True)
-
-
-
- -
-
named_buffers(prefix: str = '', recurse: bool = True, remove_duplicate: bool = True) → Iterator[Tuple[str, torch.Tensor]]

    Return an iterator over module buffers, yielding both the name of the buffer as well as the buffer itself.

    Parameters:
        • prefix (str) – prefix to prepend to all buffer names.
        • recurse (bool, optional) – if True, then yields buffers of this module and all submodules. Otherwise, yields only buffers that are direct members of this module. Defaults to True.
        • remove_duplicate (bool, optional) – whether to remove the duplicated buffers in the result. Defaults to True.

    Yields: (str, torch.Tensor) – Tuple containing the name and buffer

    Example:

    >>> # xdoctest: +SKIP("undefined vars")
    >>> for name, buf in self.named_buffers():
    >>>     if name in ['running_var']:
    >>>         print(buf.size())
named_children() → Iterator[Tuple[str, torch.nn.modules.module.Module]]

    Return an iterator over immediate children modules, yielding both the name of the module as well as the module itself.

    Yields: (str, Module) – Tuple containing a name and child module

    Example:

    >>> # xdoctest: +SKIP("undefined vars")
    >>> for name, module in model.named_children():
    >>>     if name in ['conv4', 'conv5']:
    >>>         print(module)
named_modules(memo: Optional[Set[Module]] = None, prefix: str = '', remove_duplicate: bool = True)

    Return an iterator over all modules in the network, yielding both the name of the module as well as the module itself.

    Parameters:
        • memo – a memo to store the set of modules already added to the result
        • prefix – a prefix that will be added to the name of the module
        • remove_duplicate – whether to remove the duplicated module instances in the result or not

    Yields: (str, Module) – Tuple of name and module

    Note: Duplicate modules are returned only once. In the following example, l will be returned only once.

    Example:

    >>> l = nn.Linear(2, 2)
    >>> net = nn.Sequential(l, l)
    >>> for idx, m in enumerate(net.named_modules()):
    ...     print(idx, '->', m)

    0 -> ('', Sequential(
      (0): Linear(in_features=2, out_features=2, bias=True)
      (1): Linear(in_features=2, out_features=2, bias=True)
    ))
    1 -> ('0', Linear(in_features=2, out_features=2, bias=True))
named_parameters(prefix: str = '', recurse: bool = True, remove_duplicate: bool = True) → Iterator[Tuple[str, torch.nn.parameter.Parameter]]

    Return an iterator over module parameters, yielding both the name of the parameter as well as the parameter itself.

    Parameters:
        • prefix (str) – prefix to prepend to all parameter names.
        • recurse (bool) – if True, then yields parameters of this module and all submodules. Otherwise, yields only parameters that are direct members of this module.
        • remove_duplicate (bool, optional) – whether to remove the duplicated parameters in the result. Defaults to True.

    Yields: (str, Parameter) – Tuple containing the name and parameter

    Example:

    >>> # xdoctest: +SKIP("undefined vars")
    >>> for name, param in self.named_parameters():
    >>>     if name in ['bias']:
    >>>         print(param.size())
parameters(recurse: bool = True) → Iterator[torch.nn.parameter.Parameter]

    Return an iterator over module parameters.

    This is typically passed to an optimizer.

    Parameters:
        recurse (bool) – if True, then yields parameters of this module and all submodules. Otherwise, yields only parameters that are direct members of this module.

    Yields: Parameter – module parameter

    Example:

    >>> # xdoctest: +SKIP("undefined vars")
    >>> for param in model.parameters():
    >>>     print(type(param), param.size())
    <class 'torch.Tensor'> (20L,)
    <class 'torch.Tensor'> (20L, 1L, 5L, 5L)
register_backward_hook(hook: Callable[[Module, Union[Tuple[torch.Tensor, ...], torch.Tensor], Union[Tuple[torch.Tensor, ...], torch.Tensor]], Union[None, Tuple[torch.Tensor, ...], torch.Tensor]]) → torch.utils.hooks.RemovableHandle

    Register a backward hook on the module.

    This function is deprecated in favor of register_full_backward_hook() and the behavior of this function will change in future versions.

    Returns: a handle that can be used to remove the added hook by calling handle.remove()
    Return type: torch.utils.hooks.RemovableHandle
register_buffer(name: str, tensor: Optional[torch.Tensor], persistent: bool = True) → None

    Add a buffer to the module.

    This is typically used to register a buffer that should not be considered a model parameter. For example, BatchNorm’s running_mean is not a parameter, but is part of the module’s state. Buffers are persistent by default and will be saved alongside parameters. This behavior can be changed by setting persistent to False. The only difference between a persistent buffer and a non-persistent buffer is that the latter will not be a part of this module’s state_dict.

    Buffers can be accessed as attributes using given names.

    Parameters:
        • name (str) – name of the buffer. The buffer can be accessed from this module using the given name
        • tensor (Tensor or None) – buffer to be registered. If None, then operations that run on buffers, such as cuda, are ignored and the buffer is not included in the module’s state_dict.
        • persistent (bool) – whether the buffer is part of this module’s state_dict.

    Example:

    >>> # xdoctest: +SKIP("undefined vars")
    >>> self.register_buffer('running_mean', torch.zeros(num_features))
register_forward_hook(hook: Union[Callable[[T, Tuple[Any, ...], Any], Optional[Any]], Callable[[T, Tuple[Any, ...], Dict[str, Any], Any], Optional[Any]]], *, prepend: bool = False, with_kwargs: bool = False, always_call: bool = False) → torch.utils.hooks.RemovableHandle

    Register a forward hook on the module.

    The hook will be called every time after forward() has computed an output.

    If with_kwargs is False or not specified, the input contains only the positional arguments given to the module. Keyword arguments won’t be passed to the hooks, only to the forward. The hook can modify the output. It can modify the input inplace, but that will not have an effect on forward since the hook is called after forward() has run. The hook should have the following signature:

        hook(module, args, output) -> None or modified output

    If with_kwargs is True, the forward hook will be passed the kwargs given to the forward function and is expected to return the output, possibly modified. The hook should have the following signature:

        hook(module, args, kwargs, output) -> None or modified output

    Parameters:
        • hook (Callable) – The user defined hook to be registered.
        • prepend (bool) – If True, the provided hook will be fired before all existing forward hooks on this torch.nn.modules.Module. Otherwise, the provided hook will be fired after all existing forward hooks on this torch.nn.modules.Module. Note that global forward hooks registered with register_module_forward_hook() will fire before all hooks registered by this method. Default: False
        • with_kwargs (bool) – If True, the hook will be passed the kwargs given to the forward function. Default: False
        • always_call (bool) – If True the hook will be run regardless of whether an exception is raised while calling the Module. Default: False

    Returns: a handle that can be used to remove the added hook by calling handle.remove()
    Return type: torch.utils.hooks.RemovableHandle
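    Example (an illustrative sketch added here, not part of the upstream docstring; shape_hook is our name):

    >>> import torch
    >>> from torch import nn
    >>> net = nn.Linear(3, 1)
    >>> def shape_hook(module, args, output):
    ...     print(module.__class__.__name__, args[0].shape, '->', output.shape)
    >>> handle = net.register_forward_hook(shape_hook)
    >>> _ = net(torch.randn(5, 3))
    Linear torch.Size([5, 3]) -> torch.Size([5, 1])
    >>> handle.remove()  # detach the hook once it is no longer needed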
register_forward_pre_hook(hook: Union[Callable[[T, Tuple[Any, ...]], Optional[Any]], Callable[[T, Tuple[Any, ...], Dict[str, Any]], Optional[Tuple[Any, Dict[str, Any]]]]], *, prepend: bool = False, with_kwargs: bool = False) → torch.utils.hooks.RemovableHandle

    Register a forward pre-hook on the module.

    The hook will be called every time before forward() is invoked.

    If with_kwargs is false or not specified, the input contains only the positional arguments given to the module. Keyword arguments won’t be passed to the hooks, only to the forward. The hook can modify the input. The user can either return a tuple or a single modified value in the hook; the value will be wrapped into a tuple if a single value is returned (unless that value is already a tuple). The hook should have the following signature:

        hook(module, args) -> None or modified input

    If with_kwargs is true, the forward pre-hook will be passed the kwargs given to the forward function, and if the hook modifies the input, both the args and kwargs should be returned. The hook should have the following signature:

        hook(module, args, kwargs) -> None or a tuple of modified input and kwargs

    Parameters:
        • hook (Callable) – The user defined hook to be registered.
        • prepend (bool) – If true, the provided hook will be fired before all existing forward_pre hooks on this torch.nn.modules.Module. Otherwise, the provided hook will be fired after all existing forward_pre hooks on this torch.nn.modules.Module. Note that global forward_pre hooks registered with register_module_forward_pre_hook() will fire before all hooks registered by this method. Default: False
        • with_kwargs (bool) – If true, the hook will be passed the kwargs given to the forward function. Default: False

    Returns: a handle that can be used to remove the added hook by calling handle.remove()
    Return type: torch.utils.hooks.RemovableHandle
register_full_backward_hook(hook: Callable[[Module, Union[Tuple[torch.Tensor, ...], torch.Tensor], Union[Tuple[torch.Tensor, ...], torch.Tensor]], Union[None, Tuple[torch.Tensor, ...], torch.Tensor]], prepend: bool = False) → torch.utils.hooks.RemovableHandle

    Register a backward hook on the module.

    The hook will be called every time the gradients with respect to a module are computed, i.e. the hook will execute if and only if the gradients with respect to module outputs are computed. The hook should have the following signature:

        hook(module, grad_input, grad_output) -> tuple(Tensor) or None

    The grad_input and grad_output are tuples that contain the gradients with respect to the inputs and outputs respectively. The hook should not modify its arguments, but it can optionally return a new gradient with respect to the input that will be used in place of grad_input in subsequent computations. grad_input will only correspond to the inputs given as positional arguments; all kwarg arguments are ignored. Entries in grad_input and grad_output will be None for all non-Tensor arguments.

    For technical reasons, when this hook is applied to a Module, its forward function will receive a view of each Tensor passed to the Module. Similarly the caller will receive a view of each Tensor returned by the Module’s forward function.

    Warning: Modifying inputs or outputs inplace is not allowed when using backward hooks and will raise an error.

    Parameters:
        • hook (Callable) – The user-defined hook to be registered.
        • prepend (bool) – If true, the provided hook will be fired before all existing backward hooks on this torch.nn.modules.Module. Otherwise, the provided hook will be fired after all existing backward hooks on this torch.nn.modules.Module. Note that global backward hooks registered with register_module_full_backward_hook() will fire before all hooks registered by this method.

    Returns: a handle that can be used to remove the added hook by calling handle.remove()
    Return type: torch.utils.hooks.RemovableHandle
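    Example (an illustrative sketch added here, not part of the upstream docstring; grad_hook is our name):

    >>> import torch
    >>> from torch import nn
    >>> net = nn.Linear(3, 1)
    >>> def grad_hook(module, grad_input, grad_output):
    ...     print('grad_output[0]:', grad_output[0].shape)
    >>> handle = net.register_full_backward_hook(grad_hook)
    >>> net(torch.randn(4, 3)).sum().backward()
    grad_output[0]: torch.Size([4, 1])
    >>> handle.remove()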
register_full_backward_pre_hook(hook: Callable[[Module, Union[Tuple[torch.Tensor, ...], torch.Tensor]], Union[None, Tuple[torch.Tensor, ...], torch.Tensor]], prepend: bool = False) → torch.utils.hooks.RemovableHandle

    Register a backward pre-hook on the module.

    The hook will be called every time the gradients for the module are computed. The hook should have the following signature:

        hook(module, grad_output) -> tuple[Tensor] or None

    The grad_output is a tuple. The hook should not modify its arguments, but it can optionally return a new gradient with respect to the output that will be used in place of grad_output in subsequent computations. Entries in grad_output will be None for all non-Tensor arguments.

    For technical reasons, when this hook is applied to a Module, its forward function will receive a view of each Tensor passed to the Module. Similarly the caller will receive a view of each Tensor returned by the Module’s forward function.

    Warning: Modifying inputs inplace is not allowed when using backward hooks and will raise an error.

    Parameters:
        • hook (Callable) – The user-defined hook to be registered.
        • prepend (bool) – If true, the provided hook will be fired before all existing backward_pre hooks on this torch.nn.modules.Module. Otherwise, the provided hook will be fired after all existing backward_pre hooks on this torch.nn.modules.Module. Note that global backward_pre hooks registered with register_module_full_backward_pre_hook() will fire before all hooks registered by this method.

    Returns: a handle that can be used to remove the added hook by calling handle.remove()
    Return type: torch.utils.hooks.RemovableHandle
register_load_state_dict_post_hook(hook)

    Register a post hook to be run after the module’s load_state_dict is called.

    It should have the following signature:

        hook(module, incompatible_keys) -> None

    The module argument is the current module that this hook is registered on, and the incompatible_keys argument is a NamedTuple consisting of attributes missing_keys and unexpected_keys. missing_keys is a list of str containing the missing keys and unexpected_keys is a list of str containing the unexpected keys.

    The given incompatible_keys can be modified inplace if needed.

    Note that the checks performed when calling load_state_dict() with strict=True are affected by modifications the hook makes to missing_keys or unexpected_keys, as expected. Additions to either set of keys will result in an error being thrown when strict=True, and clearing out both missing and unexpected keys will avoid an error.

    Returns: a handle that can be used to remove the added hook by calling handle.remove()
    Return type: torch.utils.hooks.RemovableHandle
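    Example (an illustrative sketch added here, not part of the upstream docstring; report is our name):

    >>> from torch import nn
    >>> def report(module, incompatible_keys):
    ...     if incompatible_keys.missing_keys or incompatible_keys.unexpected_keys:
    ...         print('incompatible:', incompatible_keys)
    >>> net = nn.Linear(2, 2)
    >>> handle = net.register_load_state_dict_post_hook(report)
    >>> _ = net.load_state_dict(net.state_dict())  # a clean load: the hook prints nothing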
register_module(name: str, module: Optional[Module]) → None

    Alias for add_module().
register_parameter(name: str, param: Optional[torch.nn.parameter.Parameter]) → None

    Add a parameter to the module.

    The parameter can be accessed as an attribute using the given name.

    Parameters:
        • name (str) – name of the parameter. The parameter can be accessed from this module using the given name
        • param (Parameter or None) – parameter to be added to the module. If None, then operations that run on parameters, such as cuda, are ignored and the parameter is not included in the module’s state_dict.
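    Example (an illustrative sketch added here, not part of the upstream docstring; the Scaled class is ours):

    >>> import torch
    >>> from torch import nn
    >>> class Scaled(nn.Module):
    ...     def __init__(self):
    ...         super().__init__()
    ...         self.register_parameter('scale', nn.Parameter(torch.ones(1)))
    ...     def forward(self, x):
    ...         return self.scale * x
    >>> list(Scaled().state_dict().keys())
    ['scale']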
register_state_dict_pre_hook(hook)

    Register a pre-hook for the state_dict() method.

    These hooks will be called with arguments self, prefix, and keep_vars before calling state_dict on self. The registered hooks can be used to perform pre-processing before the state_dict call is made.
requires_grad_(requires_grad: bool = True) → T

    Change if autograd should record operations on parameters in this module.

    This method sets the parameters’ requires_grad attributes in-place.

    This method is helpful for freezing part of the module for finetuning or training parts of a model individually (e.g., GAN training).

    See Locally disabling gradient computation for a comparison between .requires_grad_() and several similar mechanisms that may be confused with it.

    Parameters:
        requires_grad (bool) – whether autograd should record operations on parameters in this module. Default: True.

    Returns: self
    Return type: Module
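    Example (an illustrative freezing sketch added here, not part of the upstream docstring):

    >>> import torch
    >>> from torch import nn
    >>> backbone, head = nn.Linear(8, 8), nn.Linear(8, 2)
    >>> backbone.requires_grad_(False)  # freeze all backbone parameters in-place
    Linear(in_features=8, out_features=8, bias=True)
    >>> optimizer = torch.optim.Adam(head.parameters(), lr=1e-3)  # only the head is trained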
set_extra_state(state: Any) → None

    Set extra state contained in the loaded state_dict.

    This function is called from load_state_dict() to handle any extra state found within the state_dict. Implement this function and a corresponding get_extra_state() for your module if you need to store extra state within its state_dict.

    Parameters:
        state (dict) – Extra state from the state_dict
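    Example (an illustrative sketch of the get_extra_state()/set_extra_state() pair, added here; WithVersion is our class):

    >>> from torch import nn
    >>> class WithVersion(nn.Module):
    ...     def __init__(self):
    ...         super().__init__()
    ...         self.version = 1
    ...     def get_extra_state(self):
    ...         return {'version': self.version}
    ...     def set_extra_state(self, state):
    ...         self.version = state['version']
    >>> m = WithVersion()
    >>> m.load_state_dict(m.state_dict())  # the extra state round-trips through the state dict
    <All keys matched successfully>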
share_memory() → T

    See torch.Tensor.share_memory_().
state_dict(*args, destination=None, prefix='', keep_vars=False)

    Return a dictionary containing references to the whole state of the module.

    Both parameters and persistent buffers (e.g. running averages) are included. Keys are corresponding parameter and buffer names. Parameters and buffers set to None are not included.

    Note: The returned object is a shallow copy. It contains references to the module’s parameters and buffers.

    Warning: Currently state_dict() also accepts positional arguments for destination, prefix and keep_vars in order. However, this is being deprecated and keyword arguments will be enforced in future releases.

    Warning: Please avoid the use of argument destination as it is not designed for end-users.

    Parameters:
        • destination (dict, optional) – If provided, the state of module will be updated into the dict and the same object is returned. Otherwise, an OrderedDict will be created and returned. Default: None.
        • prefix (str, optional) – a prefix added to parameter and buffer names to compose the keys in state_dict. Default: ''.
        • keep_vars (bool, optional) – by default the Tensors returned in the state dict are detached from autograd. If it’s set to True, detaching will not be performed. Default: False.

    Returns: a dictionary containing a whole state of the module
    Return type: dict

    Example:

    >>> # xdoctest: +SKIP("undefined vars")
    >>> module.state_dict().keys()
    ['bias', 'weight']
to(*args, **kwargs)

    Move and/or cast the parameters and buffers.

    This can be called as

        to(device=None, dtype=None, non_blocking=False)
        to(dtype, non_blocking=False)
        to(tensor, non_blocking=False)
        to(memory_format=torch.channels_last)

    Its signature is similar to torch.Tensor.to(), but only accepts floating point or complex dtypes. In addition, this method will only cast the floating point or complex parameters and buffers to dtype (if given). The integral parameters and buffers will be moved to device, if that is given, but with dtypes unchanged. When non_blocking is set, it tries to convert/move asynchronously with respect to the host if possible, e.g., moving CPU Tensors with pinned memory to CUDA devices.

    See below for examples.

    Note: This method modifies the module in-place.

    Parameters:
        • device (torch.device) – the desired device of the parameters and buffers in this module
        • dtype (torch.dtype) – the desired floating point or complex dtype of the parameters and buffers in this module
        • tensor (torch.Tensor) – Tensor whose dtype and device are the desired dtype and device for all parameters and buffers in this module
        • memory_format (torch.memory_format) – the desired memory format for 4D parameters and buffers in this module (keyword only argument)

    Returns: self
    Return type: Module

    Examples:

    >>> # xdoctest: +IGNORE_WANT("non-deterministic")
    >>> linear = nn.Linear(2, 2)
    >>> linear.weight
    Parameter containing:
    tensor([[ 0.1913, -0.3420],
            [-0.5113, -0.2325]])
    >>> linear.to(torch.double)
    Linear(in_features=2, out_features=2, bias=True)
    >>> linear.weight
    Parameter containing:
    tensor([[ 0.1913, -0.3420],
            [-0.5113, -0.2325]], dtype=torch.float64)
    >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA1)
    >>> gpu1 = torch.device("cuda:1")
    >>> linear.to(gpu1, dtype=torch.half, non_blocking=True)
    Linear(in_features=2, out_features=2, bias=True)
    >>> linear.weight
    Parameter containing:
    tensor([[ 0.1914, -0.3420],
            [-0.5112, -0.2324]], dtype=torch.float16, device='cuda:1')
    >>> cpu = torch.device("cpu")
    >>> linear.to(cpu)
    Linear(in_features=2, out_features=2, bias=True)
    >>> linear.weight
    Parameter containing:
    tensor([[ 0.1914, -0.3420],
            [-0.5112, -0.2324]], dtype=torch.float16)

    >>> linear = nn.Linear(2, 2, bias=None).to(torch.cdouble)
    >>> linear.weight
    Parameter containing:
    tensor([[ 0.3741+0.j,  0.2382+0.j],
            [ 0.5593+0.j, -0.4443+0.j]], dtype=torch.complex128)
    >>> linear(torch.ones(3, 2, dtype=torch.cdouble))
    tensor([[0.6122+0.j, 0.1150+0.j],
            [0.6122+0.j, 0.1150+0.j],
            [0.6122+0.j, 0.1150+0.j]], dtype=torch.complex128)
to_empty(*, device: Union[int, str, torch.device, None], recurse: bool = True) → T

    Move the parameters and buffers to the specified device without copying storage.

    Parameters:
        • device (torch.device) – The desired device of the parameters and buffers in this module.
        • recurse (bool) – Whether parameters and buffers of submodules should be recursively moved to the specified device.

    Returns: self
    Return type: Module
train(mode: bool = True) → T

    Set the module in training mode.

    This has an effect only on certain modules. See the documentation of particular modules for details of their behavior in training/evaluation mode, if they are affected, e.g. Dropout, BatchNorm, etc.

    Parameters:
        mode (bool) – whether to set training mode (True) or evaluation mode (False). Default: True.

    Returns: self
    Return type: Module
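    Example (an illustrative sketch added here, not part of the upstream docstring):

    >>> from torch import nn
    >>> net = nn.Sequential(nn.Linear(4, 4), nn.Dropout(p=0.5))
    >>> _ = net.train()   # dropout is active
    >>> net.training
    True
    >>> _ = net.eval()    # dropout is disabled for deterministic inference
    >>> net.training
    False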
type(dst_type: Union[torch.dtype, str]) → T

    Casts all parameters and buffers to dst_type.

    Note: This method modifies the module in-place.

    Parameters:
        dst_type (type or string) – the desired type

    Returns: self
    Return type: Module
xpu(device: Union[int, torch.device, None] = None) → T

    Move all model parameters and buffers to the XPU.

    This also makes associated parameters and buffers different objects. So it should be called before constructing the optimizer if the module will live on the XPU while being optimized.

    Note: This method modifies the module in-place.

    Parameters:
        device (int, optional) – if specified, all parameters will be copied to that device

    Returns: self
    Return type: Module
zero_grad(set_to_none: bool = True) → None

    Reset gradients of all model parameters.

    See the similar function under torch.optim.Optimizer for more context.

    Parameters:
        set_to_none (bool) – instead of setting to zero, set the grads to None. See torch.optim.Optimizer.zero_grad() for details.
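    Example (an illustrative sketch added here, not part of the upstream docstring; assumes a recent PyTorch release where set_to_none defaults to True):

    >>> import torch
    >>> from torch import nn
    >>> net = nn.Linear(3, 1)
    >>> net(torch.randn(2, 3)).sum().backward()
    >>> net.weight.grad is None
    False
    >>> net.zero_grad()   # with set_to_none=True the gradients become None
    >>> net.weight.grad is None
    True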
AdaptiveSIREN

class AdaptiveSIREN(alpha=None, beta=None, gamma=None, fixed=None)[source]

    Bases: AdaptiveActivationFunctionInterface

    Adaptive trainable sin function.
Given the function \(\text{sin}:\mathbb{R}^n\rightarrow\mathbb{R}^n\), the adaptive function \(\text{sin}_{\text{adaptive}}:\mathbb{R}^n\rightarrow\mathbb{R}^n\) is defined as:

\[\text{sin}_{\text{adaptive}}(\mathbf{x}) = \alpha\,\text{sin}(\beta\mathbf{x}+\gamma),\]

where \(\alpha\), \(\beta\) are trainable scaling parameters and \(\gamma\) is a trainable shifting parameter.
Parameters:
    • alpha (float | complex) – Scaling parameter alpha. Defaults to None. When None is passed, the variable is initialized to 1.
    • beta (float | complex) – Scaling parameter beta. Defaults to None. When None is passed, the variable is initialized to 1.
    • gamma (float | complex) – Shifting parameter gamma. Defaults to None. When None is passed, the variable is initialized to 1.
    • fixed (list) – List of parameters to fix during training, i.e. not optimized. Defaults to None.
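Example (an illustrative usage sketch added here, not from the original page; the import path pina.adaptive_functions is assumed from the package layout, and the number of trainable variables follows the constructor documented above):

    >>> import torch
    >>> from pina.adaptive_functions import AdaptiveSIREN  # assumed import path
    >>> act = AdaptiveSIREN()                 # alpha, beta, gamma initialized to 1 and trainable
    >>> out = act(torch.linspace(-1, 1, 5))   # elementwise adaptive sin
    >>> out.shape
    torch.Size([5])
    >>> len(list(act.parameters()))           # the three trainable variables
    3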
All methods inherited from torch.nn.Module (add_module(), apply(), bfloat16(), buffers(), children(), float(), get_buffer(), get_extra_state(), get_parameter(), get_submodule(), half(), ipu(), load_state_dict(), modules(), named_buffers(), named_children(), named_modules(), named_parameters(), parameters(), the register_* hook methods, requires_grad_(), set_extra_state(), share_memory(), state_dict(), to(), to_empty(), train(), type(), xpu(), and zero_grad()) behave exactly as documented above and are not repeated here. The members specific to this class follow.
property alpha

    The alpha variable.
property beta

    The beta variable.
forward(x)

    Define the computation performed at every call. Applies the adaptive activation function to the input elementwise.

    Parameters:
        x (torch.Tensor | LabelTensor) – The input tensor on which to evaluate the activation function.
property func

    The callable activation function.
property gamma

    The gamma variable.
diff --git a/_rst/adaptive_functions/AdaptiveSiLU.html b/_rst/adaptive_functions/AdaptiveSiLU.html
index 38563c1f..ead05ae3 100644

AdaptiveSiLU — PINA 0.1.2 documentation
AdaptiveSiLU

class AdaptiveSiLU(alpha=None, beta=None, gamma=None, fixed=None)[source]

Bases: AdaptiveActivationFunctionInterface

Adaptive trainable SiLU activation function.

Given the function \(\text{SiLU}:\mathbb{R}^n\rightarrow\mathbb{R}^n\), the adaptive function \(\text{SiLU}_{\text{adaptive}}:\mathbb{R}^n\rightarrow\mathbb{R}^n\) is defined as

\(\text{SiLU}_{\text{adaptive}}(\mathbf{x}) = \alpha\,\text{SiLU}(\beta\mathbf{x} + \gamma),\)

where \(\alpha\), \(\beta\), and \(\gamma\) are trainable parameters.

Parameters:
  • alpha (float | complex) – Scaling parameter alpha. Defaults to None. When None is passed, the variable is initialized to 1.
  • beta (float | complex) – Scaling parameter beta. Defaults to None. When None is passed, the variable is initialized to 1.
  • gamma (float | complex) – Shifting parameter gamma. Defaults to None. When None is passed, the variable is initialized to 1.
  • fixed (list) – List of parameters to fix during training.
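For instance, a minimal usage sketch. Both the import path (pina.adaptive_functions) and the convention that fixed takes parameter names as strings are assumptions, not confirmed by this page:

import torch
from pina.adaptive_functions import AdaptiveSiLU  # assumed import path

# alpha is frozen at its initial value; beta and gamma remain trainable
act = AdaptiveSiLU(alpha=1.0, beta=2.0, fixed=['alpha'])

x = torch.randn(4, 3)
y = act(x)                  # adaptive SiLU applied elementwise
print(y.shape)              # torch.Size([4, 3])
print(act.alpha, act.beta)  # current values of the adaptive parameters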

add_module(name: str, module: Optional[Module]) → None

Add a child module to the current module.

The module can be accessed as an attribute using the given name, as sketched below.

Parameters:
  • name (str) – name of the child module. The child module can be accessed from this module using the given name
  • module (Module) – child module to be added to the module.
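A short sketch of the runtime-naming use case (Block and proj are illustrative names):

import torch.nn as nn

class Block(nn.Module):
    def __init__(self):
        super().__init__()
        # same effect as `self.proj = nn.Linear(4, 4)`, but the
        # attribute name can be chosen at runtime
        self.add_module('proj', nn.Linear(4, 4))

block = Block()
print(block.proj)  # the child module is accessible as an attribute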
property alpha

The alpha variable.

apply(fn: Callable[[Module], None]) → T

Apply fn recursively to every submodule (as returned by .children()) as well as self.

Typical use includes initializing the parameters of a model (see also torch.nn.init).

Parameters:
  fn (Module -> None) – function to be applied to each submodule

Returns:
  self

Return type:
  Module

Example:

>>> @torch.no_grad()
>>> def init_weights(m):
>>>     print(m)
>>>     if type(m) == nn.Linear:
>>>         m.weight.fill_(1.0)
>>>         print(m.weight)
>>> net = nn.Sequential(nn.Linear(2, 2), nn.Linear(2, 2))
>>> net.apply(init_weights)
Linear(in_features=2, out_features=2, bias=True)
Parameter containing:
tensor([[1., 1.],
        [1., 1.]], requires_grad=True)
Linear(in_features=2, out_features=2, bias=True)
Parameter containing:
tensor([[1., 1.],
        [1., 1.]], requires_grad=True)
Sequential(
  (0): Linear(in_features=2, out_features=2, bias=True)
  (1): Linear(in_features=2, out_features=2, bias=True)
)

property beta

The beta variable.

bfloat16() → T

Casts all floating point parameters and buffers to bfloat16 datatype.

Note

This method modifies the module in-place.

Returns:
  self

Return type:
  Module

buffers(recurse: bool = True) → Iterator[torch.Tensor]

Return an iterator over module buffers.

Parameters:
  recurse (bool) – if True, then yields buffers of this module and all submodules. Otherwise, yields only buffers that are direct members of this module.

Yields:
  torch.Tensor – module buffer

Example:

>>> # xdoctest: +SKIP("undefined vars")
>>> for buf in model.buffers():
>>>     print(type(buf), buf.size())
<class 'torch.Tensor'> (20L,)
<class 'torch.Tensor'> (20L, 1L, 5L, 5L)

children() → Iterator[torch.nn.modules.module.Module]

Return an iterator over immediate children modules.

Yields:
  Module – a child module
float() → T

Casts all floating point parameters and buffers to float datatype.

Note

This method modifies the module in-place.

Returns:
  self

Return type:
  Module

forward(x)

Define the computation performed at every call. Applies the activation function to the input elementwise.

Parameters:
  x (torch.Tensor | LabelTensor) – The input tensor to evaluate the activation function.
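Concretely, a minimal sketch (import path assumed as above; calling the module rather than forward() directly keeps registered hooks working):

import torch
from pina.adaptive_functions import AdaptiveSiLU  # assumed import path

act = AdaptiveSiLU()
x = torch.linspace(-3.0, 3.0, steps=7)
y = act(x)  # equivalent to act.forward(x), but also runs registered hooks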
property func

The callable activation function.

property gamma

The gamma variable.
get_buffer(target: str) → torch.Tensor

Return the buffer given by target if it exists, otherwise throw an error.

See the docstring for get_submodule for a more detailed explanation of this method's functionality as well as how to correctly specify target.

Parameters:
  target – The fully-qualified string name of the buffer to look for. (See get_submodule for how to specify a fully-qualified string.)

Returns:
  The buffer referenced by target

Return type:
  torch.Tensor

Raises:
  AttributeError – If the target string references an invalid path or resolves to something that is not a buffer

get_extra_state() → Any

Return any extra state to include in the module's state_dict.

Implement this and a corresponding set_extra_state() for your module if you need to store extra state. This function is called when building the module's state_dict().

Note that extra state should be picklable to ensure working serialization of the state_dict. We only provide backwards-compatibility guarantees for serializing Tensors; other objects may break backwards compatibility if their serialized pickled form changes.

Returns:
  Any extra state to store in the module's state_dict

Return type:
  object
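A minimal sketch of the get_extra_state()/set_extra_state() pair (the Counter module and its steps field are illustrative):

import torch.nn as nn

class Counter(nn.Module):
    """Stores a plain-Python counter inside its state_dict."""
    def __init__(self):
        super().__init__()
        self.steps = 0

    def get_extra_state(self):
        return {'steps': self.steps}  # must be picklable

    def set_extra_state(self, state):
        self.steps = state['steps']

m = Counter()
m.steps = 7
sd = m.state_dict()  # contains an '_extra_state' entry
m2 = Counter()
m2.load_state_dict(sd)
print(m2.steps)  # 7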
get_parameter(target: str) → torch.nn.parameter.Parameter

Return the parameter given by target if it exists, otherwise throw an error.

See the docstring for get_submodule for a more detailed explanation of this method's functionality as well as how to correctly specify target.

Parameters:
  target – The fully-qualified string name of the Parameter to look for. (See get_submodule for how to specify a fully-qualified string.)

Returns:
  The Parameter referenced by target

Return type:
  torch.nn.Parameter

Raises:
  AttributeError – If the target string references an invalid path or resolves to something that is not an nn.Parameter

get_submodule(target: str) → torch.nn.modules.module.Module

Return the submodule given by target if it exists, otherwise throw an error.

For example, let's say you have an nn.Module A that looks like this:

A(
    (net_b): Module(
        (net_c): Module(
            (conv): Conv2d(16, 33, kernel_size=(3, 3), stride=(2, 2))
        )
        (linear): Linear(in_features=100, out_features=200, bias=True)
    )
)

(The diagram shows an nn.Module A. A has a nested submodule net_b, which itself has two submodules net_c and linear. net_c then has a submodule conv.)

To check whether or not we have the linear submodule, we would call get_submodule("net_b.linear"). To check whether we have the conv submodule, we would call get_submodule("net_b.net_c.conv").

The runtime of get_submodule is bounded by the degree of module nesting in target. A query against named_modules achieves the same result, but it is O(N) in the number of transitive modules. So, for a simple check to see if some submodule exists, get_submodule should always be used.

Parameters:
  target – The fully-qualified string name of the submodule to look for. (See above example for how to specify a fully-qualified string.)

Returns:
  The submodule referenced by target

Return type:
  torch.nn.Module

Raises:
  AttributeError – If the target string references an invalid path or resolves to something that is not an nn.Module
half() → T

Casts all floating point parameters and buffers to half datatype.

Note

This method modifies the module in-place.

Returns:
  self

Return type:
  Module

ipu(device: Union[int, torch.device, None] = None) → T

Move all model parameters and buffers to the IPU.

This also makes associated parameters and buffers different objects. So it should be called before constructing the optimizer if the module will live on IPU while being optimized.

Note

This method modifies the module in-place.

Parameters:
  device (int, optional) – if specified, all parameters will be copied to that device

Returns:
  self

Return type:
  Module
load_state_dict(state_dict: Mapping[str, Any], strict: bool = True, assign: bool = False)

Copy parameters and buffers from state_dict into this module and its descendants.

If strict is True, then the keys of state_dict must exactly match the keys returned by this module's state_dict() function.

Warning

If assign is True the optimizer must be created after the call to load_state_dict unless get_swap_module_params_on_conversion() is True.

Parameters:
  • state_dict (dict) – a dict containing parameters and persistent buffers.
  • strict (bool, optional) – whether to strictly enforce that the keys in state_dict match the keys returned by this module's state_dict() function. Default: True
  • assign (bool, optional) – When False, the properties of the tensors in the current module are preserved, while when True, the properties of the Tensors in the state dict are preserved. The only exception is the requires_grad field, for which the value from the module is preserved. Default: False

Returns:
  • missing_keys is a list of str containing the missing keys
  • unexpected_keys is a list of str containing the unexpected keys

Return type:
  NamedTuple with missing_keys and unexpected_keys fields

Note

If a parameter or buffer is registered as None and its corresponding key exists in state_dict, load_state_dict() will raise a RuntimeError.
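A small round-trip sketch showing strict and non-strict loading:

import torch
import torch.nn as nn

src, dst = nn.Linear(2, 2), nn.Linear(2, 2)
dst.load_state_dict(src.state_dict())  # keys match exactly, strict=True

# With strict=False, key mismatches are reported instead of raising:
result = dst.load_state_dict({'weight': src.weight.detach()}, strict=False)
print(result.missing_keys)     # ['bias']
print(result.unexpected_keys)  # []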
modules() → Iterator[torch.nn.modules.module.Module]

Return an iterator over all modules in the network.

Yields:
  Module – a module in the network

Note

Duplicate modules are returned only once. In the following example, l will be returned only once.

Example:

>>> l = nn.Linear(2, 2)
>>> net = nn.Sequential(l, l)
>>> for idx, m in enumerate(net.modules()):
...     print(idx, '->', m)

0 -> Sequential(
  (0): Linear(in_features=2, out_features=2, bias=True)
  (1): Linear(in_features=2, out_features=2, bias=True)
)
1 -> Linear(in_features=2, out_features=2, bias=True)

named_buffers(prefix: str = '', recurse: bool = True, remove_duplicate: bool = True) → Iterator[Tuple[str, torch.Tensor]]

Return an iterator over module buffers, yielding both the name of the buffer as well as the buffer itself.

Parameters:
  • prefix (str) – prefix to prepend to all buffer names.
  • recurse (bool, optional) – if True, then yields buffers of this module and all submodules. Otherwise, yields only buffers that are direct members of this module. Defaults to True.
  • remove_duplicate (bool, optional) – whether to remove the duplicated buffers in the result. Defaults to True.

Yields:
  (str, torch.Tensor) – Tuple containing the name and buffer

Example:

>>> # xdoctest: +SKIP("undefined vars")
>>> for name, buf in self.named_buffers():
>>>     if name in ['running_var']:
>>>         print(buf.size())

named_children() → Iterator[Tuple[str, torch.nn.modules.module.Module]]

Return an iterator over immediate children modules, yielding both the name of the module as well as the module itself.

Yields:
  (str, Module) – Tuple containing a name and child module

Example:

>>> # xdoctest: +SKIP("undefined vars")
>>> for name, module in model.named_children():
>>>     if name in ['conv4', 'conv5']:
>>>         print(module)

named_modules(memo: Optional[Set[Module]] = None, prefix: str = '', remove_duplicate: bool = True)

Return an iterator over all modules in the network, yielding both the name of the module as well as the module itself.

Parameters:
  • memo – a memo to store the set of modules already added to the result
  • prefix – a prefix that will be added to the name of the module
  • remove_duplicate – whether to remove the duplicated module instances in the result or not

Yields:
  (str, Module) – Tuple of name and module

Note

Duplicate modules are returned only once. In the following example, l will be returned only once.

Example:

>>> l = nn.Linear(2, 2)
>>> net = nn.Sequential(l, l)
>>> for idx, m in enumerate(net.named_modules()):
...     print(idx, '->', m)

0 -> ('', Sequential(
  (0): Linear(in_features=2, out_features=2, bias=True)
  (1): Linear(in_features=2, out_features=2, bias=True)
))
1 -> ('0', Linear(in_features=2, out_features=2, bias=True))

named_parameters(prefix: str = '', recurse: bool = True, remove_duplicate: bool = True) → Iterator[Tuple[str, torch.nn.parameter.Parameter]]

Return an iterator over module parameters, yielding both the name of the parameter as well as the parameter itself.

Parameters:
  • prefix (str) – prefix to prepend to all parameter names.
  • recurse (bool) – if True, then yields parameters of this module and all submodules. Otherwise, yields only parameters that are direct members of this module.
  • remove_duplicate (bool, optional) – whether to remove the duplicated parameters in the result. Defaults to True.

Yields:
  (str, Parameter) – Tuple containing the name and parameter

Example:

>>> # xdoctest: +SKIP("undefined vars")
>>> for name, param in self.named_parameters():
>>>     if name in ['bias']:
>>>         print(param.size())

parameters(recurse: bool = True) → Iterator[torch.nn.parameter.Parameter]

Return an iterator over module parameters.

This is typically passed to an optimizer.

Parameters:
  recurse (bool) – if True, then yields parameters of this module and all submodules. Otherwise, yields only parameters that are direct members of this module.

Yields:
  Parameter – module parameter

Example:

>>> # xdoctest: +SKIP("undefined vars")
>>> for param in model.parameters():
>>>     print(type(param), param.size())
<class 'torch.Tensor'> (20L,)
<class 'torch.Tensor'> (20L, 1L, 5L, 5L)
register_backward_hook(hook: Callable[[Module, Union[Tuple[torch.Tensor, ...], torch.Tensor], Union[Tuple[torch.Tensor, ...], torch.Tensor]], Union[None, Tuple[torch.Tensor, ...], torch.Tensor]]) → torch.utils.hooks.RemovableHandle

Register a backward hook on the module.

This function is deprecated in favor of register_full_backward_hook() and the behavior of this function will change in future versions.

Returns:
  a handle that can be used to remove the added hook by calling handle.remove()

Return type:
  torch.utils.hooks.RemovableHandle

register_buffer(name: str, tensor: Optional[torch.Tensor], persistent: bool = True) → None

Add a buffer to the module.

This is typically used to register a buffer that should not be considered a model parameter. For example, BatchNorm's running_mean is not a parameter, but is part of the module's state. Buffers, by default, are persistent and will be saved alongside parameters. This behavior can be changed by setting persistent to False. The only difference between a persistent buffer and a non-persistent buffer is that the latter will not be a part of this module's state_dict.

Buffers can be accessed as attributes using given names.

Parameters:
  • name (str) – name of the buffer. The buffer can be accessed from this module using the given name
  • tensor (Tensor or None) – buffer to be registered. If None, then operations that run on buffers, such as cuda, are ignored. If None, the buffer is not included in the module's state_dict.
  • persistent (bool) – whether the buffer is part of this module's state_dict.

Example:

>>> # xdoctest: +SKIP("undefined vars")
>>> self.register_buffer('running_mean', torch.zeros(num_features))
register_forward_hook(hook: Union[Callable[[T, Tuple[Any, ...], Any], Optional[Any]], Callable[[T, Tuple[Any, ...], Dict[str, Any], Any], Optional[Any]]], *, prepend: bool = False, with_kwargs: bool = False, always_call: bool = False) → torch.utils.hooks.RemovableHandle

Register a forward hook on the module.

The hook will be called every time after forward() has computed an output.

If with_kwargs is False or not specified, the input contains only the positional arguments given to the module. Keyword arguments won't be passed to the hooks and only to the forward. The hook can modify the output. It can modify the input inplace but it will not have effect on forward since this is called after forward() is called. The hook should have the following signature:

hook(module, args, output) -> None or modified output

If with_kwargs is True, the forward hook will be passed the kwargs given to the forward function and be expected to return the output possibly modified. The hook should have the following signature:

hook(module, args, kwargs, output) -> None or modified output

Parameters:
  • hook (Callable) – The user defined hook to be registered.
  • prepend (bool) – If True, the provided hook will be fired before all existing forward hooks on this torch.nn.modules.Module. Otherwise, the provided hook will be fired after all existing forward hooks on this torch.nn.modules.Module. Note that global forward hooks registered with register_module_forward_hook() will fire before all hooks registered by this method. Default: False
  • with_kwargs (bool) – If True, the hook will be passed the kwargs given to the forward function. Default: False
  • always_call (bool) – If True the hook will be run regardless of whether an exception is raised while calling the Module. Default: False

Returns:
  a handle that can be used to remove the added hook by calling handle.remove()

Return type:
  torch.utils.hooks.RemovableHandle
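For example, a hook that captures an intermediate activation (all names here are illustrative):

import torch
import torch.nn as nn

net = nn.Sequential(nn.Linear(3, 5), nn.ReLU(), nn.Linear(5, 2))
captured = {}

def save_activation(module, args, output):
    captured['hidden'] = output.detach()  # returning None keeps the output unchanged

handle = net[0].register_forward_hook(save_activation)
net(torch.randn(4, 3))
print(captured['hidden'].shape)  # torch.Size([4, 5])
handle.remove()  # detach the hook once it is no longer needed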
register_forward_pre_hook(hook: Union[Callable[[T, Tuple[Any, ...]], Optional[Any]], Callable[[T, Tuple[Any, ...], Dict[str, Any]], Optional[Tuple[Any, Dict[str, Any]]]]], *, prepend: bool = False, with_kwargs: bool = False) → torch.utils.hooks.RemovableHandle

Register a forward pre-hook on the module.

The hook will be called every time before forward() is invoked.

If with_kwargs is false or not specified, the input contains only the positional arguments given to the module. Keyword arguments won't be passed to the hooks and only to the forward. The hook can modify the input. The user can either return a tuple or a single modified value in the hook. We will wrap the value into a tuple if a single value is returned (unless that value is already a tuple). The hook should have the following signature:

hook(module, args) -> None or modified input

If with_kwargs is true, the forward pre-hook will be passed the kwargs given to the forward function. And if the hook modifies the input, both the args and kwargs should be returned. The hook should have the following signature:

hook(module, args, kwargs) -> None or a tuple of modified input and kwargs

Parameters:
  • hook (Callable) – The user defined hook to be registered.
  • prepend (bool) – If true, the provided hook will be fired before all existing forward_pre hooks on this torch.nn.modules.Module. Otherwise, the provided hook will be fired after all existing forward_pre hooks on this torch.nn.modules.Module. Note that global forward_pre hooks registered with register_module_forward_pre_hook() will fire before all hooks registered by this method. Default: False
  • with_kwargs (bool) – If true, the hook will be passed the kwargs given to the forward function. Default: False

Returns:
  a handle that can be used to remove the added hook by calling handle.remove()

Return type:
  torch.utils.hooks.RemovableHandle
register_full_backward_hook(hook: Callable[[Module, Union[Tuple[torch.Tensor, ...], torch.Tensor], Union[Tuple[torch.Tensor, ...], torch.Tensor]], Union[None, Tuple[torch.Tensor, ...], torch.Tensor]], prepend: bool = False) → torch.utils.hooks.RemovableHandle

Register a backward hook on the module.

The hook will be called every time the gradients with respect to a module are computed, i.e. the hook will execute if and only if the gradients with respect to module outputs are computed. The hook should have the following signature:

hook(module, grad_input, grad_output) -> tuple(Tensor) or None

The grad_input and grad_output are tuples that contain the gradients with respect to the inputs and outputs respectively. The hook should not modify its arguments, but it can optionally return a new gradient with respect to the input that will be used in place of grad_input in subsequent computations. grad_input will only correspond to the inputs given as positional arguments and all kwarg arguments are ignored. Entries in grad_input and grad_output will be None for all non-Tensor arguments.

For technical reasons, when this hook is applied to a Module, its forward function will receive a view of each Tensor passed to the Module. Similarly the caller will receive a view of each Tensor returned by the Module's forward function.

Warning

Modifying inputs or outputs inplace is not allowed when using backward hooks and will raise an error.

Parameters:
  • hook (Callable) – The user-defined hook to be registered.
  • prepend (bool) – If true, the provided hook will be fired before all existing backward hooks on this torch.nn.modules.Module. Otherwise, the provided hook will be fired after all existing backward hooks on this torch.nn.modules.Module. Note that global backward hooks registered with register_module_full_backward_hook() will fire before all hooks registered by this method.

Returns:
  a handle that can be used to remove the added hook by calling handle.remove()

Return type:
  torch.utils.hooks.RemovableHandle
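For example, a hook that merely inspects gradients, returning None so grad_input is left untouched:

import torch
import torch.nn as nn

layer = nn.Linear(3, 3)

def report_grads(module, grad_input, grad_output):
    # grad_output[0] is the gradient w.r.t. the layer's output
    print('grad_output norm:', grad_output[0].norm().item())

handle = layer.register_full_backward_hook(report_grads)
layer(torch.randn(2, 3)).sum().backward()
handle.remove()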
register_full_backward_pre_hook(hook: Callable[[Module, Union[Tuple[torch.Tensor, ...], torch.Tensor]], Union[None, Tuple[torch.Tensor, ...], torch.Tensor]], prepend: bool = False) → torch.utils.hooks.RemovableHandle

Register a backward pre-hook on the module.

The hook will be called every time the gradients for the module are computed. The hook should have the following signature:

hook(module, grad_output) -> tuple[Tensor] or None

The grad_output is a tuple. The hook should not modify its arguments, but it can optionally return a new gradient with respect to the output that will be used in place of grad_output in subsequent computations. Entries in grad_output will be None for all non-Tensor arguments.

For technical reasons, when this hook is applied to a Module, its forward function will receive a view of each Tensor passed to the Module. Similarly the caller will receive a view of each Tensor returned by the Module's forward function.

Warning

Modifying inputs inplace is not allowed when using backward hooks and will raise an error.

Parameters:
  • hook (Callable) – The user-defined hook to be registered.
  • prepend (bool) – If true, the provided hook will be fired before all existing backward_pre hooks on this torch.nn.modules.Module. Otherwise, the provided hook will be fired after all existing backward_pre hooks on this torch.nn.modules.Module. Note that global backward_pre hooks registered with register_module_full_backward_pre_hook() will fire before all hooks registered by this method.

Returns:
  a handle that can be used to remove the added hook by calling handle.remove()

Return type:
  torch.utils.hooks.RemovableHandle

register_load_state_dict_post_hook(hook)

Register a post hook to be run after module's load_state_dict is called.

It should have the following signature:

hook(module, incompatible_keys) -> None

The module argument is the current module that this hook is registered on, and the incompatible_keys argument is a NamedTuple consisting of attributes missing_keys and unexpected_keys. missing_keys is a list of str containing the missing keys and unexpected_keys is a list of str containing the unexpected keys.

The given incompatible_keys can be modified inplace if needed.

Note that the checks performed when calling load_state_dict() with strict=True are affected by modifications the hook makes to missing_keys or unexpected_keys, as expected. Additions to either set of keys will result in an error being thrown when strict=True, and clearing out both missing and unexpected keys will avoid an error.

Returns:
  a handle that can be used to remove the added hook by calling handle.remove()

Return type:
  torch.utils.hooks.RemovableHandle

register_module(name: str, module: Optional[Module]) → None

Alias for add_module().
register_parameter(name: str, param: Optional[torch.nn.parameter.Parameter]) → None

Add a parameter to the module.

The parameter can be accessed as an attribute using given name.

Parameters:
  • name (str) – name of the parameter. The parameter can be accessed from this module using the given name
  • param (Parameter or None) – parameter to be added to the module. If None, then operations that run on parameters, such as cuda, are ignored. If None, the parameter is not included in the module's state_dict.
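A short sketch (Scaler and scale are illustrative names):

import torch
import torch.nn as nn

class Scaler(nn.Module):
    def __init__(self):
        super().__init__()
        # same effect as `self.scale = nn.Parameter(...)`, with the
        # name chosen at runtime
        self.register_parameter('scale', nn.Parameter(torch.ones(1)))

    def forward(self, x):
        return self.scale * x

m = Scaler()
print([name for name, _ in m.named_parameters()])  # ['scale']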
register_state_dict_pre_hook(hook)

Register a pre-hook for the state_dict() method.

These hooks will be called with arguments: self, prefix, and keep_vars before calling state_dict on self. The registered hooks can be used to perform pre-processing before the state_dict call is made.

requires_grad_(requires_grad: bool = True) → T

Change if autograd should record operations on parameters in this module.

This method sets the parameters' requires_grad attributes in-place.

This method is helpful for freezing part of the module for finetuning or training parts of a model individually (e.g., GAN training).

See Locally disabling gradient computation for a comparison between .requires_grad_() and several similar mechanisms that may be confused with it.

Parameters:
  requires_grad (bool) – whether autograd should record operations on parameters in this module. Default: True.

Returns:
  self

Return type:
  Module
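For example, freezing one part of a model while training another:

import torch
import torch.nn as nn

backbone = nn.Linear(8, 8)
head = nn.Linear(8, 2)

backbone.requires_grad_(False)  # no gradients recorded for the backbone

# Only the trainable head is handed to the optimizer:
optimizer = torch.optim.SGD(head.parameters(), lr=1e-2)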
set_extra_state(state: Any) → None

Set extra state contained in the loaded state_dict.

This function is called from load_state_dict() to handle any extra state found within the state_dict. Implement this function and a corresponding get_extra_state() for your module if you need to store extra state within its state_dict.

Parameters:
  state (dict) – Extra state from the state_dict

share_memory() → T

See torch.Tensor.share_memory_().

state_dict(*args, destination=None, prefix='', keep_vars=False)

Return a dictionary containing references to the whole state of the module.

Both parameters and persistent buffers (e.g. running averages) are included. Keys are corresponding parameter and buffer names. Parameters and buffers set to None are not included.

Note

The returned object is a shallow copy. It contains references to the module's parameters and buffers.

Warning

Currently state_dict() also accepts positional arguments for destination, prefix and keep_vars in order. However, this is being deprecated and keyword arguments will be enforced in future releases.

Warning

Please avoid the use of argument destination as it is not designed for end-users.

Parameters:
  • destination (dict, optional) – If provided, the state of module will be updated into the dict and the same object is returned. Otherwise, an OrderedDict will be created and returned. Default: None.
  • prefix (str, optional) – a prefix added to parameter and buffer names to compose the keys in state_dict. Default: ''.
  • keep_vars (bool, optional) – by default the Tensors returned in the state dict are detached from autograd. If it's set to True, detaching will not be performed. Default: False.

Returns:
  a dictionary containing a whole state of the module

Return type:
  dict

Example:

>>> # xdoctest: +SKIP("undefined vars")
>>> module.state_dict().keys()
['bias', 'weight']
to(*args, **kwargs)

Move and/or cast the parameters and buffers.

This can be called as

to(device=None, dtype=None, non_blocking=False)
to(dtype, non_blocking=False)
to(tensor, non_blocking=False)
to(memory_format=torch.channels_last)

Its signature is similar to torch.Tensor.to(), but only accepts floating point or complex dtypes. In addition, this method will only cast the floating point or complex parameters and buffers to dtype (if given). The integral parameters and buffers will be moved to device, if that is given, but with dtypes unchanged. When non_blocking is set, it tries to convert/move asynchronously with respect to the host if possible, e.g., moving CPU Tensors with pinned memory to CUDA devices.

See below for examples.

Note

This method modifies the module in-place.

Parameters:
  • device (torch.device) – the desired device of the parameters and buffers in this module
  • dtype (torch.dtype) – the desired floating point or complex dtype of the parameters and buffers in this module
  • tensor (torch.Tensor) – Tensor whose dtype and device are the desired dtype and device for all parameters and buffers in this module
  • memory_format (torch.memory_format) – the desired memory format for 4D parameters and buffers in this module (keyword only argument)

Returns:
  self

Return type:
  Module

Examples:

>>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> linear = nn.Linear(2, 2)
>>> linear.weight
Parameter containing:
tensor([[ 0.1913, -0.3420],
        [-0.5113, -0.2325]])
>>> linear.to(torch.double)
Linear(in_features=2, out_features=2, bias=True)
>>> linear.weight
Parameter containing:
tensor([[ 0.1913, -0.3420],
        [-0.5113, -0.2325]], dtype=torch.float64)
>>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA1)
>>> gpu1 = torch.device("cuda:1")
>>> linear.to(gpu1, dtype=torch.half, non_blocking=True)
Linear(in_features=2, out_features=2, bias=True)
>>> linear.weight
Parameter containing:
tensor([[ 0.1914, -0.3420],
        [-0.5112, -0.2324]], dtype=torch.float16, device='cuda:1')
>>> cpu = torch.device("cpu")
>>> linear.to(cpu)
Linear(in_features=2, out_features=2, bias=True)
>>> linear.weight
Parameter containing:
tensor([[ 0.1914, -0.3420],
        [-0.5112, -0.2324]], dtype=torch.float16)

>>> linear = nn.Linear(2, 2, bias=None).to(torch.cdouble)
>>> linear.weight
Parameter containing:
tensor([[ 0.3741+0.j,  0.2382+0.j],
        [ 0.5593+0.j, -0.4443+0.j]], dtype=torch.complex128)
>>> linear(torch.ones(3, 2, dtype=torch.cdouble))
tensor([[0.6122+0.j, 0.1150+0.j],
        [0.6122+0.j, 0.1150+0.j],
        [0.6122+0.j, 0.1150+0.j]], dtype=torch.complex128)
to_empty(*, device: Union[int, str, torch.device, None], recurse: bool = True) → T

Move the parameters and buffers to the specified device without copying storage.

Parameters:
  • device (torch.device) – The desired device of the parameters and buffers in this module.
  • recurse (bool) – Whether parameters and buffers of submodules should be recursively moved to the specified device.

Returns:
  self

Return type:
  Module
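The typical use is materializing a module that was built on the meta device (shapes only, no storage):

import torch
import torch.nn as nn

m = nn.Linear(1024, 1024, device='meta')  # no storage allocated yet
m = m.to_empty(device='cpu')              # allocate uninitialized CPU storage

# Values are uninitialized and must be (re)initialized explicitly:
torch.nn.init.xavier_uniform_(m.weight)
torch.nn.init.zeros_(m.bias)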
train(mode: bool = True) → T

Set the module in training mode.

This has an effect only on certain modules. See the documentation of particular modules for details of their behaviors in training/evaluation mode, if they are affected, e.g. Dropout, BatchNorm, etc.

Parameters:
  mode (bool) – whether to set training mode (True) or evaluation mode (False). Default: True.

Returns:
  self

Return type:
  Module
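For example, toggling a Dropout layer between the two modes:

import torch
import torch.nn as nn

net = nn.Sequential(nn.Linear(4, 4), nn.Dropout(p=0.5))
x = torch.randn(2, 4)

net.train()  # dropout active: repeated calls generally differ
net.eval()   # dropout disabled: output is deterministic
with torch.no_grad():
    y1, y2 = net(x), net(x)
assert torch.equal(y1, y2)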
type(dst_type: Union[torch.dtype, str]) → T

Casts all parameters and buffers to dst_type.

Note

This method modifies the module in-place.

Parameters:
  dst_type (type or string) – the desired type

Returns:
  self

Return type:
  Module

xpu(device: Union[int, torch.device, None] = None) → T

Move all model parameters and buffers to the XPU.

This also makes associated parameters and buffers different objects. So it should be called before constructing the optimizer if the module will live on XPU while being optimized.

Note

This method modifies the module in-place.

Parameters:
  device (int, optional) – if specified, all parameters will be copied to that device

Returns:
  self

Return type:
  Module
zero_grad(set_to_none: bool = True) → None

Reset gradients of all model parameters.

See similar function under torch.optim.Optimizer for more context.

Parameters:
  set_to_none (bool) – instead of setting to zero, set the grads to None. See torch.optim.Optimizer.zero_grad() for details.
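A small sketch of the accumulate-then-reset cycle:

import torch
import torch.nn as nn

layer = nn.Linear(3, 1)
layer(torch.randn(2, 3)).sum().backward()
print(layer.weight.grad is None)  # False: gradients were accumulated

layer.zero_grad()                 # set_to_none=True by default
print(layer.weight.grad is None)  # True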
diff --git a/_rst/adaptive_functions/AdaptiveSigmoid.html b/_rst/adaptive_functions/AdaptiveSigmoid.html
index c49c238d..5cd9f6d1 100644

AdaptiveSigmoid — PINA 0.1.2 documentation
AdaptiveSigmoid

class AdaptiveSigmoid(alpha=None, beta=None, gamma=None, fixed=None)[source]

Bases: AdaptiveActivationFunctionInterface

Adaptive trainable Sigmoid activation function.

Given the function \(\text{Sigmoid}:\mathbb{R}^n\rightarrow\mathbb{R}^n\), the adaptive function \(\text{Sigmoid}_{\text{adaptive}}:\mathbb{R}^n\rightarrow\mathbb{R}^n\) is defined as

\(\text{Sigmoid}_{\text{adaptive}}(\mathbf{x}) = \alpha\,\text{Sigmoid}(\beta\mathbf{x} + \gamma),\)

where \(\alpha\), \(\beta\), and \(\gamma\) are trainable parameters.

Parameters:
  • alpha (float | complex) – Scaling parameter alpha. Defaults to None. When None is passed, the variable is initialized to 1.
  • beta (float | complex) – Scaling parameter beta. Defaults to None. When None is passed, the variable is initialized to 1.
  • gamma (float | complex) – Shifting parameter gamma. Defaults to None. When None is passed, the variable is initialized to 1.
  • fixed (list) – List of parameters to fix during training.

The inherited members of AdaptiveSigmoid are identical to those documented for AdaptiveSiLU above.


-
-
Returns
-

a handle that can be used to remove the added hook by calling -handle.remove()

-
-
Return type
-

torch.utils.hooks.RemovableHandle

-
-
-
- -
-
-register_module(name: str, module: Optional[Module]) → None
-

Alias for add_module().

-
- -
-
-register_parameter(name: str, param: Optional[torch.nn.parameter.Parameter]) → None
-

Add a parameter to the module.

-

The parameter can be accessed as an attribute using given name.

-
-
Parameters
-
    -
  • name (str) – name of the parameter. The parameter can be accessed -from this module using the given name

  • -
  • param (Parameter or None) – parameter to be added to the module. If -None, then operations that run on parameters, such as cuda, -are ignored. If None, the parameter is not included in the -module’s state_dict.

  • -
-
-
-
- -
-
-register_state_dict_pre_hook(hook)
-

Register a pre-hook for the state_dict() method.

-

These hooks will be called with arguments: self, prefix, -and keep_vars before calling state_dict on self. The registered -hooks can be used to perform pre-processing before the state_dict -call is made.

-
- -
-
-requires_grad_(requires_grad: bool = True) → T
-

Change if autograd should record operations on parameters in this module.

-

This method sets the parameters’ requires_grad attributes -in-place.

-

This method is helpful for freezing part of the module for finetuning -or training parts of a model individually (e.g., GAN training).

-

See Locally disabling gradient computation for a comparison between -.requires_grad_() and several similar mechanisms that may be confused with it.

-
-
Parameters
-

requires_grad (bool) – whether autograd should record operations on -parameters in this module. Default: True.

-
-
Returns
-

self

-
-
Return type
-

Module

-
-
-
- -
-
-set_extra_state(state: Any) → None
-

Set extra state contained in the loaded state_dict.

-

This function is called from load_state_dict() to handle any extra state -found within the state_dict. Implement this function and a corresponding -get_extra_state() for your module if you need to store extra state within its -state_dict.

-
-
Parameters
-

state (dict) – Extra state from the state_dict

-
-
-
- -
-
-share_memory() → T
-

See torch.Tensor.share_memory_().

-
- -
-
-state_dict(*args, destination=None, prefix='', keep_vars=False)
-

Return a dictionary containing references to the whole state of the module.

-

Both parameters and persistent buffers (e.g. running averages) are -included. Keys are corresponding parameter and buffer names. -Parameters and buffers set to None are not included.

-
-

Note

-

The returned object is a shallow copy. It contains references -to the module’s parameters and buffers.

-
-
-

Warning

-

Currently state_dict() also accepts positional arguments for -destination, prefix and keep_vars in order. However, -this is being deprecated and keyword arguments will be enforced in -future releases.

-
-
-

Warning

-

Please avoid the use of argument destination as it is not -designed for end-users.

-
-
-
Parameters
-
    -
  • destination (dict, optional) – If provided, the state of module will -be updated into the dict and the same object is returned. -Otherwise, an OrderedDict will be created and returned. -Default: None.

  • -
  • prefix (str, optional) – a prefix added to parameter and buffer -names to compose the keys in state_dict. Default: ''.

  • -
  • keep_vars (bool, optional) – by default the Tensor s -returned in the state dict are detached from autograd. If it’s -set to True, detaching will not be performed. -Default: False.

  • -
-
-
Returns
-

a dictionary containing a whole state of the module

-
-
Return type
-

dict

-
-
-

Example:

-
>>> # xdoctest: +SKIP("undefined vars")
->>> module.state_dict().keys()
-['bias', 'weight']
-
-
-
- -
-
-to(*args, **kwargs)
-

Move and/or cast the parameters and buffers.

-

This can be called as

-
-
-to(device=None, dtype=None, non_blocking=False)
-
- -
-
-to(dtype, non_blocking=False)
-
- -
-
-to(tensor, non_blocking=False)
-
- -
-
-to(memory_format=torch.channels_last)
-
- -

Its signature is similar to torch.Tensor.to(), but only accepts -floating point or complex dtypes. In addition, this method will -only cast the floating point or complex parameters and buffers to dtype -(if given). The integral parameters and buffers will be moved -device, if that is given, but with dtypes unchanged. When -non_blocking is set, it tries to convert/move asynchronously -with respect to the host if possible, e.g., moving CPU Tensors with -pinned memory to CUDA devices.

-

See below for examples.

-
-

Note

-

This method modifies the module in-place.

-
-
-
Parameters
-
    -
  • device (torch.device) – the desired device of the parameters -and buffers in this module

  • -
  • dtype (torch.dtype) – the desired floating point or complex dtype of -the parameters and buffers in this module

  • -
  • tensor (torch.Tensor) – Tensor whose dtype and device are the desired -dtype and device for all parameters and buffers in this module

  • -
  • memory_format (torch.memory_format) – the desired memory -format for 4D parameters and buffers in this module (keyword -only argument)

  • -
-
-
Returns
-

self

-
-
Return type
-

Module

-
-
-

Examples:

-
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
->>> linear = nn.Linear(2, 2)
->>> linear.weight
-Parameter containing:
-tensor([[ 0.1913, -0.3420],
-        [-0.5113, -0.2325]])
->>> linear.to(torch.double)
-Linear(in_features=2, out_features=2, bias=True)
->>> linear.weight
-Parameter containing:
-tensor([[ 0.1913, -0.3420],
-        [-0.5113, -0.2325]], dtype=torch.float64)
->>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA1)
->>> gpu1 = torch.device("cuda:1")
->>> linear.to(gpu1, dtype=torch.half, non_blocking=True)
-Linear(in_features=2, out_features=2, bias=True)
->>> linear.weight
-Parameter containing:
-tensor([[ 0.1914, -0.3420],
-        [-0.5112, -0.2324]], dtype=torch.float16, device='cuda:1')
->>> cpu = torch.device("cpu")
->>> linear.to(cpu)
-Linear(in_features=2, out_features=2, bias=True)
->>> linear.weight
-Parameter containing:
-tensor([[ 0.1914, -0.3420],
-        [-0.5112, -0.2324]], dtype=torch.float16)
-
->>> linear = nn.Linear(2, 2, bias=None).to(torch.cdouble)
->>> linear.weight
-Parameter containing:
-tensor([[ 0.3741+0.j,  0.2382+0.j],
-        [ 0.5593+0.j, -0.4443+0.j]], dtype=torch.complex128)
->>> linear(torch.ones(3, 2, dtype=torch.cdouble))
-tensor([[0.6122+0.j, 0.1150+0.j],
-        [0.6122+0.j, 0.1150+0.j],
-        [0.6122+0.j, 0.1150+0.j]], dtype=torch.complex128)
-
-
-
- -
-
-to_empty(*, device: Union[int, str, torch.device, None], recurse: bool = True) → T
-

Move the parameters and buffers to the specified device without copying storage.

-
-
Parameters
-
    -
  • device (torch.device) – The desired device of the parameters -and buffers in this module.

  • -
  • recurse (bool) – Whether parameters and buffers of submodules should -be recursively moved to the specified device.

  • -
-
-
Returns
-

self

-
-
Return type
-

Module

-
-
-
- -
-
-train(mode: bool = True) → T
-

Set the module in training mode.

-

This has any effect only on certain modules. See documentations of -particular modules for details of their behaviors in training/evaluation -mode, if they are affected, e.g. Dropout, BatchNorm, -etc.

-
-
Parameters
-

mode (bool) – whether to set training mode (True) or evaluation -mode (False). Default: True.

-
-
Returns
-

self

-
-
Return type
-

Module

-
-
-
- -
-
-type(dst_type: Union[torch.dtype, str]) → T
-

Casts all parameters and buffers to dst_type.

-
-

Note

-

This method modifies the module in-place.

-
-
-
Parameters
-

dst_type (type or string) – the desired type

-
-
Returns
-

self

-
-
Return type
-

Module

-
-
-
- -
-
-xpu(device: Union[int, torch.device, None] = None) → T
-

Move all model parameters and buffers to the XPU.

-

This also makes associated parameters and buffers different objects. So -it should be called before constructing optimizer if the module will -live on XPU while being optimized.

-
-

Note

-

This method modifies the module in-place.

-
-
-
Parameters
-

device (int, optional) – if specified, all parameters will be -copied to that device

-
-
Returns
-

self

-
-
Return type
-

Module

-
-
-
- -
-
-zero_grad(set_to_none: bool = True) → None
-

Reset gradients of all model parameters.

-

See similar function under torch.optim.Optimizer for more context.

-
-
Parameters
-

set_to_none (bool) – instead of setting to zero, set the grads to None. -See torch.optim.Optimizer.zero_grad() for details.

-
-
-
- - - -
diff --git a/_rst/adaptive_functions/AdaptiveSoftmax.html b/_rst/adaptive_functions/AdaptiveSoftmax.html
index 811f511f..166b71bd 100644
AdaptiveSoftmax#

class AdaptiveSoftmax(alpha=None, beta=None, gamma=None, fixed=None)[source]#

Bases: AdaptiveActivationFunctionInterface

Adaptive trainable Softmax activation function.

Given the function \(\text{Softmax}:\mathbb{R}^n\rightarrow\mathbb{R}^n\), the adaptive function \(\text{Softmax}_{\text{adaptive}}:\mathbb{R}^n\rightarrow\mathbb{R}^n\) is defined as

\[\text{Softmax}_{\text{adaptive}}(\mathbf{x}) = \alpha\,\text{Softmax}(\beta\mathbf{x} + \gamma),\]

where \(\alpha\) and \(\beta\) are trainable scaling parameters and \(\gamma\) is a trainable shifting parameter.

Parameters:
  • alpha (float | complex) – Scaling parameter alpha. Defaults to None; when None is passed, the variable is initialized to 1.
  • beta (float | complex) – Scaling parameter beta. Defaults to None; when None is passed, the variable is initialized to 1.
  • gamma (float | complex) – Shifting parameter gamma. Defaults to None; when None is passed, the variable is initialized to 1.
  • fixed (list) – List of parameters to fix during training, i.e. they are not optimized. Defaults to None.
    AdaptiveSoftmax -
    -add_module(name: str, module: Optional[Module]) → None
    -

    Add a child module to the current module.

    -

    The module can be accessed as an attribute using the given name.

    -
    -
    Parameters
    -
      -
    • name (str) – name of the child module. The child module can be -accessed from this module using the given name

    • -
    • module (Module) – child module to be added to the module.

    • -
    -
    -
    -

- -
-
-property alpha
-

The alpha variable.

-
- -
-
-apply(fn: Callable[[Module], None]) → T
-

Apply fn recursively to every submodule (as returned by .children()) as well as self.

-

Typical use includes initializing the parameters of a model -(see also torch.nn.init).

-
-
Parameters
-

fn (Module -> None) – function to be applied to each submodule

-
-
Returns
-

self

-
-
Return type
-

Module

-
-
-

Example:

-
>>> @torch.no_grad()
->>> def init_weights(m):
->>>     print(m)
->>>     if type(m) == nn.Linear:
->>>         m.weight.fill_(1.0)
->>>         print(m.weight)
->>> net = nn.Sequential(nn.Linear(2, 2), nn.Linear(2, 2))
->>> net.apply(init_weights)
-Linear(in_features=2, out_features=2, bias=True)
-Parameter containing:
-tensor([[1., 1.],
-        [1., 1.]], requires_grad=True)
-Linear(in_features=2, out_features=2, bias=True)
-Parameter containing:
-tensor([[1., 1.],
-        [1., 1.]], requires_grad=True)
-Sequential(
-  (0): Linear(in_features=2, out_features=2, bias=True)
-  (1): Linear(in_features=2, out_features=2, bias=True)
-)
-
-
-
-
-property beta
-

The beta variable.

-
- -
-
-bfloat16() → T
-

Casts all floating point parameters and buffers to bfloat16 datatype.

-
-

Note

-

This method modifies the module in-place.

-
-
-
Returns
-

self

-
-
Return type
-

Module

-
-
-
+
-
-
-buffers(recurse: bool = True) → Iterator[torch.Tensor]
-

Return an iterator over module buffers.

-
-
Parameters
-

recurse (bool) – if True, then yields buffers of this module -and all submodules. Otherwise, yields only buffers that -are direct members of this module.

-
-
Yields
-

torch.Tensor – module buffer

-
-
-

Example:

-
>>> # xdoctest: +SKIP("undefined vars")
->>> for buf in model.buffers():
->>>     print(type(buf), buf.size())
-<class 'torch.Tensor'> (20L,)
-<class 'torch.Tensor'> (20L, 1L, 5L, 5L)
-
-
-
-
-
-children() → Iterator[torch.nn.modules.module.Module]
-

Return an iterator over immediate children modules.

-
-
Yields
-

Module – a child module

-
-
-
+
+ + + + + +
+ + + +
+ + +
+
+ +
+ +
+
+
+ + + + -
-
-float() → T
-

Casts all floating point parameters and buffers to float datatype.

-
-

Note

-

This method modifies the module in-place.

+
+ -
-
Returns
-

self

-
-
Return type
-

Module

-
-
-
- -
-
forward(x)

  Define the computation performed at every call: applies the activation function to the input elementwise.

  Parameters:
    x (torch.Tensor | LabelTensor) – The input tensor on which to evaluate the activation function.

property func

  The callable activation function.

property gamma

  The gamma variable.
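As a sketch of how these members fit together, continuing the act instance from the earlier example (shapes are illustrative):

>>> act.alpha                       # trainable scalar variable, initialized to 1
>>> act.func                        # the underlying Softmax callable
>>> out = act.forward(torch.rand(2, 5))  # equivalent to act(torch.rand(2, 5))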
get_buffer(target: str) → torch.Tensor

  Return the buffer given by target if it exists, otherwise throw an error. See the docstring for get_submodule for a more detailed explanation of how to correctly specify target. Raises AttributeError if the target string references an invalid path or resolves to something that is not a buffer.

get_extra_state() → Any

  Return any extra state to include in the module's state_dict. Implement this and a corresponding set_extra_state() for your module if you need to store extra state. This function is called when building the module's state_dict().

  Note that extra state should be picklable to ensure working serialization of the state_dict. We only provide backwards compatibility guarantees for serializing Tensors; other objects may break backwards compatibility if their serialized pickled form changes.

get_parameter(target: str) → torch.nn.Parameter

  Return the parameter given by target if it exists, otherwise throw an error. See the docstring for get_submodule for how to correctly specify target. Raises AttributeError if the target string references an invalid path or resolves to something that is not an nn.Parameter.

get_submodule(target: str) → torch.nn.Module

  Return the submodule given by target if it exists, otherwise throw an error.

  For example, let's say you have an nn.Module A that looks like this:

  A(
      (net_b): Module(
          (net_c): Module(
              (conv): Conv2d(16, 33, kernel_size=(3, 3), stride=(2, 2))
          )
          (linear): Linear(in_features=100, out_features=200, bias=True)
      )
  )

  (A has a nested submodule net_b, which itself has two submodules net_c and linear; net_c then has a submodule conv.) To check whether we have the linear submodule, we would call get_submodule("net_b.linear"); for the conv submodule, get_submodule("net_b.net_c.conv").

  The runtime of get_submodule is bounded by the degree of module nesting in target, whereas a query against named_modules achieves the same result in O(N) in the number of transitive modules. For a simple check of whether some submodule exists, get_submodule should always be used.

  Raises AttributeError if the target string references an invalid path or resolves to something that is not an nn.Module.

half() → T

  Casts all floating point parameters and buffers to half datatype. This method modifies the module in-place and returns self.

ipu(device: Union[int, torch.device, None] = None) → T

  Move all model parameters and buffers to the IPU. This also makes associated parameters and buffers different objects, so it should be called before constructing the optimizer if the module will live on the IPU while being optimized. If device is specified, all parameters are copied to that device. This method modifies the module in-place and returns self.
load_state_dict(state_dict: Mapping[str, Any], strict: bool = True, assign: bool = False)

  Copy parameters and buffers from state_dict into this module and its descendants. If strict is True, the keys of state_dict must exactly match the keys returned by this module's state_dict() function.

  Warning: if assign is True, the optimizer must be created after the call to load_state_dict unless get_swap_module_params_on_conversion() is True.

  Parameters:
    • state_dict (dict) – a dict containing parameters and persistent buffers.
    • strict (bool, optional) – whether to strictly enforce that the keys in state_dict match the keys returned by this module's state_dict() function. Default: True.
    • assign (bool, optional) – when False, the properties of the tensors in the current module are preserved; when True, the properties of the tensors in the state dict are preserved. The only exception is the requires_grad field. Default: False.

  Returns: a NamedTuple with missing_keys and unexpected_keys fields, where missing_keys is a list of str containing the missing keys and unexpected_keys is a list of str containing the unexpected keys.

  Note: if a parameter or buffer is registered as None and its corresponding key exists in state_dict, load_state_dict() will raise a RuntimeError.
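A short sketch of the non-strict loading behaviour described above (the checkpoint path is illustrative, not from the library):

>>> sd = torch.load('checkpoint.pt')              # hypothetical checkpoint file
>>> result = act.load_state_dict(sd, strict=False)
>>> result.missing_keys, result.unexpected_keys   # both empty when the keys match exactly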
modules() → Iterator[Module]

  Return an iterator over all modules in the network. Duplicate modules are returned only once; in the following example, l is returned only once.

  Example:

  >>> l = nn.Linear(2, 2)
  >>> net = nn.Sequential(l, l)
  >>> for idx, m in enumerate(net.modules()):
  ...     print(idx, '->', m)
  0 -> Sequential(
    (0): Linear(in_features=2, out_features=2, bias=True)
    (1): Linear(in_features=2, out_features=2, bias=True)
  )
  1 -> Linear(in_features=2, out_features=2, bias=True)

named_buffers(prefix: str = '', recurse: bool = True, remove_duplicate: bool = True) → Iterator[Tuple[str, torch.Tensor]]

  Return an iterator over module buffers, yielding both the name of the buffer and the buffer itself.

  Parameters:
    • prefix (str) – prefix to prepend to all buffer names.
    • recurse (bool, optional) – if True, yields buffers of this module and all submodules; otherwise only buffers that are direct members of this module. Defaults to True.
    • remove_duplicate (bool, optional) – whether to remove the duplicated buffers in the result. Defaults to True.

named_children() → Iterator[Tuple[str, Module]]

  Return an iterator over immediate children modules, yielding both the name of the module and the module itself.

named_modules(memo: Optional[Set[Module]] = None, prefix: str = '', remove_duplicate: bool = True)

  Return an iterator over all modules in the network, yielding both the name of the module and the module itself. Duplicate modules are returned only once. memo is a memo to store the set of modules already added to the result, and prefix is prepended to the name of each module.

named_parameters(prefix: str = '', recurse: bool = True, remove_duplicate: bool = True) → Iterator[Tuple[str, torch.nn.Parameter]]

  Return an iterator over module parameters, yielding both the name of the parameter and the parameter itself.

  Parameters:
    • prefix (str) – prefix to prepend to all parameter names.
    • recurse (bool) – if True, yields parameters of this module and all submodules; otherwise only parameters that are direct members of this module.
    • remove_duplicate (bool, optional) – whether to remove the duplicated parameters in the result. Defaults to True.

parameters(recurse: bool = True) → Iterator[torch.nn.Parameter]

  Return an iterator over module parameters. This is typically passed to an optimizer. If recurse is True, yields parameters of this module and all submodules; otherwise only parameters that are direct members of this module.
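Since parameters() is typically handed to an optimizer, a minimal sketch for the adaptive activation above (the optimizer choice and learning rate are arbitrary):

>>> import torch.optim as optim
>>> opt = optim.Adam(act.parameters(), lr=1e-3)
>>> [n for n, _ in act.named_parameters()]  # the adaptive variables not listed in `fixed`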
register_backward_hook(hook) → torch.utils.hooks.RemovableHandle

  Register a backward hook on the module. This function is deprecated in favor of register_full_backward_hook(), and its behavior will change in future versions. Returns a handle that can be used to remove the added hook by calling handle.remove().

register_buffer(name: str, tensor: Optional[torch.Tensor], persistent: bool = True) → None

  Add a buffer to the module. This is typically used to register a buffer that should not be considered a model parameter: for example, BatchNorm's running_mean is not a parameter, but is part of the module's state. Buffers are persistent by default and will be saved alongside parameters; this behavior can be changed by setting persistent to False. The only difference between a persistent buffer and a non-persistent buffer is that the latter will not be part of this module's state_dict. Buffers can be accessed as attributes using the given names.

  Parameters:
    • name (str) – name of the buffer. The buffer can be accessed from this module using the given name.
    • tensor (Tensor or None) – buffer to be registered. If None, operations that run on buffers, such as cuda, are ignored, and the buffer is not included in the module's state_dict.
    • persistent (bool) – whether the buffer is part of this module's state_dict.

  Example:

  >>> # xdoctest: +SKIP("undefined vars")
  >>> self.register_buffer('running_mean', torch.zeros(num_features))

register_forward_hook(hook, *, prepend: bool = False, with_kwargs: bool = False, always_call: bool = False) → torch.utils.hooks.RemovableHandle

  Register a forward hook on the module. The hook will be called every time after forward() has computed an output.

  If with_kwargs is False or not specified, the input contains only the positional arguments given to the module; keyword arguments are passed only to forward, not to the hooks. The hook can modify the output. It can modify the input in-place, but this has no effect on forward since it is called after forward(). The hook should have the following signature:

  hook(module, args, output) -> None or modified output

  If with_kwargs is True, the forward hook is passed the kwargs given to the forward function and is expected to return the (possibly modified) output:

  hook(module, args, kwargs, output) -> None or modified output

  Parameters:
    • hook (Callable) – The user-defined hook to be registered.
    • prepend (bool) – If True, the provided hook is fired before all existing forward hooks on this torch.nn.modules.Module; otherwise after them. Note that global forward hooks registered with register_module_forward_hook() fire before all hooks registered by this method. Default: False.
    • with_kwargs (bool) – If True, the hook is passed the kwargs given to the forward function. Default: False.
    • always_call (bool) – If True, the hook runs regardless of whether an exception is raised while calling the Module. Default: False.

  Returns: a handle that can be used to remove the added hook by calling handle.remove() (torch.utils.hooks.RemovableHandle).
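A sketch of attaching and removing a forward hook on the adaptive activation, continuing the act instance from the earlier example (the printed message is illustrative):

>>> def log_output(module, args, output):
...     print('output shape:', tuple(output.shape))
>>> handle = act.register_forward_hook(log_output)
>>> _ = act(torch.rand(4, 3))
output shape: (4, 3)
>>> handle.remove()  # detach the hook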
register_forward_pre_hook(hook, *, prepend: bool = False, with_kwargs: bool = False) → torch.utils.hooks.RemovableHandle

  Register a forward pre-hook on the module. The hook will be called every time before forward() is invoked.

  If with_kwargs is False or not specified, the input contains only the positional arguments given to the module; keyword arguments are passed only to forward. The hook can modify the input: it can return either a tuple or a single modified value, and a single returned value is wrapped into a tuple (unless it already is one). The hook should have the following signature:

  hook(module, args) -> None or modified input

  If with_kwargs is True, the forward pre-hook is passed the kwargs given to the forward function, and if the hook modifies the input, both args and kwargs should be returned:

  hook(module, args, kwargs) -> None or a tuple of modified input and kwargs

  Parameters:
    • hook (Callable) – The user-defined hook to be registered.
    • prepend (bool) – If True, the provided hook is fired before all existing forward_pre hooks on this torch.nn.modules.Module; otherwise after them. Note that global forward_pre hooks registered with register_module_forward_pre_hook() fire before all hooks registered by this method. Default: False.
    • with_kwargs (bool) – If True, the hook is passed the kwargs given to the forward function. Default: False.

  Returns: a handle that can be used to remove the added hook by calling handle.remove() (torch.utils.hooks.RemovableHandle).

register_full_backward_hook(hook, prepend: bool = False) → torch.utils.hooks.RemovableHandle

  Register a backward hook on the module. The hook will be called every time the gradients with respect to a module are computed, i.e. it executes if and only if the gradients with respect to module outputs are computed. The hook should have the following signature:

  hook(module, grad_input, grad_output) -> tuple(Tensor) or None

  grad_input and grad_output are tuples that contain the gradients with respect to the inputs and outputs respectively. The hook should not modify its arguments, but it can optionally return a new gradient with respect to the input that will be used in place of grad_input in subsequent computations. grad_input corresponds only to the inputs given as positional arguments; all kwarg arguments are ignored. Entries in grad_input and grad_output are None for all non-Tensor arguments.

  For technical reasons, when this hook is applied to a Module, its forward function receives a view of each Tensor passed to the Module, and the caller receives a view of each Tensor returned by the Module's forward function.

  Warning: modifying inputs or outputs in-place is not allowed when using backward hooks and will raise an error.

  Parameters:
    • hook (Callable) – The user-defined hook to be registered.
    • prepend (bool) – If True, the provided hook is fired before all existing backward hooks on this torch.nn.modules.Module; otherwise after them. Note that global backward hooks registered with register_module_full_backward_hook() fire before all hooks registered by this method.

  Returns: a handle that can be used to remove the added hook by calling handle.remove() (torch.utils.hooks.RemovableHandle).

register_full_backward_pre_hook(hook, prepend: bool = False) → torch.utils.hooks.RemovableHandle

  Register a backward pre-hook on the module. The hook will be called every time the gradients for the module are computed, with the signature:

  hook(module, grad_output) -> tuple[Tensor] or None

  grad_output is a tuple. The hook should not modify its arguments, but it can optionally return a new gradient with respect to the output that will be used in place of grad_output in subsequent computations. Entries in grad_output are None for all non-Tensor arguments.

  For technical reasons, when this hook is applied to a Module, its forward function receives a view of each Tensor passed to the Module, and the caller receives a view of each Tensor returned by the Module's forward function.

  Warning: modifying inputs in-place is not allowed when using backward hooks and will raise an error.

  Parameters:
    • hook (Callable) – The user-defined hook to be registered.
    • prepend (bool) – If True, the provided hook is fired before all existing backward_pre hooks on this torch.nn.modules.Module; otherwise after them. Note that global backward_pre hooks registered with register_module_full_backward_pre_hook() fire before all hooks registered by this method.

  Returns: a handle that can be used to remove the added hook by calling handle.remove() (torch.utils.hooks.RemovableHandle).
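A sketch of a full backward hook that only inspects gradients, respecting the no-modification rule above:

>>> def log_grads(module, grad_input, grad_output):
...     print('grad wrt output norm:', grad_output[0].norm().item())
>>> h = act.register_full_backward_hook(log_grads)
>>> act(torch.rand(4, 3, requires_grad=True)).sum().backward()  # hook fires here
>>> h.remove()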
register_load_state_dict_post_hook(hook)

  Register a post-hook to be run after the module's load_state_dict is called. It should have the following signature:

  hook(module, incompatible_keys) -> None

  The module argument is the current module that this hook is registered on, and the incompatible_keys argument is a NamedTuple consisting of the attributes missing_keys and unexpected_keys, both lists of str. The given incompatible_keys can be modified in-place if needed.

  Note that the checks performed when calling load_state_dict() with strict=True are affected by modifications the hook makes to missing_keys or unexpected_keys, as expected: additions to either set of keys will result in an error being thrown when strict=True, while clearing out both missing and unexpected keys will avoid an error.

  Returns: a handle that can be used to remove the added hook by calling handle.remove() (torch.utils.hooks.RemovableHandle).

register_module(name: str, module: Optional[Module]) → None

  Alias for add_module().

register_parameter(name: str, param: Optional[torch.nn.Parameter]) → None

  Add a parameter to the module. The parameter can be accessed as an attribute using the given name.

  Parameters:
    • name (str) – name of the parameter. The parameter can be accessed from this module using the given name.
    • param (Parameter or None) – parameter to be added to the module. If None, operations that run on parameters, such as cuda, are ignored, and the parameter is not included in the module's state_dict.

register_state_dict_pre_hook(hook)

  Register a pre-hook for the state_dict() method. These hooks are called with the arguments self, prefix, and keep_vars before state_dict is invoked on self, and can be used to perform pre-processing before the state_dict call is made.

requires_grad_(requires_grad: bool = True) → T

  Change if autograd should record operations on parameters in this module. This method sets the parameters' requires_grad attributes in-place, and is helpful for freezing part of the module for finetuning or for training parts of a model individually (e.g., GAN training). See Locally disabling gradient computation for a comparison between .requires_grad_() and several similar mechanisms that may be confused with it.

  Parameters:
    requires_grad (bool) – whether autograd should record operations on parameters in this module. Default: True.

  Returns: self (Module).
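A sketch of the freezing use-case mentioned above, applied to the adaptive activation:

>>> _ = act.requires_grad_(False)   # freeze all adaptive parameters in-place
>>> all(not p.requires_grad for p in act.parameters())
True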
set_extra_state(state: Any) → None

  Set extra state contained in the loaded state_dict. This function is called from load_state_dict() to handle any extra state found within the state_dict. Implement this function and a corresponding get_extra_state() for your module if you need to store extra state within its state_dict.

  Parameters:
    state (dict) – Extra state from the state_dict.

share_memory() → T

  See torch.Tensor.share_memory_().

state_dict(*args, destination=None, prefix='', keep_vars=False)

  Return a dictionary containing references to the whole state of the module. Both parameters and persistent buffers (e.g. running averages) are included; keys are the corresponding parameter and buffer names. Parameters and buffers set to None are not included.

  Note: the returned object is a shallow copy. It contains references to the module's parameters and buffers.

  Warning: currently state_dict() also accepts positional arguments for destination, prefix and keep_vars, in that order. This is being deprecated, and keyword arguments will be enforced in future releases. Please avoid using the destination argument, as it is not designed for end users.

  Parameters:
    • destination (dict, optional) – If provided, the state of the module is updated into this dict and the same object is returned; otherwise an OrderedDict is created and returned. Default: None.
    • prefix (str, optional) – a prefix added to parameter and buffer names to compose the keys in state_dict. Default: ''.
    • keep_vars (bool, optional) – by default the Tensors returned in the state dict are detached from autograd; if set to True, detaching is not performed. Default: False.

  Returns: a dictionary containing the whole state of the module (dict).

  Example:

  >>> # xdoctest: +SKIP("undefined vars")
  >>> module.state_dict().keys()
  ['bias', 'weight']
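The save/restore round trip implied by state_dict() and load_state_dict(), as a sketch (the file name is illustrative):

>>> torch.save(act.state_dict(), 'act.pt')
>>> act.load_state_dict(torch.load('act.pt'))
<All keys matched successfully>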
to(*args, **kwargs)

  Move and/or cast the parameters and buffers. This can be called as

  to(device=None, dtype=None, non_blocking=False)
  to(dtype, non_blocking=False)
  to(tensor, non_blocking=False)
  to(memory_format=torch.channels_last)

  Its signature is similar to torch.Tensor.to(), but only accepts floating point or complex dtypes. In addition, this method only casts the floating point or complex parameters and buffers to dtype (if given); integral parameters and buffers are moved to device, if that is given, but with dtypes unchanged. When non_blocking is set, it tries to convert/move asynchronously with respect to the host if possible, e.g., moving CPU Tensors with pinned memory to CUDA devices. This method modifies the module in-place and returns self.

  Parameters:
    • device (torch.device) – the desired device of the parameters and buffers in this module.
    • dtype (torch.dtype) – the desired floating point or complex dtype of the parameters and buffers in this module.
    • tensor (torch.Tensor) – Tensor whose dtype and device are the desired dtype and device for all parameters and buffers in this module.
    • memory_format (torch.memory_format) – the desired memory format for 4D parameters and buffers in this module (keyword-only argument).

  Examples:

  >>> # xdoctest: +IGNORE_WANT("non-deterministic")
  >>> linear = nn.Linear(2, 2)
  >>> linear.to(torch.double)
  Linear(in_features=2, out_features=2, bias=True)
  >>> linear.weight.dtype
  torch.float64
  >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA1)
  >>> gpu1 = torch.device("cuda:1")
  >>> linear.to(gpu1, dtype=torch.half, non_blocking=True)
  Linear(in_features=2, out_features=2, bias=True)
  >>> linear.weight.dtype, linear.weight.device
  (torch.float16, device(type='cuda', index=1))

to_empty(*, device: Union[int, str, torch.device, None], recurse: bool = True) → T

  Move the parameters and buffers to the specified device without copying storage.

  Parameters:
    • device (torch.device) – The desired device of the parameters and buffers in this module.
    • recurse (bool) – Whether parameters and buffers of submodules should be recursively moved to the specified device.

  Returns: self (Module).

train(mode: bool = True) → T

  Set the module in training mode. This has an effect only on certain modules; see the documentation of particular modules for details of their behavior in training/evaluation mode, if they are affected, e.g. Dropout, BatchNorm, etc.

  Parameters:
    mode (bool) – whether to set training mode (True) or evaluation mode (False). Default: True.

  Returns: self (Module).
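Although the adaptive activation itself has no Dropout- or BatchNorm-like behaviour, the usual toggle applies when it is embedded in a larger model (sketch):

>>> _ = act.train()   # training mode
>>> _ = act.eval()    # evaluation mode, equivalent to act.train(False)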
type(dst_type: Union[torch.dtype, str]) → T

  Casts all parameters and buffers to dst_type. This method modifies the module in-place.

  Parameters:
    dst_type (type or string) – the desired type.

  Returns: self (Module).

xpu(device: Union[int, torch.device, None] = None) → T

  Move all model parameters and buffers to the XPU. This also makes associated parameters and buffers different objects, so it should be called before constructing the optimizer if the module will live on the XPU while being optimized. This method modifies the module in-place.

  Parameters:
    device (int, optional) – if specified, all parameters will be copied to that device.

  Returns: self (Module).

zero_grad(set_to_none: bool = True) → None

  Reset gradients of all model parameters. See the similar function under torch.optim.Optimizer for more context.

  Parameters:
    set_to_none (bool) – instead of setting to zero, set the grads to None. See torch.optim.Optimizer.zero_grad() for details.
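A sketch of clearing gradients directly on the module between manual backward passes:

>>> act(torch.rand(4, 3)).sum().backward()
>>> act.zero_grad(set_to_none=True)   # each parameter's .grad is now None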
diff --git a/_rst/adaptive_functions/AdaptiveSoftmin.html b/_rst/adaptive_functions/AdaptiveSoftmin.html
index bcefe7c6..d473dd48 100644
AdaptiveSoftmin#

class AdaptiveSoftmin(alpha=None, beta=None, gamma=None, fixed=None)[source]#

Bases: AdaptiveActivationFunctionInterface

Adaptive trainable Softmin activation function.

Given the function \(\text{Softmin}:\mathbb{R}^n\rightarrow\mathbb{R}^n\), the adaptive function \(\text{Softmin}_{\text{adaptive}}:\mathbb{R}^n\rightarrow\mathbb{R}^n\) is defined as

\[\text{Softmin}_{\text{adaptive}}(\mathbf{x}) = \alpha\,\text{Softmin}(\beta\mathbf{x} + \gamma),\]

where \(\alpha\) and \(\beta\) are trainable scaling parameters and \(\gamma\) is a trainable shifting parameter.

Parameters:
  • alpha (float | complex) – Scaling parameter alpha. Defaults to None; when None is passed, the variable is initialized to 1.
  • beta (float | complex) – Scaling parameter beta. Defaults to None; when None is passed, the variable is initialized to 1.
  • gamma (float | complex) – Shifting parameter gamma. Defaults to None; when None is passed, the variable is initialized to 1.
  • fixed (list) – List of parameters to fix during training, i.e. they are not optimized. Defaults to None.
    AdaptiveSoftmin -
    -add_module(name: str, module: Optional[Module]) → None
    -

    Add a child module to the current module.

    -

    The module can be accessed as an attribute using the given name.

    -
    -
    Parameters
    -
      -
    • name (str) – name of the child module. The child module can be -accessed from this module using the given name

    • -
    • module (Module) – child module to be added to the module.

    • -
    -
    -
    -

- -
-
-property alpha
-

The alpha variable.

-
- -
-
-apply(fn: Callable[[Module], None]) → T
-

Apply fn recursively to every submodule (as returned by .children()) as well as self.

-

Typical use includes initializing the parameters of a model -(see also torch.nn.init).

-
-
Parameters
-

fn (Module -> None) – function to be applied to each submodule

-
-
Returns
-

self

-
-
Return type
-

Module

-
-
-

Example:

-
>>> @torch.no_grad()
->>> def init_weights(m):
->>>     print(m)
->>>     if type(m) == nn.Linear:
->>>         m.weight.fill_(1.0)
->>>         print(m.weight)
->>> net = nn.Sequential(nn.Linear(2, 2), nn.Linear(2, 2))
->>> net.apply(init_weights)
-Linear(in_features=2, out_features=2, bias=True)
-Parameter containing:
-tensor([[1., 1.],
-        [1., 1.]], requires_grad=True)
-Linear(in_features=2, out_features=2, bias=True)
-Parameter containing:
-tensor([[1., 1.],
-        [1., 1.]], requires_grad=True)
-Sequential(
-  (0): Linear(in_features=2, out_features=2, bias=True)
-  (1): Linear(in_features=2, out_features=2, bias=True)
-)
-
-
-
-
-property beta
-

The beta variable.

-
- -
-
-bfloat16() → T
-

Casts all floating point parameters and buffers to bfloat16 datatype.

-
-

Note

-

This method modifies the module in-place.

-
-
-
Returns
-

self

-
-
Return type
-

Module

-
-
-
+
-
-
-buffers(recurse: bool = True) → Iterator[torch.Tensor]
-

Return an iterator over module buffers.

-
-
Parameters
-

recurse (bool) – if True, then yields buffers of this module -and all submodules. Otherwise, yields only buffers that -are direct members of this module.

-
-
Yields
-

torch.Tensor – module buffer

-
-
-

Example:

-
>>> # xdoctest: +SKIP("undefined vars")
->>> for buf in model.buffers():
->>>     print(type(buf), buf.size())
-<class 'torch.Tensor'> (20L,)
-<class 'torch.Tensor'> (20L, 1L, 5L, 5L)
-
-
-
-
-
-children() → Iterator[torch.nn.modules.module.Module]
-

Return an iterator over immediate children modules.

-
-
Yields
-

Module – a child module

-
-
-
+
+ + + + + +
+ + + +
+ + +
+
+ +
+ +
+
+
+ + + + -
-
-float() → T
-

Casts all floating point parameters and buffers to float datatype.

-
-

Note

-

This method modifies the module in-place.

+
+ -
-
Returns
-

self

-
-
Return type
-

Module

-
-
-
- -
-
-forward(x)
-

Define the computation performed at every call. -The function to the input elementwise.

-
-
Parameters
-

x (torch.Tensor | LabelTensor) – The input tensor to evaluate the activation function.

-
-
-
- -
-
-property func
-

The callable activation function.

-
- -
-
-property gamma
-

The gamma variable.

-
- -
-
-get_buffer(target: str) → torch.Tensor
-

Return the buffer given by target if it exists, otherwise throw an error.

-

See the docstring for get_submodule for a more detailed -explanation of this method’s functionality as well as how to -correctly specify target.

-
-
Parameters
-

target – The fully-qualified string name of the buffer -to look for. (See get_submodule for how to specify a -fully-qualified string.)

-
-
Returns
-

The buffer referenced by target

-
-
Return type
-

torch.Tensor

-
-
Raises
-

AttributeError – If the target string references an invalid - path or resolves to something that is not a - buffer

-
-
-
- -
-
-get_extra_state() → Any
-

Return any extra state to include in the module’s state_dict.

-

Implement this and a corresponding set_extra_state() for your module -if you need to store extra state. This function is called when building the -module’s state_dict().

-

Note that extra state should be picklable to ensure working serialization -of the state_dict. We only provide provide backwards compatibility guarantees -for serializing Tensors; other objects may break backwards compatibility if -their serialized pickled form changes.

-
-
Returns
-

Any extra state to store in the module’s state_dict

-
-
Return type
-

object

-
-
-
- -
-
-get_parameter(target: str) → torch.nn.parameter.Parameter
-

Return the parameter given by target if it exists, otherwise throw an error.

-

See the docstring for get_submodule for a more detailed -explanation of this method’s functionality as well as how to -correctly specify target.

-
-
Parameters
-

target – The fully-qualified string name of the Parameter -to look for. (See get_submodule for how to specify a -fully-qualified string.)

-
-
Returns
-

The Parameter referenced by target

-
-
Return type
-

torch.nn.Parameter

-
-
Raises
-

AttributeError – If the target string references an invalid - path or resolves to something that is not an - nn.Parameter

-
-
-
- -
-
-get_submodule(target: str) → torch.nn.modules.module.Module
-

Return the submodule given by target if it exists, otherwise throw an error.

-

For example, let’s say you have an nn.Module A that -looks like this:

-
A(
-    (net_b): Module(
-        (net_c): Module(
-            (conv): Conv2d(16, 33, kernel_size=(3, 3), stride=(2, 2))
-        )
-        (linear): Linear(in_features=100, out_features=200, bias=True)
-    )
-)
-
-
-

(The diagram shows an nn.Module A. A has a nested -submodule net_b, which itself has two submodules net_c -and linear. net_c then has a submodule conv.)

-

To check whether or not we have the linear submodule, we -would call get_submodule("net_b.linear"). To check whether -we have the conv submodule, we would call -get_submodule("net_b.net_c.conv").

-

The runtime of get_submodule is bounded by the degree -of module nesting in target. A query against -named_modules achieves the same result, but it is O(N) in -the number of transitive modules. So, for a simple check to see -if some submodule exists, get_submodule should always be -used.

-
-
Parameters
-

target – The fully-qualified string name of the submodule -to look for. (See above example for how to specify a -fully-qualified string.)

-
-
Returns
-

The submodule referenced by target

-
-
Return type
-

torch.nn.Module

-
-
Raises
-

AttributeError – If the target string references an invalid - path or resolves to something that is not an - nn.Module

-
-
-
- -
-
-half() → T
-

Casts all floating point parameters and buffers to half datatype.

-
-

Note

-

This method modifies the module in-place.

-
-
-
Returns
-

self

-
-
Return type
-

Module

-
-
-
- -
-
-ipu(device: Union[int, torch.device, None] = None) → T
-

Move all model parameters and buffers to the IPU.

-

This also makes associated parameters and buffers different objects. So -it should be called before constructing optimizer if the module will -live on IPU while being optimized.

-
-

Note

-

This method modifies the module in-place.

-
-
-
Parameters
-

device (int, optional) – if specified, all parameters will be -copied to that device

-
-
Returns
-

self

-
-
Return type
-

Module

-
-
-
- -
-
-load_state_dict(state_dict: Mapping[str, Any], strict: bool = True, assign: bool = False)
-

Copy parameters and buffers from state_dict into this module and its descendants.

-

If strict is True, then -the keys of state_dict must exactly match the keys returned -by this module’s state_dict() function.

-
-

Warning

-

If assign is True the optimizer must be created after -the call to load_state_dict unless -get_swap_module_params_on_conversion() is True.

-
-
-
Parameters
-
    -
  • state_dict (dict) – a dict containing parameters and -persistent buffers.

  • -
  • strict (bool, optional) – whether to strictly enforce that the keys -in state_dict match the keys returned by this module’s -state_dict() function. Default: True

  • -
  • assign (bool, optional) – When False, the properties of the tensors -in the current module are preserved while when True, the -properties of the Tensors in the state dict are preserved. The only -exception is the requires_grad field, for which the value from the -module is preserved. Default: ``False``

  • -
-
-
Returns
-

    -
  • missing_keys is a list of str containing the missing keys

  • -
  • unexpected_keys is a list of str containing the unexpected keys

  • -
-

-
-
Return type
-

NamedTuple with missing_keys and unexpected_keys fields

-
-
-
-

Note

-

If a parameter or buffer is registered as None and its corresponding key -exists in state_dict, load_state_dict() will raise a -RuntimeError.

-
-
- -
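Example (a minimal sketch; model and "checkpoint.pt" are placeholders for an existing module and file):

>>> # xdoctest: +SKIP("undefined vars")
>>> result = model.load_state_dict(torch.load("checkpoint.pt"), strict=False)
>>> result.missing_keys, result.unexpected_keys
([], [])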
-
-modules() → Iterator[torch.nn.modules.module.Module]
-

Return an iterator over all modules in the network.

-
-
Yields
-

Module – a module in the network

-
-
-
-

Note

-

Duplicate modules are returned only once. In the following -example, l will be returned only once.

-
-

Example:

-
>>> l = nn.Linear(2, 2)
->>> net = nn.Sequential(l, l)
->>> for idx, m in enumerate(net.modules()):
-...     print(idx, '->', m)
-
-0 -> Sequential(
-  (0): Linear(in_features=2, out_features=2, bias=True)
-  (1): Linear(in_features=2, out_features=2, bias=True)
-)
-1 -> Linear(in_features=2, out_features=2, bias=True)
-
-
-
- -
-
-named_buffers(prefix: str = '', recurse: bool = True, remove_duplicate: bool = True) → Iterator[Tuple[str, torch.Tensor]]
-

Return an iterator over module buffers, yielding both the name of the buffer as well as the buffer itself.

-
-
Parameters
-
    -
  • prefix (str) – prefix to prepend to all buffer names.

  • -
  • recurse (bool, optional) – if True, then yields buffers of this module -and all submodules. Otherwise, yields only buffers that -are direct members of this module. Defaults to True.

  • -
  • remove_duplicate (bool, optional) – whether to remove the duplicated buffers in the result. Defaults to True.

  • -
-
-
Yields
-

(str, torch.Tensor) – Tuple containing the name and buffer

-
-
-

Example:

-
>>> # xdoctest: +SKIP("undefined vars")
->>> for name, buf in self.named_buffers():
->>>     if name in ['running_var']:
->>>         print(buf.size())
-
-
-
- -
-
-named_children() → Iterator[Tuple[str, torch.nn.modules.module.Module]]
-

Return an iterator over immediate children modules, yielding both the name of the module as well as the module itself.

-
-
Yields
-

(str, Module) – Tuple containing a name and child module

-
-
-

Example:

-
>>> # xdoctest: +SKIP("undefined vars")
->>> for name, module in model.named_children():
->>>     if name in ['conv4', 'conv5']:
->>>         print(module)
-
-
-
- -
-
-named_modules(memo: Optional[Set[Module]] = None, prefix: str = '', remove_duplicate: bool = True)
-

Return an iterator over all modules in the network, yielding both the name of the module as well as the module itself.

-
-
Parameters
-
    -
  • memo – a memo to store the set of modules already added to the result

  • -
  • prefix – a prefix that will be added to the name of the module

  • -
  • remove_duplicate – whether to remove the duplicated module instances in the result -or not

  • -
-
-
Yields
-

(str, Module) – Tuple of name and module

-
-
-
-

Note

-

Duplicate modules are returned only once. In the following -example, l will be returned only once.

-
-

Example:

-
>>> l = nn.Linear(2, 2)
->>> net = nn.Sequential(l, l)
->>> for idx, m in enumerate(net.named_modules()):
-...     print(idx, '->', m)
-
-0 -> ('', Sequential(
-  (0): Linear(in_features=2, out_features=2, bias=True)
-  (1): Linear(in_features=2, out_features=2, bias=True)
-))
-1 -> ('0', Linear(in_features=2, out_features=2, bias=True))
-
-
-
- -
-
-named_parameters(prefix: str = '', recurse: bool = True, remove_duplicate: bool = True) → Iterator[Tuple[str, torch.nn.parameter.Parameter]]
-

Return an iterator over module parameters, yielding both the name of the parameter as well as the parameter itself.

-
-
Parameters
-
    -
  • prefix (str) – prefix to prepend to all parameter names.

  • -
  • recurse (bool) – if True, then yields parameters of this module -and all submodules. Otherwise, yields only parameters that -are direct members of this module.

  • -
  • remove_duplicate (bool, optional) – whether to remove the duplicated -parameters in the result. Defaults to True.

  • -
-
-
Yields
-

(str, Parameter) – Tuple containing the name and parameter

-
-
-

Example:

-
>>> # xdoctest: +SKIP("undefined vars")
->>> for name, param in self.named_parameters():
->>>     if name in ['bias']:
->>>         print(param.size())
-
-
-
- -
-
-parameters(recurse: bool = True) → Iterator[torch.nn.parameter.Parameter]
-

Return an iterator over module parameters.

-

This is typically passed to an optimizer.

-
-
Parameters
-

recurse (bool) – if True, then yields parameters of this module -and all submodules. Otherwise, yields only parameters that -are direct members of this module.

-
-
Yields
-

Parameter – module parameter

-
-
-

Example:

-
>>> # xdoctest: +SKIP("undefined vars")
->>> for param in model.parameters():
->>>     print(type(param), param.size())
-<class 'torch.Tensor'> (20L,)
-<class 'torch.Tensor'> (20L, 1L, 5L, 5L)
-
-
-
- -
-
-register_backward_hook(hook: Callable[[Module, Union[Tuple[torch.Tensor, ...], torch.Tensor], Union[Tuple[torch.Tensor, ...], torch.Tensor]], Union[None, Tuple[torch.Tensor, ...], torch.Tensor]]) → torch.utils.hooks.RemovableHandle
-

Register a backward hook on the module.

-

This function is deprecated in favor of register_full_backward_hook() and -the behavior of this function will change in future versions.

-
-
Returns
-

a handle that can be used to remove the added hook by calling -handle.remove()

-
-
Return type
-

torch.utils.hooks.RemovableHandle

-
-
-
- -
-
-register_buffer(name: str, tensor: Optional[torch.Tensor], persistent: bool = True) → None
-

Add a buffer to the module.

-

This is typically used to register a buffer that should not be -considered a model parameter. For example, BatchNorm’s running_mean -is not a parameter, but is part of the module’s state. Buffers, by -default, are persistent and will be saved alongside parameters. This -behavior can be changed by setting persistent to False. The -only difference between a persistent buffer and a non-persistent buffer -is that the latter will not be a part of this module’s -state_dict.

-

Buffers can be accessed as attributes using given names.

-
-
Parameters
-
    -
  • name (str) – name of the buffer. The buffer can be accessed -from this module using the given name

  • -
  • tensor (Tensor or None) – buffer to be registered. If None, then operations -that run on buffers, such as cuda, are ignored. If None, -the buffer is not included in the module’s state_dict.

  • -
  • persistent (bool) – whether the buffer is part of this module’s -state_dict.

  • -
-
-
-

Example:

-
>>> # xdoctest: +SKIP("undefined vars")
->>> self.register_buffer('running_mean', torch.zeros(num_features))
-
-
-
- -
-
-register_forward_hook(hook: Union[Callable[[T, Tuple[Any, ...], Any], Optional[Any]], Callable[[T, Tuple[Any, ...], Dict[str, Any], Any], Optional[Any]]], *, prepend: bool = False, with_kwargs: bool = False, always_call: bool = False) → torch.utils.hooks.RemovableHandle
-

Register a forward hook on the module.

-

The hook will be called every time after forward() has computed an output.

-

If with_kwargs is False or not specified, the input contains only -the positional arguments given to the module. Keyword arguments won’t be -passed to the hooks and only to the forward. The hook can modify the -output. It can modify the input inplace but it will not have an effect on -forward since this is called after forward() is called. The hook -should have the following signature:

-
hook(module, args, output) -> None or modified output
-
-
-

If with_kwargs is True, the forward hook will be passed the -kwargs given to the forward function and be expected to return the -output possibly modified. The hook should have the following signature:

-
hook(module, args, kwargs, output) -> None or modified output
-
-
-
-
Parameters
-
    -
  • hook (Callable) – The user defined hook to be registered.

  • -
  • prepend (bool) – If True, the provided hook will be fired -before all existing forward hooks on this -torch.nn.modules.Module. Otherwise, the provided -hook will be fired after all existing forward hooks on -this torch.nn.modules.Module. Note that global -forward hooks registered with -register_module_forward_hook() will fire before all hooks -registered by this method. -Default: False

  • -
  • with_kwargs (bool) – If True, the hook will be passed the -kwargs given to the forward function. -Default: False

  • -
  • always_call (bool) – If True the hook will be run regardless of -whether an exception is raised while calling the Module. -Default: False

  • -
-
-
Returns
-

a handle that can be used to remove the added hook by calling -handle.remove()

-
-
Return type
-

torch.utils.hooks.RemovableHandle

-
-
-
- -
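Example (a minimal sketch: the hook only reports the output shape, returns None so the output is unchanged, and is then removed):

>>> layer = nn.Linear(2, 3)
>>> handle = layer.register_forward_hook(lambda m, args, out: print(out.shape))
>>> _ = layer(torch.randn(4, 2))
torch.Size([4, 3])
>>> handle.remove()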
-
-register_forward_pre_hook(hook: Union[Callable[[T, Tuple[Any, ...]], Optional[Any]], Callable[[T, Tuple[Any, ...], Dict[str, Any]], Optional[Tuple[Any, Dict[str, Any]]]]], *, prepend: bool = False, with_kwargs: bool = False) → torch.utils.hooks.RemovableHandle
-

Register a forward pre-hook on the module.

-

The hook will be called every time before forward() is invoked.

-

If with_kwargs is false or not specified, the input contains only -the positional arguments given to the module. Keyword arguments won’t be -passed to the hooks and only to the forward. The hook can modify the -input. The user can either return a tuple or a single modified value in the -hook. We will wrap the value into a tuple if a single value is returned -(unless that value is already a tuple). The hook should have the -following signature:

-
hook(module, args) -> None or modified input
-
-
-

If with_kwargs is true, the forward pre-hook will be passed the -kwargs given to the forward function. And if the hook modifies the -input, both the args and kwargs should be returned. The hook should have -the following signature:

-
hook(module, args, kwargs) -> None or a tuple of modified input and kwargs
-
-
-
-
Parameters
-
    -
  • hook (Callable) – The user defined hook to be registered.

  • -
  • prepend (bool) – If true, the provided hook will be fired before -all existing forward_pre hooks on this -torch.nn.modules.Module. Otherwise, the provided -hook will be fired after all existing forward_pre hooks -on this torch.nn.modules.Module. Note that global -forward_pre hooks registered with -register_module_forward_pre_hook() will fire before all -hooks registered by this method. -Default: False

  • -
  • with_kwargs (bool) – If true, the hook will be passed the kwargs -given to the forward function. -Default: False

  • -
-
-
Returns
-

a handle that can be used to remove the added hook by calling -handle.remove()

-
-
Return type
-

torch.utils.hooks.RemovableHandle

-
-
-
- -
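Example (a minimal sketch: the tuple returned by the hook replaces the positional arguments before forward runs):

>>> layer = nn.Linear(2, 2)
>>> def double_input(module, args):
...     return (args[0] * 2,)
>>> handle = layer.register_forward_pre_hook(double_input)
>>> handle.remove()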
-
-register_full_backward_hook(hook: Callable[[Module, Union[Tuple[torch.Tensor, ...], torch.Tensor], Union[Tuple[torch.Tensor, ...], torch.Tensor]], Union[None, Tuple[torch.Tensor, ...], torch.Tensor]], prepend: bool = False) → torch.utils.hooks.RemovableHandle
-

Register a backward hook on the module.

-

The hook will be called every time the gradients with respect to a module -are computed, i.e. the hook will execute if and only if the gradients with -respect to module outputs are computed. The hook should have the following -signature:

-
hook(module, grad_input, grad_output) -> tuple(Tensor) or None
-
-
-

The grad_input and grad_output are tuples that contain the gradients -with respect to the inputs and outputs respectively. The hook should -not modify its arguments, but it can optionally return a new gradient with -respect to the input that will be used in place of grad_input in -subsequent computations. grad_input will only correspond to the inputs given -as positional arguments and all kwarg arguments are ignored. Entries -in grad_input and grad_output will be None for all non-Tensor -arguments.

-

For technical reasons, when this hook is applied to a Module, its forward function will -receive a view of each Tensor passed to the Module. Similarly the caller will receive a view -of each Tensor returned by the Module’s forward function.

-
-

Warning

-

Modifying inputs or outputs inplace is not allowed when using backward hooks and -will raise an error.

-
-
-
Parameters
-
    -
  • hook (Callable) – The user-defined hook to be registered.

  • -
  • prepend (bool) – If true, the provided hook will be fired before -all existing backward hooks on this -torch.nn.modules.Module. Otherwise, the provided -hook will be fired after all existing backward hooks on -this torch.nn.modules.Module. Note that global -backward hooks registered with -register_module_full_backward_hook() will fire before -all hooks registered by this method.

  • -
-
-
Returns
-

a handle that can be used to remove the added hook by calling -handle.remove()

-
-
Return type
-

torch.utils.hooks.RemovableHandle

-
-
-
- -
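Example (a minimal sketch: the hook only inspects grad_output and returns None, leaving the gradients unchanged):

>>> layer = nn.Linear(2, 2)
>>> handle = layer.register_full_backward_hook(
...     lambda m, grad_in, grad_out: print(grad_out[0].shape))
>>> layer(torch.randn(3, 2)).sum().backward()
torch.Size([3, 2])
>>> handle.remove()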
-
-register_full_backward_pre_hook(hook: Callable[[Module, Union[Tuple[torch.Tensor, ...], torch.Tensor]], Union[None, Tuple[torch.Tensor, ...], torch.Tensor]], prepend: bool = False) → torch.utils.hooks.RemovableHandle
-

Register a backward pre-hook on the module.

-

The hook will be called every time the gradients for the module are computed. -The hook should have the following signature:

-
hook(module, grad_output) -> tuple[Tensor] or None
-
-
-

The grad_output is a tuple. The hook should -not modify its arguments, but it can optionally return a new gradient with -respect to the output that will be used in place of grad_output in -subsequent computations. Entries in grad_output will be None for -all non-Tensor arguments.

-

For technical reasons, when this hook is applied to a Module, its forward function will -receive a view of each Tensor passed to the Module. Similarly the caller will receive a view -of each Tensor returned by the Module’s forward function.

-
-

Warning

-

Modifying inputs inplace is not allowed when using backward hooks and -will raise an error.

-
-
-
Parameters
-
    -
  • hook (Callable) – The user-defined hook to be registered.

  • -
  • prepend (bool) – If true, the provided hook will be fired before -all existing backward_pre hooks on this -torch.nn.modules.Module. Otherwise, the provided -hook will be fired after all existing backward_pre hooks -on this torch.nn.modules.Module. Note that global -backward_pre hooks registered with -register_module_full_backward_pre_hook() will fire before -all hooks registered by this method.

  • -
-
-
Returns
-

a handle that can be used to remove the added hook by calling -handle.remove()

-
-
Return type
-

torch.utils.hooks.RemovableHandle

-
-
-
- -
-
-register_load_state_dict_post_hook(hook)
-

Register a post hook to be run after the module’s load_state_dict is called.

-
-
It should have the following signature:

hook(module, incompatible_keys) -> None

-
-
-

The module argument is the current module that this hook is registered -on, and the incompatible_keys argument is a NamedTuple consisting -of attributes missing_keys and unexpected_keys. missing_keys -is a list of str containing the missing keys and -unexpected_keys is a list of str containing the unexpected keys.

-

The given incompatible_keys can be modified inplace if needed.

-

Note that the checks performed when calling load_state_dict() with -strict=True are affected by modifications the hook makes to -missing_keys or unexpected_keys, as expected. Additions to either -set of keys will result in an error being thrown when strict=True, and -clearing out both missing and unexpected keys will avoid an error.

-
-
Returns
-

a handle that can be used to remove the added hook by calling -handle.remove()

-
-
Return type
-

torch.utils.hooks.RemovableHandle

-
-
-
- -
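Example (a minimal sketch; model is a placeholder for any existing nn.Module):

>>> # xdoctest: +SKIP("undefined vars")
>>> def report(module, incompatible_keys):
...     if incompatible_keys.missing_keys:
...         print('missing:', incompatible_keys.missing_keys)
>>> handle = model.register_load_state_dict_post_hook(report)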
-
-register_module(name: str, module: Optional[Module]) → None
-

Alias for add_module().

-
- -
-
-register_parameter(name: str, param: Optional[torch.nn.parameter.Parameter]) → None
-

Add a parameter to the module.

-

The parameter can be accessed as an attribute using given name.

-
-
Parameters
-
    -
  • name (str) – name of the parameter. The parameter can be accessed -from this module using the given name

  • -
  • param (Parameter or None) – parameter to be added to the module. If -None, then operations that run on parameters, such as cuda, -are ignored. If None, the parameter is not included in the -module’s state_dict.

  • -
-
-
-
- -
-
-register_state_dict_pre_hook(hook)
-

Register a pre-hook for the state_dict() method.

-

These hooks will be called with arguments: self, prefix, -and keep_vars before calling state_dict on self. The registered -hooks can be used to perform pre-processing before the state_dict -call is made.

-
- -
-
-requires_grad_(requires_grad: bool = True) → T
-

Change if autograd should record operations on parameters in this module.

-

This method sets the parameters’ requires_grad attributes -in-place.

-

This method is helpful for freezing part of the module for finetuning -or training parts of a model individually (e.g., GAN training).

-

See Locally disabling gradient computation for a comparison between -.requires_grad_() and several similar mechanisms that may be confused with it.

-
-
Parameters
-

requires_grad (bool) – whether autograd should record operations on -parameters in this module. Default: True.

-
-
Returns
-

self

-
-
Return type
-

Module

-
-
-
- -
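Example (a minimal sketch of freezing a submodule for finetuning):

>>> encoder = nn.Linear(10, 10)
>>> _ = encoder.requires_grad_(False)  # returns self
>>> any(p.requires_grad for p in encoder.parameters())
False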
-
-set_extra_state(state: Any) → None
-

Set extra state contained in the loaded state_dict.

-

This function is called from load_state_dict() to handle any extra state -found within the state_dict. Implement this function and a corresponding -get_extra_state() for your module if you need to store extra state within its -state_dict.

-
-
Parameters
-

state (dict) – Extra state from the state_dict

-
-
-
- -
-
-share_memory() → T
-

See torch.Tensor.share_memory_().

-
- -
-
-state_dict(*args, destination=None, prefix='', keep_vars=False)
-

Return a dictionary containing references to the whole state of the module.

-

Both parameters and persistent buffers (e.g. running averages) are -included. Keys are corresponding parameter and buffer names. -Parameters and buffers set to None are not included.

-
-

Note

-

The returned object is a shallow copy. It contains references -to the module’s parameters and buffers.

-
-
-

Warning

-

Currently state_dict() also accepts positional arguments for -destination, prefix and keep_vars in order. However, -this is being deprecated and keyword arguments will be enforced in -future releases.

-
-
-

Warning

-

Please avoid the use of argument destination as it is not -designed for end-users.

-
-
-
Parameters
-
    -
  • destination (dict, optional) – If provided, the state of module will -be updated into the dict and the same object is returned. -Otherwise, an OrderedDict will be created and returned. -Default: None.

  • -
  • prefix (str, optional) – a prefix added to parameter and buffer -names to compose the keys in state_dict. Default: ''.

  • -
  • keep_vars (bool, optional) – by default the Tensor s -returned in the state dict are detached from autograd. If it’s -set to True, detaching will not be performed. -Default: False.

  • -
-
-
Returns
-

a dictionary containing a whole state of the module

-
-
Return type
-

dict

-
-
-

Example:

-
>>> # xdoctest: +SKIP("undefined vars")
->>> module.state_dict().keys()
-['bias', 'weight']
-
-
-
- -
-
-to(*args, **kwargs)
-

Move and/or cast the parameters and buffers.

-

This can be called as

-
-
-to(device=None, dtype=None, non_blocking=False)
-
- -
-
-to(dtype, non_blocking=False)
-
- -
-
-to(tensor, non_blocking=False)
-
- -
-
-to(memory_format=torch.channels_last)
-
- -

Its signature is similar to torch.Tensor.to(), but only accepts -floating point or complex dtypes. In addition, this method will -only cast the floating point or complex parameters and buffers to dtype -(if given). The integral parameters and buffers will be moved to -device, if that is given, but with dtypes unchanged. When -non_blocking is set, it tries to convert/move asynchronously -with respect to the host if possible, e.g., moving CPU Tensors with -pinned memory to CUDA devices.

-

See below for examples.

-
-

Note

-

This method modifies the module in-place.

-
-
-
Parameters
-
    -
  • device (torch.device) – the desired device of the parameters -and buffers in this module

  • -
  • dtype (torch.dtype) – the desired floating point or complex dtype of -the parameters and buffers in this module

  • -
  • tensor (torch.Tensor) – Tensor whose dtype and device are the desired -dtype and device for all parameters and buffers in this module

  • -
  • memory_format (torch.memory_format) – the desired memory -format for 4D parameters and buffers in this module (keyword -only argument)

  • -
-
-
Returns
-

self

-
-
Return type
-

Module

-
-
-

Examples:

-
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
->>> linear = nn.Linear(2, 2)
->>> linear.weight
-Parameter containing:
-tensor([[ 0.1913, -0.3420],
-        [-0.5113, -0.2325]])
->>> linear.to(torch.double)
-Linear(in_features=2, out_features=2, bias=True)
->>> linear.weight
-Parameter containing:
-tensor([[ 0.1913, -0.3420],
-        [-0.5113, -0.2325]], dtype=torch.float64)
->>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA1)
->>> gpu1 = torch.device("cuda:1")
->>> linear.to(gpu1, dtype=torch.half, non_blocking=True)
-Linear(in_features=2, out_features=2, bias=True)
->>> linear.weight
-Parameter containing:
-tensor([[ 0.1914, -0.3420],
-        [-0.5112, -0.2324]], dtype=torch.float16, device='cuda:1')
->>> cpu = torch.device("cpu")
->>> linear.to(cpu)
-Linear(in_features=2, out_features=2, bias=True)
->>> linear.weight
-Parameter containing:
-tensor([[ 0.1914, -0.3420],
-        [-0.5112, -0.2324]], dtype=torch.float16)
-
->>> linear = nn.Linear(2, 2, bias=None).to(torch.cdouble)
->>> linear.weight
-Parameter containing:
-tensor([[ 0.3741+0.j,  0.2382+0.j],
-        [ 0.5593+0.j, -0.4443+0.j]], dtype=torch.complex128)
->>> linear(torch.ones(3, 2, dtype=torch.cdouble))
-tensor([[0.6122+0.j, 0.1150+0.j],
-        [0.6122+0.j, 0.1150+0.j],
-        [0.6122+0.j, 0.1150+0.j]], dtype=torch.complex128)
-
-
-
- -
-
-to_empty(*, device: Union[int, str, torch.device, None], recurse: bool = True) → T
-

Move the parameters and buffers to the specified device without copying storage.

-
-
Parameters
-
    -
  • device (torch.device) – The desired device of the parameters -and buffers in this module.

  • -
  • recurse (bool) – Whether parameters and buffers of submodules should -be recursively moved to the specified device.

  • -
-
-
Returns
-

self

-
-
Return type
-

Module

-
-
-
- -
-
-train(mode: bool = True) → T
-

Set the module in training mode.

-

This has an effect only on certain modules. See the documentation of -particular modules for details of their behavior in training/evaluation -mode, if they are affected, e.g. Dropout, BatchNorm, -etc.

-
-
Parameters
-

mode (bool) – whether to set training mode (True) or evaluation -mode (False). Default: True.

-
-
Returns
-

self

-
-
Return type
-

Module

-
-
-
- -
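Example (a minimal sketch toggling a dropout layer between the two modes):

>>> net = nn.Dropout(p=0.5)
>>> _ = net.train()   # returns self
>>> net.training
True
>>> _ = net.eval()    # shorthand for net.train(False)
>>> net.training
False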
-
-type(dst_type: Union[torch.dtype, str]) → T
-

Casts all parameters and buffers to dst_type.

-
-

Note

-

This method modifies the module in-place.

-
-
-
Parameters
-

dst_type (type or string) – the desired type

-
-
Returns
-

self

-
-
Return type
-

Module

-
-
-
- -
-
-xpu(device: Union[int, torch.device, None] = None) → T
-

Move all model parameters and buffers to the XPU.

-

This also makes associated parameters and buffers different objects. So -it should be called before constructing the optimizer if the module will -live on XPU while being optimized.

-
-

Note

-

This method modifies the module in-place.

-
-
-
Parameters
-

device (int, optional) – if specified, all parameters will be -copied to that device

-
-
Returns
-

self

-
-
Return type
-

Module

-
-
-
- -
-
-zero_grad(set_to_none: bool = True) → None
-

Reset gradients of all model parameters.

-

See similar function under torch.optim.Optimizer for more context.

-
-
Parameters
-

set_to_none (bool) – instead of setting to zero, set the grads to None. -See torch.optim.Optimizer.zero_grad() for details.

-
-
-
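Example (a minimal sketch; with the default set_to_none=True the gradients are reset to None rather than zero-filled):

>>> model = nn.Linear(2, 2)
>>> model(torch.randn(1, 2)).sum().backward()
>>> model.zero_grad()
>>> model.weight.grad is None
True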
- - - -
- - - - - - - - - - - + + \ No newline at end of file diff --git a/_rst/adaptive_functions/AdaptiveTanh.html b/_rst/adaptive_functions/AdaptiveTanh.html index f780b4b1..0573e623 100644 --- a/_rst/adaptive_functions/AdaptiveTanh.html +++ b/_rst/adaptive_functions/AdaptiveTanh.html @@ -1,131 +1,575 @@ + - - - - - AdaptiveTanh — PINA 0.1.1.post2407 documentation - - + + + + + + + + AdaptiveTanh — PINA 0.1.2 documentation + + + + + + + + + + + + + + + + + + + + - - - - - - - - - - + + + + + + + + + + + + + - - + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + +
+
+ + + +
+ + + + + + + + + + + -
-
- - -
-
-
-
-
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + +
+ +
+ +
-
-
- -
-

AdaptiveTanh

-
-
-class AdaptiveTanh(alpha=None, beta=None, gamma=None, fixed=None)[source]
-

Bases: pina.adaptive_functions.adaptive_func_interface.AdaptiveActivationFunctionInterface

-

Adaptive trainable Tanh activation function.

+
+ + + + + +
+ +
+

AdaptiveTanh#

+
+
+class AdaptiveTanh(alpha=None, beta=None, gamma=None, fixed=None)[source]#
+

Bases: AdaptiveActivationFunctionInterface

+

Adaptive trainable Tanh activation function.

Given the function \(\text{Tanh}:\mathbb{R}^n\rightarrow\mathbb{R}^n\), the adaptive function \(\text{Tanh}_{\text{adaptive}}:\mathbb{R}^n\rightarrow\mathbb{R}^n\) @@ -152,15 +596,15 @@

AdaptiveTanh -
Parameters
+
Parameters:
    -
  • | complex alpha (float) – Scaling parameter alpha. +

  • alpha (float | complex) – Scaling parameter alpha. Defaults to None. When None is passed, the variable is initialized to 1.

  • -
  • | complex beta (float) – Scaling parameter beta. +

  • beta (float | complex) – Scaling parameter beta. Defaults to None. When None is passed, the variable is initialized to 1.

  • -
  • | complex gamma (float) – Shifting parameter gamma. +

  • gamma (float | complex) – Shifting parameter gamma. Defaults to None. When None is passed, the variable is initialized to 1.

  • fixed (list) – List of parameters to fix during training, @@ -169,1300 +613,99 @@

    AdaptiveTanh -
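Example (a hedged usage sketch; the import path is inferred from this page's location in the package, and the choice of names in fixed follows the fixed argument described above):

>>> import torch
>>> from pina.adaptive_functions import AdaptiveTanh
>>> act = AdaptiveTanh(alpha=2.0, fixed=['alpha'])  # alpha frozen, beta and gamma trainable
>>> y = act(torch.randn(5, 3))
>>> y.shape
torch.Size([5, 3])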
    -add_module(name: str, module: Optional[Module]) → None
    -

    Add a child module to the current module.

    -

    The module can be accessed as an attribute using the given name.

    -
    -
    Parameters
    -
      -
    • name (str) – name of the child module. The child module can be -accessed from this module using the given name

    • -
    • module (Module) – child module to be added to the module.

    • -
    -
    -
    -

- -
-
-property alpha
-

The alpha variable.

-
- -
-
-apply(fn: Callable[[Module], None]) → T
-

Apply fn recursively to every submodule (as returned by .children()) as well as self.

-

Typical use includes initializing the parameters of a model -(see also torch.nn.init).

-
-
Parameters
-

fn (Module -> None) – function to be applied to each submodule

-
-
Returns
-

self

-
-
Return type
-

Module

-
-
-

Example:

-
>>> @torch.no_grad()
->>> def init_weights(m):
->>>     print(m)
->>>     if type(m) == nn.Linear:
->>>         m.weight.fill_(1.0)
->>>         print(m.weight)
->>> net = nn.Sequential(nn.Linear(2, 2), nn.Linear(2, 2))
->>> net.apply(init_weights)
-Linear(in_features=2, out_features=2, bias=True)
-Parameter containing:
-tensor([[1., 1.],
-        [1., 1.]], requires_grad=True)
-Linear(in_features=2, out_features=2, bias=True)
-Parameter containing:
-tensor([[1., 1.],
-        [1., 1.]], requires_grad=True)
-Sequential(
-  (0): Linear(in_features=2, out_features=2, bias=True)
-  (1): Linear(in_features=2, out_features=2, bias=True)
-)
-
-
-
-
-property beta
-

The beta variable.

-
- -
-
-bfloat16() → T
-

Casts all floating point parameters and buffers to bfloat16 datatype.

-
-

Note

-

This method modifies the module in-place.

-
-
-
Returns
-

self

-
-
Return type
-

Module

-
-
-
+
-
-
-buffers(recurse: bool = True) → Iterator[torch.Tensor]
-

Return an iterator over module buffers.

-
-
Parameters
-

recurse (bool) – if True, then yields buffers of this module -and all submodules. Otherwise, yields only buffers that -are direct members of this module.

-
-
Yields
-

torch.Tensor – module buffer

-
-
-

Example:

-
>>> # xdoctest: +SKIP("undefined vars")
->>> for buf in model.buffers():
->>>     print(type(buf), buf.size())
-<class 'torch.Tensor'> (20L,)
-<class 'torch.Tensor'> (20L, 1L, 5L, 5L)
-
-
-
-
-
-children() → Iterator[torch.nn.modules.module.Module]
-

Return an iterator over immediate children modules.

-
-
Yields
-

Module – a child module

-
-
-
+
+ + + + + +
+ + + +
+ + +
+
+ +
+ +
+
+
+ + + + -
-
-float() → T
-

Casts all floating point parameters and buffers to float datatype.

-
-

Note

-

This method modifies the module in-place.

+
+ -
-
Returns
-

self

-
-
Return type
-

Module

-
-
-
- -
-
-forward(x)
-

Define the computation performed at every call. -Applies the activation function to the input elementwise.

-
-
Parameters
-

x (torch.Tensor | LabelTensor) – The input tensor to evaluate the activation function.

-
-
-
- -
-
-property func
-

The callable activation function.

-
- -
-
-property gamma
-

The gamma variable.

-
- -
-
-get_buffer(target: str) → torch.Tensor
-

Return the buffer given by target if it exists, otherwise throw an error.

-

See the docstring for get_submodule for a more detailed -explanation of this method’s functionality as well as how to -correctly specify target.

-
-
Parameters
-

target – The fully-qualified string name of the buffer -to look for. (See get_submodule for how to specify a -fully-qualified string.)

-
-
Returns
-

The buffer referenced by target

-
-
Return type
-

torch.Tensor

-
-
Raises
-

AttributeError – If the target string references an invalid - path or resolves to something that is not a - buffer

-
-
-
- -
-
-get_extra_state() → Any
-

Return any extra state to include in the module’s state_dict.

-

Implement this and a corresponding set_extra_state() for your module -if you need to store extra state. This function is called when building the -module’s state_dict().

-

Note that extra state should be picklable to ensure working serialization -of the state_dict. We only provide backwards compatibility guarantees -for serializing Tensors; other objects may break backwards compatibility if -their serialized pickled form changes.

-
-
Returns
-

Any extra state to store in the module’s state_dict

-
-
Return type
-

object

-
-
-
- -
-
-get_parameter(target: str) → torch.nn.parameter.Parameter
-

Return the parameter given by target if it exists, otherwise throw an error.

-

See the docstring for get_submodule for a more detailed -explanation of this method’s functionality as well as how to -correctly specify target.

-
-
Parameters
-

target – The fully-qualified string name of the Parameter -to look for. (See get_submodule for how to specify a -fully-qualified string.)

-
-
Returns
-

The Parameter referenced by target

-
-
Return type
-

torch.nn.Parameter

-
-
Raises
-

AttributeError – If the target string references an invalid - path or resolves to something that is not an - nn.Parameter

-
-
-
- -
-
-get_submodule(target: str) → torch.nn.modules.module.Module
-

Return the submodule given by target if it exists, otherwise throw an error.

-

For example, let’s say you have an nn.Module A that -looks like this:

-
A(
-    (net_b): Module(
-        (net_c): Module(
-            (conv): Conv2d(16, 33, kernel_size=(3, 3), stride=(2, 2))
-        )
-        (linear): Linear(in_features=100, out_features=200, bias=True)
-    )
-)
-
-
-

(The diagram shows an nn.Module A. A has a nested -submodule net_b, which itself has two submodules net_c -and linear. net_c then has a submodule conv.)

-

To check whether or not we have the linear submodule, we -would call get_submodule("net_b.linear"). To check whether -we have the conv submodule, we would call -get_submodule("net_b.net_c.conv").

-

The runtime of get_submodule is bounded by the degree -of module nesting in target. A query against -named_modules achieves the same result, but it is O(N) in -the number of transitive modules. So, for a simple check to see -if some submodule exists, get_submodule should always be -used.

-
-
Parameters
-

target – The fully-qualified string name of the submodule -to look for. (See above example for how to specify a -fully-qualified string.)

-
-
Returns
-

The submodule referenced by target

-
-
Return type
-

torch.nn.Module

-
-
Raises
-

AttributeError – If the target string references an invalid - path or resolves to something that is not an - nn.Module

-
-
-
- -
-
-half() → T
-

Casts all floating point parameters and buffers to half datatype.

-
-

Note

-

This method modifies the module in-place.

-
-
-
Returns
-

self

-
-
Return type
-

Module

-
-
-
- -
-
-ipu(device: Union[int, torch.device, None] = None) → T
-

Move all model parameters and buffers to the IPU.

-

This also makes associated parameters and buffers different objects. So -it should be called before constructing the optimizer if the module will -live on IPU while being optimized.

-
-

Note

-

This method modifies the module in-place.

-
-
-
Parameters
-

device (int, optional) – if specified, all parameters will be -copied to that device

-
-
Returns
-

self

-
-
Return type
-

Module

-
-
-
- -
-
-load_state_dict(state_dict: Mapping[str, Any], strict: bool = True, assign: bool = False)
-

Copy parameters and buffers from state_dict into this module and its descendants.

-

If strict is True, then -the keys of state_dict must exactly match the keys returned -by this module’s state_dict() function.

-
-

Warning

-

If assign is True the optimizer must be created after -the call to load_state_dict unless -get_swap_module_params_on_conversion() is True.

-
-
-
Parameters
-
    -
  • state_dict (dict) – a dict containing parameters and -persistent buffers.

  • -
  • strict (bool, optional) – whether to strictly enforce that the keys -in state_dict match the keys returned by this module’s -state_dict() function. Default: True

  • -
  • assign (bool, optional) – When False, the properties of the tensors -in the current module are preserved while when True, the -properties of the Tensors in the state dict are preserved. The only -exception is the requires_grad field, for which the value from the -module is preserved. Default: ``False``

  • -
-
-
Returns
-

    -
  • missing_keys is a list of str containing the missing keys

  • -
  • unexpected_keys is a list of str containing the unexpected keys

  • -
-

-
-
Return type
-

NamedTuple with missing_keys and unexpected_keys fields

-
-
-
-

Note

-

If a parameter or buffer is registered as None and its corresponding key -exists in state_dict, load_state_dict() will raise a -RuntimeError.

-
-
- -
-
-modules() → Iterator[torch.nn.modules.module.Module]
-

Return an iterator over all modules in the network.

-
-
Yields
-

Module – a module in the network

-
-
-
-

Note

-

Duplicate modules are returned only once. In the following -example, l will be returned only once.

-
-

Example:

-
>>> l = nn.Linear(2, 2)
->>> net = nn.Sequential(l, l)
->>> for idx, m in enumerate(net.modules()):
-...     print(idx, '->', m)
-
-0 -> Sequential(
-  (0): Linear(in_features=2, out_features=2, bias=True)
-  (1): Linear(in_features=2, out_features=2, bias=True)
-)
-1 -> Linear(in_features=2, out_features=2, bias=True)
-
-
-
- -
-
-named_buffers(prefix: str = '', recurse: bool = True, remove_duplicate: bool = True) → Iterator[Tuple[str, torch.Tensor]]
-

Return an iterator over module buffers, yielding both the name of the buffer as well as the buffer itself.

-
-
Parameters
-
    -
  • prefix (str) – prefix to prepend to all buffer names.

  • -
  • recurse (bool, optional) – if True, then yields buffers of this module -and all submodules. Otherwise, yields only buffers that -are direct members of this module. Defaults to True.

  • -
  • remove_duplicate (bool, optional) – whether to remove the duplicated buffers in the result. Defaults to True.

  • -
-
-
Yields
-

(str, torch.Tensor) – Tuple containing the name and buffer

-
-
-

Example:

-
>>> # xdoctest: +SKIP("undefined vars")
->>> for name, buf in self.named_buffers():
->>>     if name in ['running_var']:
->>>         print(buf.size())
-
-
-
- -
-
-named_children() → Iterator[Tuple[str, torch.nn.modules.module.Module]]
-

Return an iterator over immediate children modules, yielding both the name of the module as well as the module itself.

-
-
Yields
-

(str, Module) – Tuple containing a name and child module

-
-
-

Example:

-
>>> # xdoctest: +SKIP("undefined vars")
->>> for name, module in model.named_children():
->>>     if name in ['conv4', 'conv5']:
->>>         print(module)
-
-
-
- -
-
-named_modules(memo: Optional[Set[Module]] = None, prefix: str = '', remove_duplicate: bool = True)
-

Return an iterator over all modules in the network, yielding both the name of the module as well as the module itself.

-
-
Parameters
-
    -
  • memo – a memo to store the set of modules already added to the result

  • -
  • prefix – a prefix that will be added to the name of the module

  • -
  • remove_duplicate – whether to remove the duplicated module instances in the result -or not

  • -
-
-
Yields
-

(str, Module) – Tuple of name and module

-
-
-
-

Note

-

Duplicate modules are returned only once. In the following -example, l will be returned only once.

-
-

Example:

-
>>> l = nn.Linear(2, 2)
->>> net = nn.Sequential(l, l)
->>> for idx, m in enumerate(net.named_modules()):
-...     print(idx, '->', m)
-
-0 -> ('', Sequential(
-  (0): Linear(in_features=2, out_features=2, bias=True)
-  (1): Linear(in_features=2, out_features=2, bias=True)
-))
-1 -> ('0', Linear(in_features=2, out_features=2, bias=True))
-
-
-
- -
-
-named_parameters(prefix: str = '', recurse: bool = True, remove_duplicate: bool = True) → Iterator[Tuple[str, torch.nn.parameter.Parameter]]
-

Return an iterator over module parameters, yielding both the name of the parameter as well as the parameter itself.

-
-
Parameters
-
    -
  • prefix (str) – prefix to prepend to all parameter names.

  • -
  • recurse (bool) – if True, then yields parameters of this module -and all submodules. Otherwise, yields only parameters that -are direct members of this module.

  • -
  • remove_duplicate (bool, optional) – whether to remove the duplicated -parameters in the result. Defaults to True.

  • -
-
-
Yields
-

(str, Parameter) – Tuple containing the name and parameter

-
-
-

Example:

-
>>> # xdoctest: +SKIP("undefined vars")
->>> for name, param in self.named_parameters():
->>>     if name in ['bias']:
->>>         print(param.size())
-
-
-
- -
-
-parameters(recurse: bool = True) → Iterator[torch.nn.parameter.Parameter]
-

Return an iterator over module parameters.

-

This is typically passed to an optimizer.

-
-
Parameters
-

recurse (bool) – if True, then yields parameters of this module -and all submodules. Otherwise, yields only parameters that -are direct members of this module.

-
-
Yields
-

Parameter – module parameter

-
-
-

Example:

-
>>> # xdoctest: +SKIP("undefined vars")
->>> for param in model.parameters():
->>>     print(type(param), param.size())
-<class 'torch.Tensor'> (20L,)
-<class 'torch.Tensor'> (20L, 1L, 5L, 5L)
-
-
-
- -
-
-register_backward_hook(hook: Callable[[Module, Union[Tuple[torch.Tensor, ...], torch.Tensor], Union[Tuple[torch.Tensor, ...], torch.Tensor]], Union[None, Tuple[torch.Tensor, ...], torch.Tensor]]) → torch.utils.hooks.RemovableHandle
-

Register a backward hook on the module.

-

This function is deprecated in favor of register_full_backward_hook() and -the behavior of this function will change in future versions.

-
-
Returns
-

a handle that can be used to remove the added hook by calling -handle.remove()

-
-
Return type
-

torch.utils.hooks.RemovableHandle

-
-
-
- -
-
-register_buffer(name: str, tensor: Optional[torch.Tensor], persistent: bool = True) → None
-

Add a buffer to the module.

-

This is typically used to register a buffer that should not be -considered a model parameter. For example, BatchNorm’s running_mean -is not a parameter, but is part of the module’s state. Buffers, by -default, are persistent and will be saved alongside parameters. This -behavior can be changed by setting persistent to False. The -only difference between a persistent buffer and a non-persistent buffer -is that the latter will not be a part of this module’s -state_dict.

-

Buffers can be accessed as attributes using given names.

-
-
Parameters
-
    -
  • name (str) – name of the buffer. The buffer can be accessed -from this module using the given name

  • -
  • tensor (Tensor or None) – buffer to be registered. If None, then operations -that run on buffers, such as cuda, are ignored. If None, -the buffer is not included in the module’s state_dict.

  • -
  • persistent (bool) – whether the buffer is part of this module’s -state_dict.

  • -
-
-
-

Example:

-
>>> # xdoctest: +SKIP("undefined vars")
->>> self.register_buffer('running_mean', torch.zeros(num_features))
-
-
-
- -
-
-register_forward_hook(hook: Union[Callable[[T, Tuple[Any, ...], Any], Optional[Any]], Callable[[T, Tuple[Any, ...], Dict[str, Any], Any], Optional[Any]]], *, prepend: bool = False, with_kwargs: bool = False, always_call: bool = False) → torch.utils.hooks.RemovableHandle
-

Register a forward hook on the module.

-

The hook will be called every time after forward() has computed an output.

-

If with_kwargs is False or not specified, the input contains only -the positional arguments given to the module. Keyword arguments won’t be -passed to the hooks and only to the forward. The hook can modify the -output. It can modify the input inplace but it will not have an effect on -forward since this is called after forward() is called. The hook -should have the following signature:

-
hook(module, args, output) -> None or modified output
-
-
-

If with_kwargs is True, the forward hook will be passed the -kwargs given to the forward function and be expected to return the -output possibly modified. The hook should have the following signature:

-
hook(module, args, kwargs, output) -> None or modified output
-
-
-
-
Parameters
-
    -
  • hook (Callable) – The user defined hook to be registered.

  • -
  • prepend (bool) – If True, the provided hook will be fired -before all existing forward hooks on this -torch.nn.modules.Module. Otherwise, the provided -hook will be fired after all existing forward hooks on -this torch.nn.modules.Module. Note that global -forward hooks registered with -register_module_forward_hook() will fire before all hooks -registered by this method. -Default: False

  • -
  • with_kwargs (bool) – If True, the hook will be passed the -kwargs given to the forward function. -Default: False

  • -
  • always_call (bool) – If True the hook will be run regardless of -whether an exception is raised while calling the Module. -Default: False

  • -
-
-
Returns
-

a handle that can be used to remove the added hook by calling -handle.remove()

-
-
Return type
-

torch.utils.hooks.RemovableHandle

-
-
-
- -
-
-register_forward_pre_hook(hook: Union[Callable[[T, Tuple[Any, ...]], Optional[Any]], Callable[[T, Tuple[Any, ...], Dict[str, Any]], Optional[Tuple[Any, Dict[str, Any]]]]], *, prepend: bool = False, with_kwargs: bool = False) → torch.utils.hooks.RemovableHandle
-

Register a forward pre-hook on the module.

-

The hook will be called every time before forward() is invoked.

-

If with_kwargs is false or not specified, the input contains only -the positional arguments given to the module. Keyword arguments won’t be -passed to the hooks and only to the forward. The hook can modify the -input. The user can either return a tuple or a single modified value in the -hook. We will wrap the value into a tuple if a single value is returned -(unless that value is already a tuple). The hook should have the -following signature:

-
hook(module, args) -> None or modified input
-
-
-

If with_kwargs is true, the forward pre-hook will be passed the -kwargs given to the forward function. And if the hook modifies the -input, both the args and kwargs should be returned. The hook should have -the following signature:

-
hook(module, args, kwargs) -> None or a tuple of modified input and kwargs
-
-
-
-
Parameters
-
    -
  • hook (Callable) – The user defined hook to be registered.

  • -
  • prepend (bool) – If true, the provided hook will be fired before -all existing forward_pre hooks on this -torch.nn.modules.Module. Otherwise, the provided -hook will be fired after all existing forward_pre hooks -on this torch.nn.modules.Module. Note that global -forward_pre hooks registered with -register_module_forward_pre_hook() will fire before all -hooks registered by this method. -Default: False

  • -
  • with_kwargs (bool) – If true, the hook will be passed the kwargs -given to the forward function. -Default: False

  • -
-
-
Returns
-

a handle that can be used to remove the added hook by calling -handle.remove()

-
-
Return type
-

torch.utils.hooks.RemovableHandle

-
-
-
- -
-
-register_full_backward_hook(hook: Callable[[Module, Union[Tuple[torch.Tensor, ...], torch.Tensor], Union[Tuple[torch.Tensor, ...], torch.Tensor]], Union[None, Tuple[torch.Tensor, ...], torch.Tensor]], prepend: bool = False) → torch.utils.hooks.RemovableHandle
-

Register a backward hook on the module.

-

The hook will be called every time the gradients with respect to a module -are computed, i.e. the hook will execute if and only if the gradients with -respect to module outputs are computed. The hook should have the following -signature:

-
hook(module, grad_input, grad_output) -> tuple(Tensor) or None
-
-
-

The grad_input and grad_output are tuples that contain the gradients -with respect to the inputs and outputs respectively. The hook should -not modify its arguments, but it can optionally return a new gradient with -respect to the input that will be used in place of grad_input in -subsequent computations. grad_input will only correspond to the inputs given -as positional arguments and all kwarg arguments are ignored. Entries -in grad_input and grad_output will be None for all non-Tensor -arguments.

-

For technical reasons, when this hook is applied to a Module, its forward function will -receive a view of each Tensor passed to the Module. Similarly the caller will receive a view -of each Tensor returned by the Module’s forward function.

-
-

Warning

-

Modifying inputs or outputs inplace is not allowed when using backward hooks and -will raise an error.

-
-
-
Parameters
-
    -
  • hook (Callable) – The user-defined hook to be registered.

  • -
  • prepend (bool) – If true, the provided hook will be fired before -all existing backward hooks on this -torch.nn.modules.Module. Otherwise, the provided -hook will be fired after all existing backward hooks on -this torch.nn.modules.Module. Note that global -backward hooks registered with -register_module_full_backward_hook() will fire before -all hooks registered by this method.

  • -
-
-
Returns
-

a handle that can be used to remove the added hook by calling -handle.remove()

-
-
Return type
-

torch.utils.hooks.RemovableHandle

-
-
-
- -
-
-register_full_backward_pre_hook(hook: Callable[[Module, Union[Tuple[torch.Tensor, ...], torch.Tensor]], Union[None, Tuple[torch.Tensor, ...], torch.Tensor]], prepend: bool = False) → torch.utils.hooks.RemovableHandle
-

Register a backward pre-hook on the module.

-

The hook will be called every time the gradients for the module are computed. -The hook should have the following signature:

-
hook(module, grad_output) -> tuple[Tensor] or None
-
-
-

The grad_output is a tuple. The hook should -not modify its arguments, but it can optionally return a new gradient with -respect to the output that will be used in place of grad_output in -subsequent computations. Entries in grad_output will be None for -all non-Tensor arguments.

-

For technical reasons, when this hook is applied to a Module, its forward function will -receive a view of each Tensor passed to the Module. Similarly the caller will receive a view -of each Tensor returned by the Module’s forward function.

-
-

Warning

-

Modifying inputs inplace is not allowed when using backward hooks and -will raise an error.

-
-
-
Parameters
-
    -
  • hook (Callable) – The user-defined hook to be registered.

  • -
  • prepend (bool) – If true, the provided hook will be fired before -all existing backward_pre hooks on this -torch.nn.modules.Module. Otherwise, the provided -hook will be fired after all existing backward_pre hooks -on this torch.nn.modules.Module. Note that global -backward_pre hooks registered with -register_module_full_backward_pre_hook() will fire before -all hooks registered by this method.

  • -
-
-
Returns
-

a handle that can be used to remove the added hook by calling -handle.remove()

-
-
Return type
-

torch.utils.hooks.RemovableHandle

-
-
-
- -
-
-register_load_state_dict_post_hook(hook)
-

Register a post hook to be run after the module’s load_state_dict is called.

-
-
It should have the following signature:

hook(module, incompatible_keys) -> None

-
-
-

The module argument is the current module that this hook is registered -on, and the incompatible_keys argument is a NamedTuple consisting -of attributes missing_keys and unexpected_keys. missing_keys -is a list of str containing the missing keys and -unexpected_keys is a list of str containing the unexpected keys.

-

The given incompatible_keys can be modified inplace if needed.

-

Note that the checks performed when calling load_state_dict() with -strict=True are affected by modifications the hook makes to -missing_keys or unexpected_keys, as expected. Additions to either -set of keys will result in an error being thrown when strict=True, and -clearing out both missing and unexpected keys will avoid an error.

-
-
Returns
-

a handle that can be used to remove the added hook by calling -handle.remove()

-
-
Return type
-

torch.utils.hooks.RemovableHandle

-
-
-
- -
-
-register_module(name: str, module: Optional[Module]) → None
-

Alias for add_module().

-
- -
-
-register_parameter(name: str, param: Optional[torch.nn.parameter.Parameter]) → None
-

Add a parameter to the module.

-

The parameter can be accessed as an attribute using given name.

-
-
Parameters
-
    -
  • name (str) – name of the parameter. The parameter can be accessed -from this module using the given name

  • -
  • param (Parameter or None) – parameter to be added to the module. If -None, then operations that run on parameters, such as cuda, -are ignored. If None, the parameter is not included in the -module’s state_dict.

  • -
-
-
-
- -
-
-register_state_dict_pre_hook(hook)
-

Register a pre-hook for the state_dict() method.

-

These hooks will be called with arguments: self, prefix, -and keep_vars before calling state_dict on self. The registered -hooks can be used to perform pre-processing before the state_dict -call is made.

-
- -
-
-requires_grad_(requires_grad: bool = True) → T
-

Change if autograd should record operations on parameters in this module.

-

This method sets the parameters’ requires_grad attributes -in-place.

-

This method is helpful for freezing part of the module for finetuning -or training parts of a model individually (e.g., GAN training).

-

See Locally disabling gradient computation for a comparison between -.requires_grad_() and several similar mechanisms that may be confused with it.

-
-
Parameters
-

requires_grad (bool) – whether autograd should record operations on -parameters in this module. Default: True.

-
-
Returns
-

self

-
-
Return type
-

Module

-
-
-
- -
-
-set_extra_state(state: Any) → None
-

Set extra state contained in the loaded state_dict.

-

This function is called from load_state_dict() to handle any extra state -found within the state_dict. Implement this function and a corresponding -get_extra_state() for your module if you need to store extra state within its -state_dict.

-
-
Parameters
-

state (dict) – Extra state from the state_dict

-
-
-
- -
-
-share_memory() → T
-

See torch.Tensor.share_memory_().

-
- -
-
-state_dict(*args, destination=None, prefix='', keep_vars=False)
-

Return a dictionary containing references to the whole state of the module.

-

Both parameters and persistent buffers (e.g. running averages) are -included. Keys are corresponding parameter and buffer names. -Parameters and buffers set to None are not included.

-
-

Note

-

The returned object is a shallow copy. It contains references -to the module’s parameters and buffers.

-
-
-

Warning

-

Currently state_dict() also accepts positional arguments for -destination, prefix and keep_vars in order. However, -this is being deprecated and keyword arguments will be enforced in -future releases.

-
-
-

Warning

-

Please avoid the use of argument destination as it is not -designed for end-users.

-
-
-
Parameters
-
    -
  • destination (dict, optional) – If provided, the state of module will -be updated into the dict and the same object is returned. -Otherwise, an OrderedDict will be created and returned. -Default: None.

  • -
  • prefix (str, optional) – a prefix added to parameter and buffer -names to compose the keys in state_dict. Default: ''.

  • -
  • keep_vars (bool, optional) – by default the Tensor s -returned in the state dict are detached from autograd. If it’s -set to True, detaching will not be performed. -Default: False.

  • -
-
-
Returns
-

a dictionary containing a whole state of the module

-
-
Return type
-

dict

-
-
-

Example:

-
>>> # xdoctest: +SKIP("undefined vars")
->>> module.state_dict().keys()
-['bias', 'weight']
-
-
-
- -
-
-to(*args, **kwargs)
-

Move and/or cast the parameters and buffers.

-

This can be called as

-
-
-to(device=None, dtype=None, non_blocking=False)
-
- -
-
-to(dtype, non_blocking=False)
-
- -
-
-to(tensor, non_blocking=False)
-
- -
-
-to(memory_format=torch.channels_last)
-
- -

Its signature is similar to torch.Tensor.to(), but only accepts -floating point or complex dtypes. In addition, this method will -only cast the floating point or complex parameters and buffers to dtype -(if given). The integral parameters and buffers will be moved to -device, if that is given, but with dtypes unchanged. When -non_blocking is set, it tries to convert/move asynchronously -with respect to the host if possible, e.g., moving CPU Tensors with -pinned memory to CUDA devices.

-

See below for examples.

-
-

Note

-

This method modifies the module in-place.

-
-
-
Parameters
-
    -
  • device (torch.device) – the desired device of the parameters -and buffers in this module

  • -
  • dtype (torch.dtype) – the desired floating point or complex dtype of -the parameters and buffers in this module

  • -
  • tensor (torch.Tensor) – Tensor whose dtype and device are the desired -dtype and device for all parameters and buffers in this module

  • -
  • memory_format (torch.memory_format) – the desired memory -format for 4D parameters and buffers in this module (keyword -only argument)

  • -
-
-
Returns
-

self

-
-
Return type
-

Module

-
-
-

Examples:

-
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
->>> linear = nn.Linear(2, 2)
->>> linear.weight
-Parameter containing:
-tensor([[ 0.1913, -0.3420],
-        [-0.5113, -0.2325]])
->>> linear.to(torch.double)
-Linear(in_features=2, out_features=2, bias=True)
->>> linear.weight
-Parameter containing:
-tensor([[ 0.1913, -0.3420],
-        [-0.5113, -0.2325]], dtype=torch.float64)
->>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA1)
->>> gpu1 = torch.device("cuda:1")
->>> linear.to(gpu1, dtype=torch.half, non_blocking=True)
-Linear(in_features=2, out_features=2, bias=True)
->>> linear.weight
-Parameter containing:
-tensor([[ 0.1914, -0.3420],
-        [-0.5112, -0.2324]], dtype=torch.float16, device='cuda:1')
->>> cpu = torch.device("cpu")
->>> linear.to(cpu)
-Linear(in_features=2, out_features=2, bias=True)
->>> linear.weight
-Parameter containing:
-tensor([[ 0.1914, -0.3420],
-        [-0.5112, -0.2324]], dtype=torch.float16)
-
->>> linear = nn.Linear(2, 2, bias=None).to(torch.cdouble)
->>> linear.weight
-Parameter containing:
-tensor([[ 0.3741+0.j,  0.2382+0.j],
-        [ 0.5593+0.j, -0.4443+0.j]], dtype=torch.complex128)
->>> linear(torch.ones(3, 2, dtype=torch.cdouble))
-tensor([[0.6122+0.j, 0.1150+0.j],
-        [0.6122+0.j, 0.1150+0.j],
-        [0.6122+0.j, 0.1150+0.j]], dtype=torch.complex128)
-
-
-
- -
to_empty(*, device: Union[int, str, torch.device, None], recurse: bool = True) → T

Move the parameters and buffers to the specified device without copying storage.

Parameters:

  • device (torch.device) – The desired device of the parameters and buffers in this module.

  • recurse (bool) – Whether parameters and buffers of submodules should be recursively moved to the specified device.

Returns:

self

Return type:

Module
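The typical use case is materializing a module that was built on the meta device. A minimal sketch, assuming a PyTorch version with meta-device support (the module here is illustrative, not part of this reference):

>>> with torch.device("meta"):
...     m = nn.Linear(2, 2)          # parameters are allocated without storage
>>> m = m.to_empty(device="cpu")     # uninitialized storage on CPU; re-initialize before use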
train(mode: bool = True) → T

Set the module in training mode.

This has an effect only on certain modules. See the documentation of particular modules for details of their behavior in training/evaluation mode, if they are affected, e.g. Dropout, BatchNorm, etc.

Parameters:

mode (bool) – whether to set training mode (True) or evaluation mode (False). Default: True.

Returns:

self

Return type:

Module
type(dst_type: Union[torch.dtype, str]) → T

Casts all parameters and buffers to dst_type.

Note

This method modifies the module in-place.

Parameters:

dst_type (type or string) – the desired type

Returns:

self

Return type:

Module
xpu(device: Union[int, torch.device, None] = None) → T

Move all model parameters and buffers to the XPU.

This also makes associated parameters and buffers different objects. So it should be called before constructing the optimizer if the module will live on XPU while being optimized.

Note

This method modifies the module in-place.

Parameters:

device (int, optional) – if specified, all parameters will be copied to that device

Returns:

self

Return type:

Module
zero_grad(set_to_none: bool = True) → None

Reset gradients of all model parameters.

See the similar function under torch.optim.Optimizer for more context.

Parameters:

set_to_none (bool) – instead of setting to zero, set the grads to None. See torch.optim.Optimizer.zero_grad() for details.
diff --git a/_rst/callbacks/adaptive_refinment_callbacks.html b/_rst/callbacks/adaptive_refinment_callbacks.html

Adaptive Refinements callbacks

class R3Refinement(sample_every)[source]

Bases: Callback

PINA Implementation of an R3 Refinement Callback.

This callback implements the R3 (Retain-Resample-Release) routine for sampling new points based on adaptive search.

…
Reference DOI: 10.48550/arXiv.2207.02338

Parameters:

sample_every (int) – Frequency for sampling.

Raises:

ValueError – If sample_every is not an integer.

Example:

>>> r3_callback = R3Refinement(sample_every=5)
     

on_train_start(trainer, _)[source]

Callback function called at the start of training.

This method extracts the locations for sampling from the problem conditions and calculates the total population.

Parameters:

  • trainer (pytorch_lightning.Trainer) – The trainer object managing the training process.

  • _ – Placeholder argument (not used).

Returns:

None

Return type:

None

on_train_epoch_end(trainer, __)[source]

Callback function called at the end of each training epoch.

This method triggers the R3 routine for refinement if the current epoch is a multiple of _sample_every.

Parameters:

  • trainer (pytorch_lightning.Trainer) – The trainer object managing the training process.

  • __ – Placeholder argument (not used).

Returns:

None

Return type:

None
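A minimal end-to-end sketch of attaching the callback to a trainer; the import paths and the solver object are assumptions consistent with the rest of these docs, not part of this reference:

>>> from pina.trainer import Trainer           # import path assumed
>>> from pina.callbacks import R3Refinement    # import path assumed
>>> # `solver` wraps a previously defined and sampled PINA problem
>>> trainer = Trainer(solver, callbacks=[R3Refinement(sample_every=5)], max_epochs=100)
>>> trainer.train()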
diff --git a/_rst/callbacks/optimizer_callbacks.html b/_rst/callbacks/optimizer_callbacks.html

Optimizer callbacks

class SwitchOptimizer(new_optimizers, new_optimizers_kwargs, epoch_switch)[source]

Bases: Callback

PINA Implementation of a Lightning Callback to switch optimizer during training.

This callback allows for switching between different optimizers during training, enabling the exploration of multiple optimization strategies without the need to stop training.

Parameters:

  • new_optimizers (torch.optim.Optimizer | list) – The model optimizers to switch to. Can be a single torch.optim.Optimizer or a list of them for multiple model solvers.

  • new_optimizers_kwargs (dict | list) – The keyword arguments for the new optimizers. Can be a single dictionary or a list of dictionaries corresponding to each optimizer.

  • epoch_switch (int) – The epoch at which to switch to the new optimizer.

Raises:

ValueError – If epoch_switch is less than 1 or if there is a mismatch in the number of optimizers and their corresponding keyword argument dictionaries.

Example:

…
>>> epoch_switch=10)

on_train_epoch_start(trainer, __)[source]

Callback function to switch optimizer at the start of each training epoch.

Parameters:

  • trainer (pytorch_lightning.Trainer) – The trainer object managing the training process.

  • _ – Placeholder argument (not used).

Returns:

None

Return type:

None
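The example above is truncated; here is a hedged sketch consistent with the constructor signature (the LBFGS choice and import path are illustrative assumptions):

>>> import torch
>>> from pina.callbacks import SwitchOptimizer   # import path assumed
>>> switch_callback = SwitchOptimizer(new_optimizers=torch.optim.LBFGS,
...                                   new_optimizers_kwargs={'lr': 0.1},
...                                   epoch_switch=10)
>>> trainer = Trainer(solver, callbacks=[switch_callback])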
diff --git a/_rst/callbacks/processing_callbacks.html b/_rst/callbacks/processing_callbacks.html

Processing callbacks

class MetricTracker[source]

Bases: Callback

PINA Implementation of a Lightning Callback for Metric Tracking.

This class provides functionality to track relevant metrics during the training process.

Variables:

_collection – A list to store collected metrics after each training epoch.

Parameters:

trainer (pytorch_lightning.Trainer) – The trainer object managing the training process.

Returns:

A dictionary containing aggregated metric values.

Return type:

dict

Example:

…
>>> metrics = tracker.metrics

on_train_epoch_end(trainer, pl_module)[source]

Collect and track metrics at the end of each training epoch.

Parameters:

  • trainer (pytorch_lightning.Trainer) – The trainer object managing the training process.

  • pl_module – Placeholder argument.

Returns:

None

Return type:

None

property metrics

Aggregate collected metrics during training.

Returns:

A dictionary containing aggregated metric values.

Return type:

dict
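A hedged end-to-end sketch of collecting metrics with the tracker (the solver object and Trainer usage are assumptions consistent with the rest of these docs):

>>> tracker = MetricTracker()
>>> trainer = Trainer(solver, callbacks=[tracker])
>>> trainer.train()
>>> metrics = tracker.metrics   # dict of aggregated metric values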

class PINAProgressBar(metrics='mean', **kwargs)[source]

Bases: TQDMProgressBar

PINA Implementation of a Lightning Callback for enriching the progress bar.

This class provides functionality to display only relevant metrics during the training process.

Parameters:

metrics (str | list(str) | tuple(str)) – Logged metrics to display during the training. It should be a subset of the conditions keys defined in pina.condition.Condition.

Keyword Arguments:

The additional keyword arguments specify the progress bar and can be chosen from the pytorch-lightning TQDMProgressBar API.

Example:

>>> pbar = PINAProgressBar(['mean'])
>>> trainer = Trainer(solver, callbacks=[pbar])
>>> # ... Perform training ...

get_metrics(trainer, pl_module)[source]

Combines progress bar metrics collected from the trainer with standard metrics from get_standard_metrics. Implement this to override the items displayed in the progress bar. The progress bar metrics are sorted according to metrics.

Here is an example of how to override the defaults:

def get_metrics(self, trainer, model):
    # don't show the version number
    items = super().get_metrics(trainer, model)
    items.pop("v_num", None)
    return items

Returns:

Dictionary with the items to be displayed in the progress bar.

Return type:

dict

on_fit_start(trainer, pl_module)[source]

Check that the metrics defined in the initialization are available, i.e. are correctly logged.

Parameters:

  • trainer (pytorch_lightning.Trainer) – The trainer object managing the training process.

  • pl_module – Placeholder argument.
diff --git a/_rst/condition.html b/_rst/condition.html
diff --git a/_rst/equations.html b/_rst/equations.html
diff --git a/_rst/geometry/cartesian.html b/_rst/geometry/cartesian.html

CartesianDomain

class CartesianDomain(cartesian_dict)[source]

Bases: Location

PINA implementation of Hypercube domain.

Parameters:

cartesian_dict (dict) – A dictionary with dict-key a string representing the input variables for the pinn, and dict-value a list with the domain extrema.

Example:

>>> spatial_domain = CartesianDomain({'x': [0, 1], 'y': [0, 1]})

property variables

Spatial variables.

Returns:

Spatial variables defined in __init__()

Return type:

list[str]

update(new_domain)[source]

Adds new dimensions to the CartesianDomain.

Parameters:

new_domain (CartesianDomain) – A new CartesianDomain object to merge

Example:

>>> spatial_domain = CartesianDomain({'x': [0, 1], 'y': [0, 1]})
>>> spatial_domain.variables
['x', 'y']
…

sample(n, mode='random', variables='all')[source]

Sample routine.

Parameters:

  • n (int) – Number of points to sample, see Note below for reference.

  • mode (str) – Mode for sampling, defaults to random. Available modes include: random sampling, random; latin hypercube sampling, latin or lh; chebyshev sampling, chebyshev; grid sampling, grid.

  • variables (str | list[str]) – pinn variable to be sampled, defaults to all.

Returns:

Returns LabelTensor of n sampled points.

Return type:

LabelTensor

Note

…
The extrema values of the domain are always sampled only for grid mode.

Example:

>>> spatial_domain = CartesianDomain({'x': [0, 1], 'y': [0, 1]})
>>> spatial_domain.sample(n=4, mode='random')
    tensor([[0.0108, 0.7643],
    …

is_inside(point, check_border=False)[source]

Check if a point is inside the hypercube.

Parameters:

  • point (LabelTensor) – Point to be checked

  • check_border (bool) – Check if the point is also on the frontier of the hypercube, default False.

Returns:

Returning True if the point is inside, False otherwise.

Return type:

bool
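A short sketch exercising the documented sampling modes (assuming, per the note above, that grid mode samples n points per variable):

>>> domain = CartesianDomain({'x': [0, 1], 'y': [0, 1]})
>>> pts_random = domain.sample(n=16, mode='random')   # 16 points
>>> pts_lh = domain.sample(n=16, mode='lh')           # latin hypercube, 16 points
>>> pts_grid = domain.sample(n=4, mode='grid')        # 4 per variable -> 16 points in 2D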
diff --git a/_rst/geometry/difference_domain.html b/_rst/geometry/difference_domain.html

Difference

Module for Difference class.

class Difference(geometries)[source]

Bases: OperationInterface

PINA implementation of Difference of Domains. Given two sets \(A\) and \(B\), the domain difference is defined as:

\[A \setminus B = \{x \mid x \in A \wedge x \notin B\},\]

with \(x\) a point in \(\mathbb{R}^N\) and \(N\) the dimension of the geometry space.

Parameters:

geometries (list) – A list of geometries from pina.geometry such as EllipsoidDomain or CartesianDomain. The first geometry in the list is the geometry from which points are sampled. The rest of the geometries are the geometries that are excluded from the first geometry to find the difference.

Example:

>>> # Create two ellipsoid domains
>>> ellipsoid1 = EllipsoidDomain({'x': [-1, 1], 'y': [-1, 1]})
>>> ellipsoid2 = EllipsoidDomain({'x': [0, 2], 'y': [0, 2]})
…

is_inside(point, check_border=False)[source]

Check if a point is inside the Difference domain.

Parameters:

  • point (torch.Tensor) – Point to be checked.

  • check_border (bool) – If True, the border is considered inside.

Returns:

True if the point is inside the Difference domain, False otherwise.

Return type:

bool

sample(n, mode='random', variables='all')[source]

Sample routine for Difference domain.

Parameters:

  • n (int) – Number of points to sample in the shape.

  • mode (str) – Mode for sampling, defaults to random. Available modes include: random.

  • variables (str | list[str]) – Variables to be sampled, defaults to all.

Returns:

Returns LabelTensor of n sampled points.

Return type:

LabelTensor

Example:

>>> # Create two Cartesian domains
>>> cartesian1 = CartesianDomain({'x': [0, 2], 'y': [0, 2]})
>>> cartesian2 = CartesianDomain({'x': [1, 3], 'y': [1, 3]})
…
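A hedged completion of the elided examples above, following the documented constructor (the first geometry is sampled, the rest are excluded):

>>> difference = Difference([cartesian1, cartesian2])
>>> pts = difference.sample(n=100, mode='random')   # points in cartesian1 but not in cartesian2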
diff --git a/_rst/geometry/ellipsoid.html b/_rst/geometry/ellipsoid.html

EllipsoidDomain

class EllipsoidDomain(ellipsoid_dict, sample_surface=False)[source]

Bases: Location

PINA implementation of Ellipsoid domain.

Parameters:

  • ellipsoid_dict (dict) – A dictionary with dict-key a string representing the input variables for the pinn, and dict-value a list with the domain extrema.

  • sample_surface (bool) – …

… Vol. 7. No. 8. 2001.

Example:

>>> spatial_domain = EllipsoidDomain({'x': [-1, 1], 'y': [-1, 1]})

property variables

Spatial variables.

Returns:

Spatial variables defined in __init__()

Return type:

list[str]

is_inside(point, check_border=False)[source]

Check if a point is inside the ellipsoid domain.

Note

… points on the surface, and not inside the domain.

Parameters:

  • point (LabelTensor) – Point to be checked.

  • check_border (bool) – Check if the point is also on the frontier of the ellipsoid, default False.

Returns:

Returning True if the point is inside, False otherwise.

Return type:

bool

sample(n, mode='random', variables='all')[source]

Sample routine.

Parameters:

  • n (int) – Number of points to sample in the shape.

  • mode (str) – Mode for sampling, defaults to random. Available modes include: random.

  • variables (str | list[str]) – Variables to be sampled, defaults to all.

Returns:

Returns LabelTensor of n sampled points.

Return type:

LabelTensor

Example:

>>> elips = EllipsoidDomain({'x': [1, 0], 'y': 1})
>>> elips.sample(n=6)
    tensor([[0.4872, 1.0000],
    …
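Since sample_surface is the distinguishing option of this domain, a short hedged sketch of boundary-only sampling, per the parameter description above:

>>> ellipsoid = EllipsoidDomain({'x': [-1, 1], 'y': [-1, 1]}, sample_surface=True)
>>> boundary_pts = ellipsoid.sample(n=50)   # points lie on the ellipse boundary only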
diff --git a/_rst/geometry/exclusion_domain.html b/_rst/geometry/exclusion_domain.html

Exclusion

Module for Exclusion class.

class Exclusion(geometries)[source]

Bases: OperationInterface

PINA implementation of Exclusion of Domains. Given two sets \(A\) and \(B\), the domain exclusion (symmetric difference) is defined as:

\[A \,\triangle\, B = \{x \mid x \in A \cup B \wedge x \notin A \cap B\},\]

with \(x\) a point in \(\mathbb{R}^N\) and \(N\) the dimension of the geometry space.

Parameters:

geometries (list) – A list of geometries from pina.geometry such as EllipsoidDomain or CartesianDomain.

Example:

>>> # Create two ellipsoid domains
>>> ellipsoid1 = EllipsoidDomain({'x': [-1, 1], 'y': [-1, 1]})
>>> ellipsoid2 = EllipsoidDomain({'x': [0, 2], 'y': [0, 2]})
…

is_inside(point, check_border=False)[source]

Check if a point is inside the Exclusion domain.

Parameters:

  • point (torch.Tensor) – Point to be checked.

  • check_border (bool) – If True, the border is considered inside.

Returns:

True if the point is inside the Exclusion domain, False otherwise.

Return type:

bool

sample(n, mode='random', variables='all')[source]

Sample routine for Exclusion domain.

Parameters:

  • n (int) – Number of points to sample in the shape.

  • mode (str) – Mode for sampling, defaults to random. Available modes include: random.

  • variables (str | list[str]) – Variables to be sampled, defaults to all.

Returns:

Returns LabelTensor of n sampled points.

Return type:

LabelTensor

Example:

>>> # Create two Cartesian domains
>>> cartesian1 = CartesianDomain({'x': [0, 2], 'y': [0, 2]})
>>> cartesian2 = CartesianDomain({'x': [1, 3], 'y': [1, 3]})
…
diff --git a/_rst/geometry/intersection_domain.html b/_rst/geometry/intersection_domain.html

Intersection

Module for Intersection class.

class Intersection(geometries)[source]

Bases: OperationInterface

PINA implementation of Intersection of Domains. Given two sets \(A\) and \(B\), the domain intersection is defined as:

\[A \cap B = \{x \mid x \in A \wedge x \in B\},\]

with \(x\) a point in \(\mathbb{R}^N\) and \(N\) the dimension of the geometry space.

Parameters:

geometries (list) – A list of geometries from pina.geometry such as EllipsoidDomain or CartesianDomain. The intersection will be taken between all the geometries in the list. The resulting geometry will be the intersection of all the geometries in the list.

Example:

>>> # Create two ellipsoid domains
>>> ellipsoid1 = EllipsoidDomain({'x': [-1, 1], 'y': [-1, 1]})
>>> ellipsoid2 = EllipsoidDomain({'x': [0, 2], 'y': [0, 2]})
…

is_inside(point, check_border=False)[source]

Check if a point is inside the Intersection domain.

Parameters:

  • point (torch.Tensor) – Point to be checked.

  • check_border (bool) – If True, the border is considered inside.

Returns:

True if the point is inside the Intersection domain, False otherwise.

Return type:

bool

sample(n, mode='random', variables='all')[source]

Sample routine for Intersection domain.

Parameters:

  • n (int) – Number of points to sample in the shape.

  • mode (str) – Mode for sampling, defaults to random. Available modes include: random.

  • variables (str | list[str]) – Variables to be sampled, defaults to all.

Returns:

Returns LabelTensor of n sampled points.

Return type:

LabelTensor

Example:

>>> # Create two Cartesian domains
>>> cartesian1 = CartesianDomain({'x': [0, 2], 'y': [0, 2]})
>>> cartesian2 = CartesianDomain({'x': [1, 3], 'y': [1, 3]})
…
diff --git a/_rst/geometry/location.html b/_rst/geometry/location.html

Location

Module for Location class.

class Location[source]

Bases: object

Abstract Location class. Any geometry entity should inherit from this class.

abstract sample()[source]

Abstract method for sampling a point from the location. To be implemented in the child class.

abstract is_inside(point, check_border=False)[source]

Abstract method for checking if a point is inside the location. To be implemented in the child class.

Parameters:

  • point (torch.Tensor) – A tensor point to be checked.

  • check_border (bool) – A boolean that determines whether the border of the location is considered inside or not. Defaults to False.
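A hedged sketch of a custom geometry implementing the two abstract methods (the 1D interval and the import paths are illustrative assumptions):

>>> import torch
>>> from pina import LabelTensor            # import path assumed
>>> from pina.geometry import Location      # import path assumed
>>> class UnitInterval(Location):
...     def sample(self, n, mode='random', variables='all'):
...         # uniform points in [0, 1], labelled like PINA's built-in domains
...         return LabelTensor(torch.rand(n, 1), labels=['x'])
...     def is_inside(self, point, check_border=False):
...         return bool(((point >= 0) & (point <= 1)).all())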
diff --git a/_rst/geometry/operation_interface.html b/_rst/geometry/operation_interface.html

OperationInterface

Module for OperationInterface class.

class OperationInterface(geometries)[source]

Bases: Location

Abstract set operation class. Any geometry operation entity must inherit from this class.

Parameters:

geometries (list) – A list of geometries from pina.geometry such as EllipsoidDomain or CartesianDomain.

property geometries

The geometries to perform the set operation on.

property variables

Spatial variables of the domain.

Returns:

All the variables defined in __init__ in order.

Return type:

list[str]

abstract is_inside(point, check_border=False)[source]

Check if a point is inside the resulting domain after a set operation is applied.

Parameters:

  • point (torch.Tensor) – Point to be checked.

  • check_border (bool) – If True, the border is considered inside.

Returns:

True if the point is inside the resulting domain, False otherwise.

Return type:

bool
diff --git a/_rst/geometry/simplex.html b/_rst/geometry/simplex.html

SimplexDomain

class SimplexDomain(simplex_matrix, sample_surface=False)[source]

Bases: Location

PINA implementation of a Simplex.

Parameters:

  • simplex_matrix (list[LabelTensor]) – A matrix of LabelTensor objects representing a vertex of the simplex (a tensor), and the coordinates of the …

  • sample_surface (bool) – … for sampling should be used.

Example:

>>> spatial_domain = SimplexDomain(
        [
            LabelTensor(torch.tensor([[0, 0]]), labels=["x", "y"]),
            …
        ]
    )

is_inside(point, check_border=False)[source]

Check if a point is inside the simplex, using the barycentric-coordinate test: https://en.wikipedia.org/wiki/Barycentric_coordinate_system.

Parameters:

  • point (LabelTensor) – Point to be checked.

  • check_border (bool) – Check if the point is also on the frontier of the simplex, default False.

Returns:

Returning True if the point is inside, False otherwise.

Return type:

bool

sample(n, mode='random', variables='all')[source]

Sample n points from Simplex domain.

Parameters:

  • n (int) – Number of points to sample in the shape.

  • mode (str) – Mode for sampling, defaults to random. Available modes include: random.

  • variables (str | list[str]) – Variables to be sampled, defaults to all.

Returns:

Returns LabelTensor of n sampled points.

Return type:

LabelTensor
diff --git a/_rst/geometry/union_domain.html b/_rst/geometry/union_domain.html

Union

Module for Union class.

class Union(geometries)[source]

Bases: OperationInterface

PINA implementation of Unions of Domains. Given two sets \(A\) and \(B\), the domain union is defined as:

\[A \cup B = \{x \mid x \in A \vee x \in B\},\]

with \(x\) a point in \(\mathbb{R}^N\) and \(N\) the dimension of the geometry space.

Parameters:

geometries (list) – A list of geometries from pina.geometry such as EllipsoidDomain or CartesianDomain.

Example:

>>> # Create two ellipsoid domains
>>> ellipsoid1 = EllipsoidDomain({'x': [-1, 1], 'y': [-1, 1]})
>>> ellipsoid2 = EllipsoidDomain({'x': [0, 2], 'y': [0, 2]})
…

is_inside(point, check_border=False)[source]

Check if a point is inside the Union domain.

Parameters:

  • point (LabelTensor) – Point to be checked.

  • check_border (bool) – Check if the point is also on the frontier of the domain, default False.

Returns:

Returning True if the point is inside, False otherwise.

Return type:

bool

sample(n, mode='random', variables='all')[source]

Sample routine for Union domain.

Parameters:

  • n (int) – Number of points to sample in the shape.

  • mode (str) – Mode for sampling, defaults to random. Available modes include: random.

  • variables (str | list[str]) – Variables to be sampled, defaults to all.

Returns:

Returns LabelTensor of n sampled points.

Return type:

LabelTensor

Example:

>>> # Create two Cartesian domains
>>> cartesian1 = CartesianDomain({'x': [0, 2], 'y': [0, 2]})
>>> cartesian2 = CartesianDomain({'x': [1, 3], 'y': [1, 3]})
…
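A hedged completion of the elided example above, following the documented constructor:

>>> union = Union([cartesian1, cartesian2])
>>> pts = union.sample(n=100)   # points drawn from the union of the two boxes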
diff --git a/_rst/label_tensor.html b/_rst/label_tensor.html
diff --git a/_rst/layers/avno_layer.html b/_rst/layers/avno_layer.html
diff --git a/_rst/layers/convolution.html b/_rst/layers/convolution.html

Continuous convolution

class ContinuousConvBlock(input_numb_field, output_numb_field, filter_dim, stride, model=None, optimize=False, no_overlap=False)[source]

Bases: BaseContinuousConv

Implementation of Continuous Convolutional operator.

The algorithm expects input to be in the form: \([B, N_{in}, N, D]\)
…
https://doi.org/10.1007/s00466-023-02291-1

Parameters:

  • input_numb_field (int) – Number of fields \(N_{in}\) in the input.

  • output_numb_field (int) – Number of fields \(N_{out}\) in the output.

  • filter_dim (tuple(int) | list(int)) – Dimension of the filter.

  • stride (dict) – Stride for the filter.

  • model (torch.nn.Module) – Neural network for inner parametrization, defaults to None. If None, a default multilayer perceptron of width three and size twenty with ReLU activation is used.

  • optimize (bool) – Flag for performing optimization on the continuous filter …

  • … transpose and forward modes.

Example:

…

forward(X)[source]

Forward pass in the convolutional layer.

Parameters:

X (torch.Tensor) – Input data for the convolution \([B, N_{in}, N, D]\).

Returns:

Convolution output \([B, N_{out}, N, D]\).

Return type:

torch.Tensor

transpose_no_overlap(integrals, X)[source]

Transpose pass in the layer for non-overlapping filters.

Parameters:

  • integrals – Weights for the transpose convolution. Shape \([B, N_{in}, N]\) where \(B\) is the batch size, \(N_{in}\) is the number of input fields, \(N\) the number of points in the mesh, and \(D\) the dimension of the problem.

  • X (torch.Tensor) – Input data. Expects a tensor of shape \([B, N_{in}, M, D]\) where \(B\) is the batch size, \(N_{in}\) is the number of input fields, \(M\) the number of points in the mesh, and \(D\) the dimension of the problem.

Returns:

Feed forward transpose convolution. Tensor of shape \([B, N_{out}, M, D]\) where \(B\) is the batch size, \(N_{out}\) is the number of output fields, \(M\) the number of points in the mesh, and \(D\) the dimension of the problem.

Return type:

torch.Tensor

transpose_overlap(integrals, X)[source]

Transpose pass in the layer for overlapping filters.

Parameters:

  • integrals – Weights for the transpose convolution. Shape \([B, N_{in}, N]\) where \(B\) is the batch size, \(N_{in}\) is the number of input fields, \(N\) the number of points in the mesh, and \(D\) the dimension of the problem.

  • X (torch.Tensor) – Input data. Expects a tensor of shape \([B, N_{in}, M, D]\) where \(B\) is the batch size, \(N_{in}\) is the number of input fields, \(M\) the number of points in the mesh, and \(D\) the dimension of the problem.

Returns:

Feed forward transpose convolution. Tensor of shape \([B, N_{out}, M, D]\) where \(B\) is the batch size, \(N_{out}\) is the number of output fields, \(M\) the number of points in the mesh, and \(D\) the dimension of the problem.

Return type:

torch.Tensor
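The reference example is elided above; below is a minimal shape sketch under stated assumptions. The stride dictionary keys here are hypothetical placeholders, not the documented API, and the exact schema must be taken from the full ContinuousConvBlock reference:

>>> import torch
>>> # hypothetical stride specification (keys are assumptions)
>>> stride = {'domain': [1, 1], 'start': [0, 0], 'jump': [0.1, 0.1], 'direction': [1, 1]}
>>> conv = ContinuousConvBlock(input_numb_field=1, output_numb_field=2,
...                            filter_dim=[0.1, 0.1], stride=stride)
>>> X = torch.rand(8, 1, 100, 3)   # [B, N_in, N, D]: batch, fields, points, coordinates
>>> out = conv(X)                  # -> [B, N_out, N, D] per forward()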
diff --git a/_rst/layers/enhanced_linear.html b/_rst/layers/enhanced_linear.html

EnhancedLinear

class EnhancedLinear(layer, activation=None, dropout=None)[source]

Bases: Module

A wrapper class for enhancing a linear layer with activation and/or dropout.

Parameters:

  • layer (torch.nn.Module) – The linear layer to be enhanced.

  • activation (torch.nn.Module) – The activation function to be applied after the linear layer.

  • dropout (float) – The dropout probability to be applied after the activation (if provided).

Example:

>>> linear_layer = torch.nn.Linear(10, 20)
…

forward(x)[source]

Forward pass through the enhanced linear module.

Parameters:

x (torch.Tensor) – Input tensor.

Returns:

Output tensor after passing through the enhanced linear module.

Return type:

torch.Tensor
diff --git a/_rst/layers/fourier.html b/_rst/layers/fourier.html

    Fourier Layers

    -
    -
    -class FourierBlock1D(input_numb_fields, output_numb_fields, n_modes, activation=<class 'torch.nn.modules.activation.Tanh'>)[source]
    -

    Bases: torch.nn.modules.module.Module

    + +
    + + +
    +
    + + + + + +
    + +
    +

    Fourier Layers#

    +
    +
    +class FourierBlock1D(input_numb_fields, output_numb_fields, n_modes, activation=<class 'torch.nn.modules.activation.Tanh'>)[source]#
    +

    Bases: Module

    Fourier block implementation for three dimensional input tensor. The combination of Fourier blocks make up the Fourier Neural Operator

    @@ -133,33 +579,33 @@

    Fourier LayersarXiv preprint arXiv:2010.08895.

    Initialize internal Module state, shared by both nn.Module and ScriptModule.

    -
    -
    -forward(x)[source]
    +
    +
    +forward(x)[source]#

    Forward computation for Fourier Block. It performs a spectral convolution and a linear transformation of the input and sum the results.

    -
    Parameters
    -

    x (torch.Tensor) – The input tensor for fourier block, expect of size +

    Parameters:
    +

    x (torch.Tensor) – The input tensor for fourier block, expect of size [batch, input_numb_fields, x].

    -
    Returns
    +
    Returns:

    The output tensor obtained from the fourier block of size [batch, output_numb_fields, x].

    -
    Return type
    -

    torch.Tensor

    +
    Return type:
    +

    torch.Tensor

    -
    -
    -class FourierBlock2D(input_numb_fields, output_numb_fields, n_modes, activation=<class 'torch.nn.modules.activation.Tanh'>)[source]
    -

    Bases: torch.nn.modules.module.Module

    +
    +
    +class FourierBlock2D(input_numb_fields, output_numb_fields, n_modes, activation=<class 'torch.nn.modules.activation.Tanh'>)[source]#
    +

    Bases: Module

    Fourier block implementation for two dimensional input tensor. The combination of Fourier blocks make up the Fourier Neural Operator

    @@ -180,43 +626,43 @@

    Fourier Layers[batch, input_numb_fields, Nx, Ny] and returns an output of size [batch, output_numb_fields, Nx, Ny].

    -
    Parameters
    +
    Parameters:
    • input_numb_fields (int) – The number of channels for the input.

    • output_numb_fields (int) – The number of channels for the output.

    • -
    • | tuple n_modes (list) – Number of modes to select for each dimension. +

    • n_modes (list | tuple) – Number of modes to select for each dimension. It must be at most equal to the floor(Nx/2)+1 and floor(Ny/2)+1.

    • -
    • activation (torch.nn.Module) – The activation function.

    • +
    • activation (torch.nn.Module) – The activation function.

    -
    -
    -forward(x)[source]
    +
    +
    +forward(x)[source]#

    Forward computation for Fourier Block. It performs a spectral convolution and a linear transformation of the input and sum the results.

    -
    Parameters
    -

    x (torch.Tensor) – The input tensor for fourier block, expect of size +

    Parameters:
    +

    x (torch.Tensor) – The input tensor for fourier block, expect of size [batch, input_numb_fields, x, y].

    -
    Returns
    +
    Returns:

    The output tensor obtained from the fourier block of size [batch, output_numb_fields, x, y, z].

    -
    Return type
    -

    torch.Tensor

    +
    Return type:
    +

    torch.Tensor

    -
    -
class FourierBlock3D(input_numb_fields, output_numb_fields, n_modes, activation=<class 'torch.nn.modules.activation.Tanh'>)[source]#

Bases: Module

Fourier block implementation for a three dimensional input tensor. The combination of Fourier blocks makes up the Fourier Neural Operator.

The block expects an input of size [batch, input_numb_fields, Nx, Ny, Nz] and returns an output of size [batch, output_numb_fields, Nx, Ny, Nz].

Parameters:
• input_numb_fields (int) – The number of channels for the input.
• output_numb_fields (int) – The number of channels for the output.
• n_modes (list | tuple) – Number of modes to select for each dimension. It must be at most equal to floor(Nx/2)+1, floor(Ny/2)+1 and floor(Nz/2)+1.
• activation (torch.nn.Module) – The activation function.

forward(x)[source]#

Forward computation for the Fourier block. It performs a spectral convolution and a linear transformation of the input and sums the results.

Parameters:

x (torch.Tensor) – The input tensor for the Fourier block, expected of size [batch, input_numb_fields, x, y, z].

Returns:

The output tensor obtained from the Fourier block, of size [batch, output_numb_fields, x, y, z].

Return type:

torch.Tensor
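A minimal usage sketch of one of these blocks (assuming the classes are importable from pina.model.layers, as in PINA 0.1.x; the shapes follow the documentation above):

import torch
from pina.model.layers import FourierBlock2D  # assumed import path

# 2 input channels, 4 output channels, keep 8 Fourier modes per spatial axis
# (8 is at most floor(32/2)+1, as required)
block = FourierBlock2D(input_numb_fields=2, output_numb_fields=4, n_modes=[8, 8])

x = torch.rand(10, 2, 32, 32)   # [batch, input_numb_fields, Nx, Ny]
y = block(x)                    # [10, 4, 32, 32]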
Fourier Feature Embedding#

class FourierFeatureEmbedding(input_dimension, output_dimension, sigma)[source]#

Bases: Module
Fourier Feature Embedding class for encoding input features using random Fourier features. This class applies a Fourier transformation to the input features. Original reference: 10.1016/j.cma.2021.113938.

Parameters:
• input_dimension (int) – The input vector dimension of the layer.
• output_dimension (int) – The output dimension of the layer. The output is obtained as a concatenation of the cosine and sine embedding, hence it must be a multiple of two (even number).
• sigma (int | float) – The standard deviation used for the Fourier Embedding. This value must reflect the granularity of the scale in the differential equation solution.

forward(x)[source]#

Forward pass to compute the Fourier embedding.

Parameters:

x (torch.Tensor) – Input tensor.

Returns:

Fourier embeddings of the input.

Return type:

torch.Tensor

property sigma#

    Returning the variance of the sampled matrix for Fourier Embedding.

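A minimal usage sketch (assuming the class is importable from pina.model.layers, as in PINA 0.1.x):

import torch
from pina.model.layers import FourierFeatureEmbedding  # assumed import path

# Map a 2D input to a 10-dimensional embedding (5 cosine + 5 sine features);
# output_dimension must be even, and sigma sets the frequency scale.
embedding = FourierFeatureEmbedding(input_dimension=2, output_dimension=10, sigma=1.0)

x = torch.rand(100, 2)
z = embedding(x)  # shape [100, 10]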
Low Rank layer#

class LowRankBlock(input_dimensions, embedding_dimenion, rank, inner_size=20, n_layers=2, func=<class 'torch.nn.modules.activation.Tanh'>, bias=True)[source]#

Bases: Module
    The PINA implementation of the inner layer of the Averaging Neural Operator.

The operator layer performs an affine transformation where the convolution is approximated with a local average.

Parameters:
• input_dimensions (int) – The number of input components of the model.
• func (torch.nn.Module) – the activation function to use. If a single torch.nn.Module is passed, this is used as activation function after any layers, except the last one. If a list of Modules is passed, they are used as activation functions at any layers, in order.

forward(x, coords)[source]#

Forward pass of the layer. It performs an affine transformation of the field, and a low rank approximation by doing a dot product of the basis \(\phi^{(i)}\) evaluated in the spatial input \(x\).

Parameters:
• x (torch.Tensor) – The input tensor for performing the computation. It expects a tensor \(B \times N \times D\), where \(B\) is the batch_size, \(N\) the number of points in the mesh, \(D\) the dimension of the problem. In particular \(D\) is the codomain of the function \(v\). For example a scalar function has \(D=1\), a 4-dimensional vector function \(D=4\).
• coords (torch.Tensor) – The coordinates in which the field is evaluated for performing the computation. It expects a tensor \(B \times N \times d\), where \(B\) is the batch_size, \(N\) the number of points in the mesh, \(d\) the dimension of the domain.

Returns:

The output tensor obtained from the Averaging Neural Operator Block.

Return type:

torch.Tensor

property rank#

      The basis rank.


OrthogonalBlock#

class OrthogonalBlock(dim=-1, requires_grad=True)[source]#

Bases: Module

Module to make the input orthonormal. The module takes a tensor of size \([N, M]\) and returns a tensor of size \([N, M]\) where the columns are orthonormal. The block performs a Gram-Schmidt orthogonalization process on the input; see https://en.wikipedia.org/wiki/Gram%E2%80%93Schmidt_process for details.

Initialize the OrthogonalBlock module.

Parameters:
• dim (int) – The dimension along which to orthogonalize.
• requires_grad (bool) – If autograd should record operations on the returned tensor, defaults to True.

forward(X)[source]#
+

Forward pass of the OrthogonalBlock module using a Gram-Schmidt algorithm.

Raises:

Warning – If the size along the chosen dimension is greater than the sizes of the other dimensions.

Parameters:

X (torch.Tensor) – The input tensor to orthogonalize. The input must be of dimensions \([N, M]\).

Returns:

The orthonormal tensor.
property dim#

Get the dimension along which operations are performed.

Returns:

The current dimension value.

Return type:

int

property requires_grad#

Indicates whether gradient computation is required for operations on the tensors.

Returns:

True if gradients are required, False otherwise.

Return type:

bool
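A minimal usage sketch (assuming the class is importable from pina.model.layers, as in PINA 0.1.x):

import torch
from pina.model.layers import OrthogonalBlock  # assumed import path

block = OrthogonalBlock(dim=-1)
X = torch.rand(10, 4)       # 4 column vectors of length 10
Q = block(X)                # columns of Q are orthonormal

# Q^T Q should be (close to) the 4 x 4 identity
print(torch.allclose(Q.T @ Q, torch.eye(4), atol=1e-5))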

Periodic Boundary Condition Embedding#

class PeriodicBoundaryEmbedding(input_dimension, periods, output_dimension=None)[source]#

Bases: Module
    Imposing hard constraint periodic boundary conditions by embedding the input.

Parameters:
• input_dimension (int) – The dimension of the input tensor, it can be checked with the tensor.ndim method.
• periods (float | int | dict) – The periodicity in each dimension for the input data. If float or int is passed, the period is assumed constant for all the dimensions of the data. If a dict is passed, the dict.values represent the periods, while the dict.keys indicate the dimensions to which the periodicity is applied.

forward(x)[source]#

      Forward pass to compute the periodic boundary conditions embedding.

Parameters:

x (torch.Tensor) – Input tensor.

Returns:

Periodic embedding of the input.

Return type:

torch.Tensor

property period#

      The period of the periodic function to approximate.

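The underlying idea can be sketched in plain PyTorch, independently of the PINA class: a coordinate with period \(L\) enters the network only through \(L\)-periodic features, so any network composed with this map is exactly periodic. The variable names are illustrative only.

import torch

L = 2.0
x = torch.linspace(0, 4, 200).unsqueeze(-1)
omega = 2 * torch.pi / L

# constant, cosine and sine features: all are L-periodic in x by construction
features = torch.cat(
    [torch.ones_like(x), torch.cos(omega * x), torch.sin(omega * x)], dim=-1
)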

PODBlock#

class PODBlock(rank, scale_coefficients=True)[source]#

Bases: Module

POD layer: it projects the input field on the proper orthogonal decomposition basis. It needs to be fitted to the data before being used with the method fit(), which invokes the singular value decomposition.

Parameters:
• rank (int) – The rank of the POD layer.
• scale_coefficients (bool) – If True, the coefficients are scaled after the projection to have zero mean and unit variance.

property rank#

      The rank of the POD layer.

Return type:

int

property basis#

      The POD basis. It is a matrix whose columns are the first self.rank POD modes.

Return type:

torch.Tensor

property scaler#

      The scaler. It is a dictionary with the keys ‘mean’ and ‘std’ that store the mean and the standard deviation of the coefficients.

Return type:

dict

property scale_coefficients#

      If True, the coefficients are scaled after the projection to have zero mean and unit variance.

Return type:

bool

fit(X)[source]#

      Set the POD basis by performing the singular value decomposition of the given tensor. If self.scale_coefficients is True, the coefficients are scaled after the projection to have zero mean and unit variance.

Parameters:

X (torch.Tensor) – The tensor to be reduced.

forward(X)[source]#

      The forward pass of the POD layer. By default it executes the reduce() method, reducing the input tensor to its POD representation. The POD layer needs to be fitted before being used.

Parameters:

X (torch.Tensor) – The input tensor to be reduced.

Returns:

The reduced tensor.

Return type:

torch.Tensor

reduce(X)[source]#

      Reduce the input tensor to its POD representation. The POD layer needs to be fitted before being used.

Parameters:

X (torch.Tensor) – The input tensor to be reduced.

Returns:

The reduced tensor.

Return type:

torch.Tensor

expand(coeff)[source]#

      Expand the given coefficients to the original space. The POD layer needs to be fitted before being used.

Parameters:

coeff (torch.Tensor) – The coefficients to be expanded.

Returns:

The expanded tensor.

Return type:

torch.Tensor
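A minimal usage sketch of the fit/reduce/expand cycle (assuming the class is importable from pina.model.layers, as in PINA 0.1.x; the snapshot shapes are illustrative):

import torch
from pina.model.layers import PODBlock  # assumed import path

pod = PODBlock(rank=5)

snapshots = torch.rand(100, 1000)  # 100 snapshots of a field on 1000 points
pod.fit(snapshots)                 # builds the POD basis via SVD

coeff = pod(snapshots)             # project; equivalent to pod.reduce(snapshots)
approx = pod.expand(coeff)         # lift the coefficients back to full space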
    +
-
- - - - - + - + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/_rst/layers/rbf_layer.html b/_rst/layers/rbf_layer.html new file mode 100644 index 00000000..c279b42e --- /dev/null +++ b/_rst/layers/rbf_layer.html @@ -0,0 +1,904 @@ + + + + + + + + + + + RBFBlock — PINA 0.1.2 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + +
+
+ + + +
+ + + + + + + + + + + + + +
+ +
+ + +
+
+ +
+
+ +
+ +
+ + + + +
+ +
+ + +
+
+ + + + + +
+ +
+

RBFBlock#

class RBFBlock(neighbors=None, smoothing=0.0, kernel='thin_plate_spline', epsilon=None, degree=None)[source]#

Bases: Module

Radial Basis Function (RBF) interpolation layer. It needs to be fitted to the data with the method fit() before it can be used to interpolate new points. The layer is not trainable.

Note

It reproduces the implementation of scipy.interpolate.RBFInterpolator and it is inspired by the implementation in torchrbf.

Parameters:
• neighbors (int) – Number of neighbors to use for the interpolation. If None, use all data points.
• smoothing (float) – Smoothing parameter for the interpolation. If 0.0, the interpolation is exact and no smoothing is applied.
• kernel (str) – Radial basis function to use. Must be one of linear, thin_plate_spline, cubic, quintic, multiquadric, inverse_multiquadric, inverse_quadratic, or gaussian.
• epsilon (float) – Shape parameter that scales the input to the RBF. This defaults to 1 for kernels in the scale_invariant dictionary, and must be specified for other kernels.
• degree (int) – Degree of the added polynomial. For some kernels, there exists a minimum degree of the polynomial such that the RBF is well-posed. Those minimum degrees are specified in the min_degree_funcs dictionary. If degree is less than the minimum degree, a warning is raised and the degree is set to the minimum value.

property smoothing#

Smoothing parameter for the interpolation.

Return type:

float

property kernel#

Radial basis function to use.

Return type:

str

property epsilon#

Shape parameter that scales the input to the RBF.

Return type:

float

property degree#

Degree of the added polynomial.

Return type:

int

fit(y, d)[source]#

Fit the RBF interpolator to the data.

Parameters:
• y (torch.Tensor) – (n, d) tensor of data points.
• d (torch.Tensor) – (n, m) tensor of data values.

forward(x)[source]#

Returns the interpolated data at the given points x.

Parameters:

x (torch.Tensor) – (n, d) tensor of points at which to query the interpolator.

Return type:

(n, m) torch.Tensor of interpolated data.

static kernel_vector(x, y, kernel_func)[source]#

Evaluate radial functions with centers y for all points in x.

Parameters:
• x (torch.Tensor) – (n, d) tensor of points.
• y (torch.Tensor) – (m, d) tensor of centers.
• kernel_func (str) – Radial basis function to use.

Return type:

(n, m) torch.Tensor of radial function values.

static polynomial_matrix(x, powers)[source]#

Evaluate monomials at x with given powers.

Parameters:
• x (torch.Tensor) – (n, d) tensor of points.
• powers (torch.Tensor) – (r, d) tensor of powers for each monomial.

Return type:

(n, r) torch.Tensor of monomial values.

static kernel_matrix(x, kernel_func)[source]#

Returns radial function values for all pairs of points in x.

Parameters:
• x (torch.Tensor) – (n, d) tensor of points.
• kernel_func (str) – Radial basis function to use.

Return type:

(n, n) torch.Tensor of radial function values.

static monomial_powers(ndim, degree)[source]#

Return the powers for each monomial in a polynomial.

Parameters:
• ndim (int) – Number of variables in the polynomial.
• degree (int) – Degree of the polynomial.

Return type:

(nmonos, ndim) torch.Tensor where each row contains the powers for each variable in a monomial.

static build(y, d, smoothing, kernel, epsilon, powers)[source]#

Build the RBF linear system.

Parameters:
• y (torch.Tensor) – (n, d) tensor of data points.
• d (torch.Tensor) – (n, m) tensor of data values.
• smoothing (torch.Tensor) – (n,) tensor of smoothing parameters.
• kernel (str) – Radial basis function to use.
• epsilon (float) – Shape parameter that scales the input to the RBF.
• powers (torch.Tensor) – (r, d) tensor of powers for each monomial.

Return type:

(lhs, rhs, shift, scale) where lhs and rhs are the left-hand side and right-hand side of the linear system, and shift and scale are the shift and scale parameters.

static solve(y, d, smoothing, kernel, epsilon, powers)[source]#

Build then solve the RBF linear system.

Parameters:
• y (torch.Tensor) – (n, d) tensor of data points.
• d (torch.Tensor) – (n, m) tensor of data values.
• smoothing (torch.Tensor) – (n,) tensor of smoothing parameters.
• kernel (str) – Radial basis function to use.
• epsilon (float) – Shape parameter that scales the input to the RBF.
• powers (torch.Tensor) – (r, d) tensor of powers for each monomial.

Raises:

ValueError – If the linear system is singular.

Return type:

(shift, scale, coeffs) where shift and scale are the shift and scale parameters, and coeffs are the coefficients of the interpolator.
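A minimal usage sketch of the fit/query cycle (assuming the class is importable from pina.model.layers, as in PINA 0.1.x; data and shapes are illustrative):

import torch
from pina.model.layers import RBFBlock  # assumed import path

rbf = RBFBlock(kernel="thin_plate_spline")

y = torch.rand(50, 2)                       # (n, d) data points
d = torch.sin(y.sum(dim=1, keepdim=True))   # (n, m) data values
rbf.fit(y, d)

x = torch.rand(200, 2)    # query points
vals = rbf(x)             # (200, 1) interpolated values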

Residual layer#

class ResidualBlock(input_dim, output_dim, hidden_dim, spectral_norm=False, activation=ReLU())[source]#

Bases: Module

    Residual block base class. Implementation of a residual block.

Parameters:
• input_dim (int) – Dimension of the input to pass to the feedforward linear layer.
• spectral_norm (bool) – Apply spectral normalization to feedforward layers, defaults to False.
• activation (torch.nn.Module) – Activation function after the first block.

forward(x)[source]#

    Forward pass for residual block layer.

Parameters:

x (torch.Tensor) – Input tensor for the residual layer.

Returns:

Output tensor for the residual layer.

Return type:

torch.Tensor
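A minimal usage sketch (assuming the class is importable from pina.model.layers, as in PINA 0.1.x):

import torch
from pina.model.layers import ResidualBlock  # assumed import path

block = ResidualBlock(input_dim=10, output_dim=10, hidden_dim=32)
x = torch.rand(16, 10)
y = block(x)  # shape [16, 10]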

LpLoss#

Module for Loss class

class LossInterface(reduction='mean')[source]#

Bases: _Loss

The abstract LossInterface class. All classes defining a PINA loss should inherit from this class.

Parameters:

reduction (str) – Specifies the reduction to apply to the output: none | mean | sum. When none: no reduction will be applied, mean: the sum of the output will be divided by the number of elements in the output, sum: the output will be summed. Note: size_average and reduce are in the process of being deprecated, and in the meantime, specifying either of those two args will override reduction. Default: mean.

abstract forward(input, target)[source]#

Forward method for loss function.

Parameters:
• input (torch.Tensor) – Input tensor.
• target (torch.Tensor) – Target tensor.

Returns:

Loss evaluation.

Return type:

torch.Tensor

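A minimal subclassing sketch (assuming LossInterface is importable from pina.loss, as in PINA 0.1.x; the loss itself is a hypothetical mean-absolute-error example). Since the base inherits from torch's _Loss, self.reduction is available in forward:

import torch
from pina.loss import LossInterface  # assumed import path

class SimpleMAE(LossInterface):
    # hypothetical loss: absolute error, reduced as requested
    def forward(self, input, target):
        losses = torch.abs(input - target)
        if self.reduction == "mean":
            return losses.mean()
        if self.reduction == "sum":
            return losses.sum()
        return losses  # 'none': no reduction

loss = SimpleMAE(reduction="mean")
value = loss(torch.rand(8, 1), torch.rand(8, 1))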
    -
    +
-
- - - - - + - + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/_rst/loss/lploss.html b/_rst/loss/lploss.html index d8af2153..37de66e5 100644 --- a/_rst/loss/lploss.html +++ b/_rst/loss/lploss.html @@ -1,120 +1,575 @@ + - - - - - LpLoss — PINA 0.1.1.post2407 documentation - - - - - - - - - - - - - - + + + + + + + + + LpLoss — PINA 0.1.2 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + +
+
+ + + +
+ + + + + + + + + + + + -
- - -
- -
-
-
-
-
- - - - - + - + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/_rst/loss/powerloss.html b/_rst/loss/powerloss.html index d8bb15b4..98ce7d66 100644 --- a/_rst/loss/powerloss.html +++ b/_rst/loss/powerloss.html @@ -1,120 +1,575 @@ + - - - - - PowerLoss — PINA 0.1.1.post2407 documentation - - - - - - - - - - - - - - + + + + + + + + + PowerLoss — PINA 0.1.2 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + +
+
+ + + +
+ + + + + + + + + + + + -
- - -
- -
-
-
-
-
- - - - - + - + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/_rst/models/avno.html b/_rst/models/avno.html index fd526d9b..6fce4e61 100644 --- a/_rst/models/avno.html +++ b/_rst/models/avno.html @@ -1,128 +1,574 @@ + - - - - - Averaging Neural Operator — PINA 0.1.1.post2407 documentation - - - - - - - - - - - - - - + + + + + + + + + Averaging Neural Operator — PINA 0.1.2 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + +
+
+ + + +
+ + + + + + + + + + + + -
- - -
- -
-
-
-
    -
  • - - -
  • - View page source -
  • + +
    + + +
    +
    + +
    +
    + +
    + +
    + + + + +
    + +
    + +
    -
    -
    - -
    -

Averaging Neural Operator#

class AveragingNeuralOperator(lifting_net, projecting_net, field_indices, coordinates_indices, n_layers=4, func=<class 'torch.nn.modules.activation.GELU'>)[source]#

Bases: KernelNeuralOperator

Implementation of Averaging Neural Operator.

Averaging Neural Operator is a general architecture for learning Operators. Unlike traditional machine learning methods, it is designed to map entire functions to other functions. Original reference: arXiv preprint arXiv:2304.13221.

Parameters:
• lifting_net (torch.nn.Module) – The neural network for lifting the input. It must take as input the input field and the coordinates at which the input field is evaluated. The output of the lifting net is chosen as embedding dimension of the problem.
• projecting_net (torch.nn.Module) – The neural network for projecting the output. It must take as input the embedding dimension (output of the lifting_net) plus the dimension of the coordinates.
• field_indices (list[str]) – the label of the fields in the input tensor.
• coordinates_indices (list[str]) – the label of the coordinates in the input tensor.
• n_layers (int) – number of hidden layers. Default is 4.
• func (torch.nn.Module) – the activation function to use, default to torch.nn.GELU.

forward(x)[source]#

Forward computation for Averaging Neural Operator. It performs a lifting of the input by the lifting_net. Then different layers of Averaging Neural Operator Blocks are applied. Finally the output is projected to the final dimensionality by the projecting_net.

Parameters:

x (torch.Tensor) – The input tensor for performing the computation. It expects a tensor \(B \times N \times D\), where \(B\) is the batch_size, \(N\) the number of points in the mesh, \(D\) the dimension of the problem, i.e. the sum of len(coordinates_indices)+len(field_indices).

Returns:

The output tensor obtained from the Averaging Neural Operator.

Return type:

torch.Tensor
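A construction sketch (assuming the class is importable from pina.model, as in PINA 0.1.x; the field/coordinate labels and layer widths are illustrative, and at solve time the inputs are expected to carry these labels):

import torch
from pina.model import AveragingNeuralOperator  # assumed import path

# One scalar field 'u' evaluated at 2D coordinates ('x', 'y'), so D = 3.
lifting = torch.nn.Linear(3, 24)          # field + coordinates -> embedding
projecting = torch.nn.Linear(24 + 2, 1)   # embedding + coordinates -> output

model = AveragingNeuralOperator(
    lifting_net=lifting,
    projecting_net=projecting,
    field_indices=["u"],
    coordinates_indices=["x", "y"],
)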
-

KernelNeuralOperator#

class KernelNeuralOperator(lifting_operator, integral_kernels, projection_operator)[source]#

Bases: Module

Base class for composing Neural Operators with integral kernels.

This is a base class for composing neural operators with multiple integral kernels. All neural operator models defined in PINA inherit from this class.

Parameters:
• lifting_operator (torch.nn.Module) – The lifting operator.
• integral_kernels (torch.nn.Module) – The integral kernels operator.
• projection_operator (torch.nn.Module) – The projection operator.

property lifting_operator#

The lifting operator property.

property projection_operator#

The projection operator property.

property integral_kernels#

The integral kernels operator property.

forward(x)[source]#

Forward computation for Base Neural Operator. It performs a lifting of the input by the lifting_operator. Then different layers of integral kernels are applied using integral_kernels. Finally the output is projected to the final dimensionality by the projection_operator.

Parameters:

x (torch.Tensor) – The input tensor for performing the computation. It expects a tensor \(B \times N \times D\), where \(B\) is the batch_size, \(N\) the number of points in the mesh, \(D\) the dimension of the problem. In particular \(D\) is the codomain of the function. For example a scalar function has \(D=1\), a 4-dimensional vector function \(D=4\).

Returns:

The output tensor obtained from the NO.

Return type:

torch.Tensor
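A minimal composition sketch (assuming the class is importable from pina.model, as in PINA 0.1.x; the three modules and their widths are illustrative):

import torch
from pina.model import KernelNeuralOperator  # assumed import path

# Compose a custom neural operator from three torch modules: the input
# (B x N x 2) is lifted to a hidden dimension, processed by the kernel
# layers, and projected back to one output per point.
no = KernelNeuralOperator(
    lifting_operator=torch.nn.Linear(2, 16),
    integral_kernels=torch.nn.Sequential(torch.nn.Linear(16, 16), torch.nn.Tanh()),
    projection_operator=torch.nn.Linear(16, 1),
)

y = no(torch.rand(4, 100, 2))  # shape [4, 100, 1]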

DeepONet#

class DeepONet(branch_net, trunk_net, input_indeces_branch_net, input_indeces_trunk_net, aggregator='*', reduction='+', scale=True, translation=True)[source]#

Bases: MIONet

The PINA implementation of DeepONet network.

DeepONet is a general architecture for learning Operators. Unlike traditional machine learning methods, DeepONet is designed to map entire functions to other functions. Original reference: 10.1038/s42256-021-00302-5

Parameters:
• branch_net (torch.nn.Module) – The neural network to use as branch model. It has to take as input a pina.label_tensor.LabelTensor or torch.Tensor. The number of dimensions of the output has to be the same of the trunk_net.
• trunk_net (torch.nn.Module) – The neural network to use as trunk model. It has to take as input a pina.label_tensor.LabelTensor or torch.Tensor. The number of dimensions of the output has to be the same of the branch_net.
• input_indeces_branch_net (list(int) or list(str)) – List of indices to extract from the input variable in the forward pass for the branch net. If a list of int is passed, the corresponding columns of the inner most entries are extracted. If a list of str is passed the variables of the corresponding pina.label_tensor.LabelTensor are extracted.
• input_indeces_trunk_net (list(int) or list(str)) – List of indices to extract from the input variable in the forward pass for the trunk net. If a list of int is passed, the corresponding columns of the inner most entries are extracted. If a list of str is passed the variables of the corresponding pina.label_tensor.LabelTensor are extracted.
• aggregator (str or Callable) – Aggregator to be used to aggregate partial results from the modules in nets. Partial results are aggregated component-wise. Available aggregators include sum: +, product: *, mean: mean, min: min, max: max.
• reduction (str or Callable) – Reduction to be used to reduce the aggregated result of the modules in nets to the desired output dimension. Available reductions include sum: +, product: *, mean: mean, min: min, max: max.
• scale (bool or Callable) – Scaling the final output before returning the forward pass, default True.
• translation (bool or Callable) – Translating the final output before returning the forward pass, default True.

Warning

In the forward pass we do not check if the input is an instance of pina.label_tensor.LabelTensor or torch.Tensor. A general rule is that for a pina.label_tensor.LabelTensor input both a list of integers and a list of strings can be passed for input_indeces_branch_net and input_indeces_trunk_net. Differently, for a torch.Tensor only a list of integers can be passed for input_indeces_branch_net and input_indeces_trunk_net.

Example:
>>> branch_net = FeedForward(input_dimensions=1, output_dimensions=10)
>>> trunk_net = FeedForward(input_dimensions=1, output_dimensions=10)
>>> model = DeepONet(branch_net=branch_net,
...                  trunk_net=trunk_net,
...                  input_indeces_branch_net=['x'],
...                  input_indeces_trunk_net=['t'],
...                  reduction='+',
...                  aggregator='*')

forward(x)[source]#

Defines the computation performed at every call.

Parameters:

x (LabelTensor or torch.Tensor) – The input tensor for the forward call.

Returns:

The output computed by the DeepONet model.

Return type:

LabelTensor or torch.Tensor

property branch_net#

The branch net for DeepONet.

property trunk_net#

The trunk net for DeepONet.

FeedForward#

class FeedForward(input_dimensions, output_dimensions, inner_size=20, n_layers=2, func=<class 'torch.nn.modules.activation.Tanh'>, layers=None, bias=True)[source]#

Bases: Module

The PINA implementation of feedforward network, also referred to as multilayer perceptron.

Parameters:
• input_dimensions (int) – The number of input components of the model. Expected tensor shape of the form \((*, d)\), where * means any number of dimensions including none, and \(d\) the input_dimensions.
• output_dimensions (int) – The number of output components of the model.
• inner_size (int) – number of neurons in the hidden layer(s). Default is 20.
• n_layers (int) – number of hidden layers. Default is 2.
• func (torch.nn.Module) – the activation function to use. If a single torch.nn.Module is passed, this is used as activation function after any layers, except the last one. If a list of Modules is passed, they are used as activation functions at any layers, in order.
• layers (list(int) | tuple(int)) – a list containing the number of neurons for any hidden layers. If specified, the parameters n_layers and inner_size are not considered.
• bias (bool) – If True the MLP will consider some bias.

forward(x)[source]#

Defines the computation performed at every call.

Parameters:

x (torch.Tensor) – The tensor to apply the forward pass.

Returns:

the output computed by the model.

Return type:

torch.Tensor
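A minimal usage sketch (assuming the class is importable from pina.model, as in PINA 0.1.x):

import torch
from pina.model import FeedForward  # assumed import path

# Two inputs -> one output, three hidden layers of 32 neurons each
net = FeedForward(input_dimensions=2, output_dimensions=1, inner_size=32, n_layers=3)
y = net(torch.rand(16, 2))  # shape [16, 1]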

ResidualFeedForward#

class ResidualFeedForward(input_dimensions, output_dimensions, inner_size=20, n_layers=2, func=<class 'torch.nn.modules.activation.Tanh'>, bias=True, transformer_nets=None)[source]#

Bases: Module

The PINA implementation of feedforward network with skip connections and transformer networks, as presented in Understanding and mitigating gradient pathologies in physics-informed neural networks. Original reference: 10.1137/20M1318043

Parameters:
• input_dimensions (int) – The number of input components of the model. Expected tensor shape of the form \((*, d)\), where * means any number of dimensions including none, and \(d\) the input_dimensions.
• output_dimensions (int) – The number of output components of the model.
• inner_size (int) – number of neurons in the hidden layer(s). Default is 20.
• n_layers (int) – number of hidden layers. Default is 2.
• func (torch.nn.Module) – the activation function to use. If a single torch.nn.Module is passed, this is used as activation function after any layers, except the last one. If a list of Modules is passed, they are used as activation functions at any layers, in order.
• bias (bool) – If True the MLP will consider some bias.
• transformer_nets (list | tuple) – a list or tuple containing the two torch.nn.Module which act as transformer network. The input dimension of the network must be the same as input_dimensions, and the output dimension must be the same as inner_size.

forward(x)[source]#

Defines the computation performed at every call.

Parameters:

x (torch.Tensor) – The tensor to apply the forward pass.

Returns:

the output computed by the model.

Return type:

torch.Tensor
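A minimal usage sketch highlighting the transformer_nets argument (assuming the class is importable from pina.model, as in PINA 0.1.x; the two linear transformers are illustrative and must map input_dimensions to inner_size):

import torch
from pina.model import ResidualFeedForward  # assumed import path

transformers = [torch.nn.Linear(2, 20), torch.nn.Linear(2, 20)]  # 2 -> inner_size=20
net = ResidualFeedForward(input_dimensions=2, output_dimensions=1,
                          transformer_nets=transformers)
y = net(torch.rand(16, 2))  # shape [16, 1]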

FNO#

class FNO(lifting_net, projecting_net, n_modes, dimensions=3, padding=8, padding_type='constant', inner_size=20, n_layers=2, func=<class 'torch.nn.modules.activation.Tanh'>, layers=None)[source]#

Bases: KernelNeuralOperator

The PINA implementation of Fourier Neural Operator network.

Fourier Neural Operator (FNO) is a general architecture for learning Operators. Unlike traditional machine learning methods, FNO is designed to map entire functions to other functions. Original reference: arXiv preprint arXiv:2010.08895.

Parameters:
• lifting_net (torch.nn.Module) – The neural network for lifting the input.
• projecting_net (torch.nn.Module) – The neural network for projecting the output.
• n_modes (int | list[int]) – Number of modes.
• dimensions (int) – Number of dimensions (1, 2, or 3).
• padding (int) – Padding size, defaults to 8.
• padding_type (str) – Type of padding, defaults to constant.
• inner_size (int) – Inner size, defaults to 20.
• n_layers (int) – Number of layers, defaults to 2.
• func (torch.nn.Module) – Activation function, defaults to nn.Tanh.
• layers (list[int]) – List of layer sizes, defaults to None.

forward(x)[source]#

Forward computation for Fourier Neural Operator. It performs a lifting of the input by the lifting_net. Then different layers of Fourier Blocks are applied. Finally the output is projected to the final dimensionality by the projecting_net.

Parameters:

x (torch.Tensor) – The input tensor for fourier block, depending on dimension in the initialization. In particular it is expected:
• 1D tensors: [batch, X, channels]
• 2D tensors: [batch, X, Y, channels]
• 3D tensors: [batch, X, Y, Z, channels]

Returns:

The output tensor obtained from FNO.

Return type:

torch.Tensor
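A minimal 1D construction sketch (assuming the class is importable from pina.model, as in PINA 0.1.x, and that the hidden width is inferred from the lifting and projecting nets; all widths are illustrative):

import torch
from pina.model import FNO  # assumed import path

# 1D problem: inputs of shape [batch, X, channels]
fno = FNO(lifting_net=torch.nn.Linear(1, 16),
          projecting_net=torch.nn.Linear(16, 1),
          n_modes=8,
          dimensions=1)
y = fno(torch.rand(4, 64, 1))  # shape [4, 64, 1]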

FourierIntegralKernel#

class FourierIntegralKernel(input_numb_fields, output_numb_fields, n_modes, dimensions=3, padding=8, padding_type='constant', inner_size=20, n_layers=2, func=<class 'torch.nn.modules.activation.Tanh'>, layers=None)[source]#

Bases: Module

Implementation of Fourier Integral Kernel network.

This class implements the Fourier Integral Kernel network, which is a PINA implementation of Fourier Neural Operator kernel network. Original reference: arXiv preprint arXiv:2010.08895.

Parameters:
• input_numb_fields (int) – Number of input fields.
• output_numb_fields (int) – Number of output fields.
• n_modes (int | list[int]) – Number of modes.
• dimensions (int) – Number of dimensions (1, 2, or 3).
• padding (int) – Padding size, defaults to 8.
• padding_type (str) – Type of padding, defaults to "constant".
• inner_size (int) – Inner size, defaults to 20.
• n_layers (int) – Number of layers, defaults to 2.
• func (torch.nn.Module) – Activation function, defaults to nn.Tanh.
• layers (list[int]) – List of layer sizes, defaults to None.

forward(x)[source]#

Forward computation for the Fourier Integral Kernel. Different layers of Fourier Blocks are applied to the input.

Parameters:

x (torch.Tensor) – The input tensor for fourier block, depending on dimension in the initialization. In particular it is expected:
• 1D tensors: [batch, X, channels]
• 2D tensors: [batch, X, Y, channels]
• 3D tensors: [batch, X, Y, Z, channels]

Returns:

The output tensor obtained from the kernels convolution.

Return type:

torch.Tensor

Low Rank Neural Operator#

class LowRankNeuralOperator(lifting_net, projecting_net, field_indices, coordinates_indices, n_kernel_layers, rank, inner_size=20, n_layers=2, func=<class 'torch.nn.modules.activation.Tanh'>, bias=True)[source]#

Bases: KernelNeuralOperator

Implementation of LowRank Neural Operator.

LowRank Neural Operator is a general architecture for learning Operators. Unlike traditional machine learning methods, it is designed to map entire functions to other functions.

Parameters:
• lifting_net (torch.nn.Module) – The neural network for lifting the input. It must take as input the input field and the coordinates at which the input field is evaluated. The output of the lifting net is chosen as embedding dimension of the problem.
• projecting_net (torch.nn.Module) – The neural network for projecting the output. It must take as input the embedding dimension (output of the lifting_net) plus the dimension of the coordinates.
• field_indices (list[str]) – the label of the fields in the input tensor.
• coordinates_indices (list[str]) – the label of the coordinates in the input tensor.
• n_kernel_layers (int) – number of kernel layers.
• rank (int) – the rank of the low rank approximation.
• inner_size (int) – number of neurons in the hidden layer(s). Default is 20.
• n_layers (int) – number of hidden layers. Default is 2.
• func (torch.nn.Module) – the activation function to use. If a single torch.nn.Module is passed, this is used as activation function after any layers, except the last one. If a list of Modules is passed, they are used as activation functions at any layers, in order.
• bias (bool) – If True the MLP will consider some bias.

forward(x)[source]#

Forward computation for LowRank Neural Operator. It performs a lifting of the input by the lifting_net. Then different layers of LowRank Neural Operator Blocks are applied. Finally the output is projected to the final dimensionality by the projecting_net.

Parameters:

x (torch.Tensor) – The input tensor, depending on dimension in the initialization. It expects a tensor \(B \times N \times D\), where \(B\) is the batch_size, \(N\) the number of points in the mesh, \(D\) the dimension of the problem, i.e. the sum of len(coordinates_indices)+len(field_indices).

Returns:

The output tensor obtained from the LowRank Neural Operator.

Return type:

torch.Tensor

    MIONet

    -
    -
    -class MIONet(networks, aggregator='*', reduction='+', scale=True, translation=True)[source]
    -

    Bases: torch.nn.modules.module.Module

    +
    + + + + + +
    + +
    +

MIONet

class MIONet(networks, aggregator='*', reduction='+', scale=True, translation=True)[source]

Bases: Module

    The PINA implementation of MIONet network.

MIONet is a general architecture for learning Operators defined on the tensor product of Banach spaces. Unlike traditional machine learning methods, MIONet learns operators, which map functions to functions.

Original reference: DOI 10.1137/22M1477751.

Parameters:
• networks (dict) – The neural networks to use as models. The dict takes as key a neural network, and as value the list of indices to extract from the input variable in the forward pass. If a list of int is passed, the corresponding columns are extracted; if a list of str is passed, the variables of the corresponding pina.label_tensor.LabelTensor are extracted. The torch.nn.Module model has to take as input a pina.label_tensor.LabelTensor or torch.Tensor. The default implementation consists of different branch nets and one trunk net.

• aggregator (str or Callable) – Aggregator to be used to aggregate partial results from the modules in nets. Partial results are aggregated component-wise. Available aggregators include sum: +, product: *, mean: mean, min: min, max: max.
• reduction (str or Callable) – Reduction to be used to reduce the aggregated result of the modules in nets to the desired output dimension. Available reductions include sum: +, product: *, mean: mean, min: min, max: max.
• scale (bool or Callable) – Scaling the final output before returning the forward pass, default True.
• translation (bool or Callable) – Translating the final output before returning the forward pass, default True.

    @@ -165,21 +610,21 @@

Warning

In the forward pass we do not check whether the input is an instance of pina.label_tensor.LabelTensor or torch.Tensor. A general rule is that for a pina.label_tensor.LabelTensor input both a list of integers and a list of strings can be passed for input_indeces_branch_net and input_indeces_trunk_net. Differently, for a torch.Tensor only a list of integers can be passed for input_indeces_branch_net and input_indeces_trunk_net.

Example:
>>> from pina.model import FeedForward, MIONet
>>> branch_net1 = FeedForward(input_dimensions=1, output_dimensions=10)
>>> branch_net2 = FeedForward(input_dimensions=2, output_dimensions=10)
>>> trunk_net = FeedForward(input_dimensions=1, output_dimensions=10)
>>> networks = {branch_net1 : ['x'],
...             branch_net2 : ['x', 'y'],
...             trunk_net : ['z']}
>>> model = MIONet(networks=networks,
...                reduction='+',
...                aggregator='*')
    @@ -219,94 +664,161 @@ 

forward(x)[source]

    Defines the computation performed at every call.

Parameters:
x (LabelTensor or torch.Tensor) – The input tensor for the forward call.

Returns:
The output computed by the MIONet model.

Return type:
LabelTensor or torch.Tensor
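A hedged usage sketch, reusing the model built in the Example above (batch size and values are illustrative); each network extracts its own variables from the input by label:

>>> import torch
>>> from pina import LabelTensor
>>> pts = LabelTensor(torch.rand(8, 3), ['x', 'y', 'z'])
>>> out = model(pts)  # branch nets read 'x' and ('x', 'y'), trunk net reads 'z'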

property aggregator

The aggregator function.

property reduction

The reduction function.

property scale

The scale factor.

property translation

The translation factor for MIONet.

property indeces_variables_extracted

The input indices for each model, in the form of a list.

property model

The models, in the form of a list.

diff --git a/_rst/models/multifeedforward.html b/_rst/models/multifeedforward.html
MultiFeedForward

class MultiFeedForward(ffn_dict)[source]

Bases: Module

    The PINA implementation of MultiFeedForward network.

This model allows the user to create a network with multiple FeedForward networks combined together. The user has to define the forward method, choosing how to combine the different FeedForward networks; a minimal sketch is given below.

Parameters:
ffn_dict (dict) – dictionary of FeedForward networks.

    Initialize internal Module state, shared by both nn.Module and ScriptModule.
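A minimal subclass sketch (assumption: each entry of ffn_dict maps an attribute name to FeedForward constructor arguments, and the resulting network is exposed as an attribute with that name; this convention is not confirmed by the docs above):

>>> from pina.model import MultiFeedForward
>>> class TwoBranchModel(MultiFeedForward):
...     def __init__(self):
...         # 'net_a' and 'net_b' become FeedForward attributes (assumed)
...         super().__init__({'net_a': {'input_dimensions': 2, 'output_dimensions': 1},
...                           'net_b': {'input_dimensions': 2, 'output_dimensions': 1}})
...     def forward(self, x):
...         # user-defined combination of the FeedForward outputs
...         return self.net_a(x) + self.net_b(x)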

diff --git a/_rst/models/network.html b/_rst/models/network.html
Network

class Network(model, input_variables, output_variables, extra_features=None)[source]

Bases: Module

Network class with a standard forward method and the possibility to pass extra features. This class is used internally in PINA to convert any torch.nn.Module into a PINA module.

Parameters:
• model (torch.nn.Module) – The torch model to convert into a PINA model.
• input_variables (list(str)) – The input variables of the AbstractProblem, whose type depends on the type of domain (spatial, temporal, and parameter).
• output_variables (list(str)) – The output variables of the AbstractProblem, whose type depends on the problem setting.
• extra_features (list(torch.nn.Module)) – List of torch models to augment the input, defaults to None.

    -
forward(x)[source]

Forward method for Network class. This class implements the standard forward method, and it adds the possibility to pass extra features. Labels are kept through the forward pass by this class, to enable pina.label_tensor.LabelTensor labels extraction.

Parameters:
x (torch.Tensor) – Input of the network.

Returns:
Output of the network.

Return type:
torch.Tensor

forward_map(x)[source]

Forward method for Network class when the input is a tuple. This is simply a forward pass with the input cast as a tuple or list of torch.Tensor. Labels are kept through the forward pass by this class, to enable pina.label_tensor.LabelTensor labels extraction.

Parameters:
x (list(torch.Tensor) | tuple(torch.Tensor)) – Input of the network.

Returns:
Output of the network.

Return type:
torch.Tensor
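A short construction sketch (the import path is an assumption; variable names are illustrative):

>>> import torch
>>> from pina.model.network import Network  # import path assumed
>>> torch_model = torch.nn.Sequential(
...     torch.nn.Linear(2, 10), torch.nn.Tanh(), torch.nn.Linear(10, 1))
>>> net = Network(model=torch_model,
...               input_variables=['x', 'y'],
...               output_variables=['u'])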

    @@ -184,39 +629,101 @@
diff --git a/_rst/models/spline.html b/_rst/models/spline.html
Spline

class Spline(order=4, knots=None, control_points=None)[source]

Bases: Module

Spline model.

Parameters:
• order (int) – the order of the spline.
• knots (torch.Tensor) – the knot vector.
• control_points (torch.Tensor) – the control points.
basis(x, k, i, t)[source]

Recursive function to compute the basis functions of the spline.

Parameters:
• x (torch.Tensor) – points to be evaluated.
• k (int) – spline degree.
• i (int) – the index of the interval.
• t (torch.Tensor) – vector of knots.

Returns:
the basis functions evaluated at x

Return type:
torch.Tensor

forward(x_)[source]

Forward pass of the spline model.

Parameters:
x_ (torch.Tensor) – points to be evaluated.

Returns:
the spline evaluated at x_

Return type:
torch.Tensor
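A minimal usage sketch (the import path, knot vector, and control-point shapes are assumptions): a cubic spline (order 4) on [0, 1] with an open-uniform knot vector and four scalar control points:

>>> import torch
>>> from pina.model import Spline  # import path assumed
>>> knots = torch.tensor([0., 0., 0., 0., 1., 1., 1., 1.])
>>> control_points = torch.tensor([[0.], [1.], [-1.], [0.]])
>>> spline = Spline(order=4, knots=knots, control_points=control_points)
>>> y = spline(torch.linspace(0, 1, 50))  # evaluate the curve on 50 points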

diff --git a/_rst/operators.html b/_rst/operators.html
Operators

Module for the vectorized implementation of differential operators. Differential operators are used to write any differential problem. These operators are implemented to work on different accelerators: CPU, GPU, TPU or MPS. All operators take as input a tensor onto which the operator is computed, a tensor with respect to which the operator is computed, the names of the output variables for which the operator is calculated (in case of multidimensional functions), and the names of the input variables with respect to which the operator is calculated.
grad(output_, input_, components=None, d=None)[source]

Perform gradient operation. The operator works for vectorial and scalar functions, with multiple input coordinates.

Parameters:
• output_ (LabelTensor) – the output tensor onto which computing the gradient.
• input_ (LabelTensor) – the input tensor with respect to which computing the gradient.
• components (list) – the names of the output variables for which the gradient is computed; must be a subset of the output labels. If None, all the output variables are considered. Default is None.
• d (list) – the names of the input variables with respect to which the gradient is computed; must be a subset of the input labels. If None, all the input variables are considered. Default is None.

Returns:
the gradient tensor.

Return type:
LabelTensor
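A short usage sketch for grad (the function u = x² + y² is illustrative; the explicit relabelling of u is an assumption about how labels propagate through tensor arithmetic):

>>> import torch
>>> from pina import LabelTensor
>>> from pina.operators import grad
>>> coords = LabelTensor(torch.rand(10, 2, requires_grad=True), ['x', 'y'])
>>> u = coords.extract(['x'])**2 + coords.extract(['y'])**2
>>> u.labels = ['u']
>>> grad_u = grad(u, coords)  # columns du/dx and du/dy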
    +
    +
    +div(output_, input_, components=None, d=None)[source]#

    Perform divergence operation. The operator works for vectorial functions, with multiple input coordinates.

    -
    Parameters
    +
    Parameters:
    • output (LabelTensor) – the output tensor onto which computing the divergence.

    • @@ -164,7 +618,7 @@ the input variables are considered. Default is None.

    -
    Raises
    +
    Raises:
    • TypeError – div operator works only for LabelTensor.

    • ValueError – div operator works only for vector fields.

    • @@ -172,22 +626,22 @@ respect to all coordinates.

    -
    Returns
    +
    Returns:

    the divergenge tensor.

    -
    Return type
    +
    Return type:

    LabelTensor

    -
    -
    -laplacian(output_, input_, components=None, d=None, method='std')[source]
    +
    +
laplacian(output_, input_, components=None, d=None, method='std')[source]

Compute Laplace operator. The operator works for vectorial and scalar functions, with multiple input coordinates.

Parameters:
• output_ (LabelTensor) – the output tensor onto which computing the Laplacian.
• input_ (LabelTensor) – the input tensor with respect to which computing the Laplacian.
• components (list) – the names of the output variables for which the Laplacian is computed; must be a subset of the output labels. If None, all the output variables are considered. Default is None.
• d (list) – the names of the input variables with respect to which the Laplacian is computed; must be a subset of the input labels. If None, all the input variables are considered. Default is None.
• method (str) – the method used to calculate the Laplacian, defaults to 'std'.

Raises:
• ValueError – for a vectorial field the derivative must be performed with respect to all coordinates.
• NotImplementedError – 'divgrad' not implemented as method.

Returns:
The tensor containing the result of the Laplacian operator.

Return type:
LabelTensor
    +
    +
    +advection(output_, input_, velocity_field, components=None, d=None)[source]#

    Perform advection operation. The operator works for vectorial functions, with multiple input coordinates.

    -
    Parameters
    +
    Parameters:
    • output (LabelTensor) – the output tensor onto which computing the advection.

    • @@ -240,48 +694,109 @@ the input variables are considered. Default is None.

    -
    Returns
    +
    Returns:

    the tensor containing the result of the advection operator.

    -
    Return type
    +
    Return type:

    LabelTensor

    -
    +
-
- - - - - + - + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/_rst/plotter.html b/_rst/plotter.html index 1504ef7c..4f185857 100644 --- a/_rst/plotter.html +++ b/_rst/plotter.html @@ -1,129 +1,581 @@ + - - - - - Plotter — PINA 0.1.1.post2407 documentation - - - - - - - - - - - - - - + + + + + + + + + Plotter — PINA 0.1.2 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + +
+
+ + + +
+ + + + + + + + + + + + -
- - -
- -
-
-
-
+
+ + + + + + + - + + + + + + + \ No newline at end of file diff --git a/_rst/problem/abstractproblem.html b/_rst/problem/abstractproblem.html index 72f78f80..f3f9ae40 100644 --- a/_rst/problem/abstractproblem.html +++ b/_rst/problem/abstractproblem.html @@ -1,177 +1,629 @@ + - - - - - AbstractProblem — PINA 0.1.1.post2407 documentation - - - - - - - - - - - - - - + + + + + + + + + AbstractProblem — PINA 0.1.2 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + +
+
+ + + +
+ + + + + + + + + + + + -
- - -
- -
-
-
-
    -
  • - - -
  • - View page source -
  • + +
    + + +
    +
    + +
    +
    + +
    + +
    + + + + +
    + +
    + +
    -
    -
    - -
    -

    AbstractProblem

    +
    + + + + + +
    + +
    +

AbstractProblem

Module for AbstractProblem class

class AbstractProblem[source]

Bases: object

The abstract AbstractProblem class. All the classes defining a PINA problem should inherit from this class.

In the definition of a PINA problem, the fundamental elements are: the output variables, the condition(s), and the domain(s) where the conditions are applied.

property domain

The domain(s) where the conditions of the AbstractProblem are valid. If more than one domain type is passed, a list of Location is returned.

Returns:
the domain(s) of self

Return type:
list[Location]

property input_variables

The input variables of the AbstractProblem, whose type depends on the type of domain (spatial, temporal, and parameter).

Returns:
the input variables of self

Return type:
list

abstract property output_variables

The output variables of the problem.

abstract property conditions

The conditions of the problem.

discretise_domain(n, mode='random', variables='all', locations='all')[source]

    Generate a set of points to span the Location of all the conditions of the problem.

Parameters:
• n (int) – Number of points to sample, see Note below for reference.
• mode (str) – the mode for sampling, defaults to random. Available modes include: random sampling, random; latin hypercube sampling, latin or lh; chebyshev sampling, chebyshev; grid sampling, grid.
• variables (str | list[str]) – problem’s variables to be sampled, defaults to ‘all’.
• locations (str) – problem’s locations from where to sample, defaults to ‘all’.

Example:
>>> pinn.discretise_domain(n=10, mode='grid')
>>> pinn.discretise_domain(n=10, mode='grid', locations=['bound1'])
>>> pinn.discretise_domain(n=10, mode='grid', variables=['x'])
add_points(new_points)[source]

    Adding points to the already sampled points.

Parameters:
new_points (dict) – a dictionary whose keys are the locations at which to add the points and whose values are the torch.Tensor points.
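A short sketch (the condition name 'D' and the shapes are illustrative):

>>> import torch
>>> from pina import LabelTensor
>>> extra_pts = LabelTensor(torch.rand(10, 1), ['x'])
>>> problem.add_points({'D': extra_pts})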

property have_sampled_points

Check if all points for Location are sampled.

property not_sampled_points

Check which points are not sampled.

diff --git a/_rst/problem/parametricproblem.html b/_rst/problem/parametricproblem.html
ParametricProblem

Module for the ParametricProblem class

class ParametricProblem[source]

Bases: AbstractProblem

    The class for the definition of parametric problems, i.e., problems with parameters among the input variables.

    Here’s an example of a spatial parametric ODE problem, i.e., a spatial ODE problem with an additional parameter alpha as coefficient of the derivative term.

Example:
>>> from pina.problem import SpatialProblem, ParametricProblem
>>> from pina.operators import grad
>>> from pina.equation import Equation, FixedValue
>>> from pina import Condition
>>> from pina.geometry import CartesianDomain
>>> import torch
>>>
>>> class ParametricODE(SpatialProblem, ParametricProblem):
>>>
>>>     output_variables = ['u']
>>>     spatial_domain = CartesianDomain({'x': [0, 1]})
>>>     parameter_domain = CartesianDomain({'alpha': [1, 10]})
>>>
>>>     def ode_equation(input_, output_):
>>>         u_x = grad(output_, input_, components=['u'], d=['x'])
>>>         u = output_.extract(['u'])
>>>         alpha = input_.extract(['alpha'])
>>>         return alpha * u_x - u
>>>
>>>     conditions = {
>>>         'x0': Condition(CartesianDomain({'x': 0, 'alpha':[1, 10]}), FixedValue(1.)),
>>>         'D': Condition(CartesianDomain({'x': [0, 1], 'alpha':[1, 10]}), Equation(ode_equation))}
abstract parameter_domain()[source]

    The parameters’ domain of the problem.

property parameters

    The parameters’ variables of the problem.

diff --git a/_rst/problem/spatialproblem.html b/_rst/problem/spatialproblem.html
SpatialProblem

Module for the SpatialProblem class

class SpatialProblem[source]

Bases: AbstractProblem

    The class for the definition of spatial problems, i.e., for problems with spatial input variables.

    Here’s an example of a spatial 1-dimensional ODE problem.

Example:
>>> from pina.problem import SpatialProblem
>>> from pina.operators import grad
>>> from pina.equation import Equation, FixedValue
>>> from pina import Condition
>>> from pina.geometry import CartesianDomain
>>> import torch
>>>
>>> class SpatialODE(SpatialProblem):
>>>
>>>     output_variables = ['u']
>>>     spatial_domain = CartesianDomain({'x': [0, 1]})
>>>
>>>     def ode_equation(input_, output_):
>>>         u_x = grad(output_, input_, components=['u'], d=['x'])
>>>         u = output_.extract(['u'])
>>>         return u_x - u
>>>
>>>     conditions = {
>>>         'x0': Condition(CartesianDomain({'x': 0}), FixedValue(1.)),
>>>         'D': Condition(CartesianDomain({'x': [0, 1]}), Equation(ode_equation))}
abstract spatial_domain()[source]

    The spatial domain of the problem.

property spatial_variables

    The spatial input variables of the problem.

diff --git a/_rst/problem/timedepproblem.html b/_rst/problem/timedepproblem.html
TimeDependentProblem

Module for the TimeDependentProblem class

class TimeDependentProblem[source]

Bases: AbstractProblem

    The class for the definition of time-dependent problems, i.e., for problems depending on time.

    Here’s an example of a 1D wave problem.

Example:
>>> from pina.problem import SpatialProblem, TimeDependentProblem
>>> from pina.operators import grad, laplacian
>>> from pina.equation import Equation, FixedValue
>>> from pina import Condition
>>> from pina.geometry import CartesianDomain
>>> import torch
>>>
>>> class Wave(SpatialProblem, TimeDependentProblem):
>>>
>>>     output_variables = ['u']
>>>     spatial_domain = CartesianDomain({'x': [0, 3]})
>>>     temporal_domain = CartesianDomain({'t': [0, 1]})
>>>
>>>     def wave_equation(input_, output_):
>>>         u_t = grad(output_, input_, components=['u'], d=['t'])
>>>         u_tt = grad(u_t, input_, components=['dudt'], d=['t'])
>>>         delta_u = laplacian(output_, input_, components=['u'], d=['x'])
>>>         return delta_u - u_tt
>>>
>>>     def initial_condition(input_, output_):
>>>         u_expected = (-3*torch.sin(2*torch.pi*input_.extract(['x']))
>>>             + 5*torch.sin(8/3*torch.pi*input_.extract(['x'])))
>>>         u = output_.extract(['u'])
>>>         return u - u_expected
>>>
>>>     conditions = {
>>>         't0': Condition(CartesianDomain({'x': [0, 3], 't': 0}), Equation(initial_condition)),
>>>         'gamma1': Condition(CartesianDomain({'x': 0, 't': [0, 1]}), FixedValue(0.)),
>>>         'gamma2': Condition(CartesianDomain({'x': 3, 't': [0, 1]}), FixedValue(0.)),
>>>         'D': Condition(CartesianDomain({'x': [0, 3], 't': [0, 1]}), Equation(wave_equation))}
abstract temporal_domain()[source]

    The temporal domain of the problem.

property temporal_variable

    The time variable of the problem.

diff --git a/_rst/solvers/basepinn.html b/_rst/solvers/basepinn.html
PINNInterface

class PINNInterface(models, problem, optimizers, optimizers_kwargs, extra_features, loss)[source]

Bases: SolverInterface

    Base PINN solver class. This class implements the Solver Interface for Physics Informed Neural Network solvers.

This class can be used to define PINNs with multiple optimizers and/or models.

Parameters:
• models (torch.nn.Module) – Multiple torch neural network model instances.
• problem (AbstractProblem) – The formulation of the problem.
• optimizers (list(torch.optim.Optimizer)) – A list of neural network optimizers to use.
• optimizers_kwargs (list(dict)) – A list of optimizer constructor keyword args.
• extra_features (list(torch.nn.Module)) – The additional input features to use as augmented input.
• loss (torch.nn.Module) – The loss function used as minimizer, default torch.nn.MSELoss.

training_step(batch, _)[source]

The Physics Informed Solver training step. This function takes care of the physics informed training step, and it should not be overridden unless intentionally. It handles the batching mechanism, the workload division for the various conditions, the inverse problem clamping, and loggers.

Parameters:
• batch (tuple) – The batch element in the dataloader.
• batch_idx (int) – The batch index.

Returns:
The sum of the loss functions.

Return type:
LabelTensor

loss_data(input_tensor, output_tensor)[source]

The data loss for the PINN solver. It computes the loss between the network output and the true solution. This function should not be overridden unless intentionally.

Parameters:
• input_tensor (LabelTensor) – The input to the neural networks.
• output_tensor (LabelTensor) – The true solution to compare with the network solution.

Returns:
The residual loss averaged on the input coordinates.

Return type:
torch.Tensor

abstract loss_phys(samples, equation)[source]

Computes the physics loss for the physics informed solver based on given samples and equation. This method must be overridden by all inherited classes; it is the core of the definition of a new physics informed solver.

Parameters:
• samples (LabelTensor) – The samples to evaluate the physics loss.
• equation (EquationInterface) – The governing equation representing the physics.

Returns:
The physics loss calculated based on given samples and equation.

Return type:
LabelTensor
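A minimal sketch of how an inherited solver could override loss_phys, composed only from the methods documented on this page (inheriting from the concrete PINN class here is an illustrative choice, not a requirement):

>>> import torch
>>> from pina.solvers import PINN
>>> class MyPINN(PINN):
...     def loss_phys(self, samples, equation):
...         # residual of the governing equation at the sample points
...         residual = self.compute_residual(samples, equation)
...         # compare the residual against zero with the solver loss
...         loss_value = self.loss(torch.zeros_like(residual), residual)
...         self.store_log(loss_value=float(loss_value))
...         return loss_value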

compute_residual(samples, equation)[source]

    Compute the residual for Physics Informed learning. This function returns the Equation specified in the Condition evaluated at the samples points.

Parameters:
• samples (LabelTensor) – The samples to evaluate the physics loss.
• equation (EquationInterface) – The governing equation representing the physics.

Returns:
The residual of the neural network solution.

Return type:
LabelTensor

store_log(loss_value)[source]

Stores the loss value in the logger. This function should be called for all conditions; it automatically handles the storing of condition names. It must be used anytime a specific variable should be stored for a specific condition. A simple example is to use the variable to store the residual.

Parameters:
• name (str) – The name of the loss.
• loss_value (torch.Tensor) – The value of the loss.

save_logs_and_release()[source]

At the end of each epoch we free the stored losses. This function should not be overridden unless intentionally.

property loss

    Loss used for training.

property current_condition_name

    Returns the condition name. This function can be used inside the loss_phys() to extract the condition at which the loss is computed.

    @@ -286,39 +731,107 @@

diff --git a/_rst/solvers/causalpinn.html b/_rst/solvers/causalpinn.html
CausalPINN

class CausalPINN(problem, model, extra_features=None, loss=MSELoss(), optimizer=<class 'torch.optim.adam.Adam'>, optimizer_kwargs={'lr': 0.001}, scheduler=<class 'torch.optim.lr_scheduler.ConstantLR'>, scheduler_kwargs={'factor': 1, 'total_iters': 0}, eps=100)[source]

Bases: PINN

Causal Physics Informed Neural Network (PINN) solver class. This class implements Causal Physics Informed Neural Network solvers, using a user specified model to solve a specific problem; the physics loss is weighted in a causal, time-ordered fashion controlled by the exponential decay parameter eps.

This class is only compatible with problems inherited from the TimeDependentProblem class.

Parameters:
• problem (AbstractProblem) – The formulation of the problem.
• model (torch.nn.Module) – The neural network model to use.
• loss (torch.nn.Module) – The loss function used as minimizer, default torch.nn.MSELoss.
• extra_features (torch.nn.Module) – The additional input features to use as augmented input.
• optimizer (torch.optim.Optimizer) – The neural network optimizer to use; default is torch.optim.Adam.
• optimizer_kwargs (dict) – Optimizer constructor keyword args.
• scheduler (torch.optim.LRScheduler) – Learning rate scheduler.
• scheduler_kwargs (dict) – LR scheduler constructor keyword args.
• eps (int | float) – The exponential decay parameter. Note that this value is kept fixed during the training, but can be changed by means of a callback, e.g. for annealing.

    -
loss_phys(samples, equation)[source]
    Computes the physics loss for the Causal PINN solver based on given samples and equation.

    -
    Parameters
    +
    Parameters:
    • samples (LabelTensor) – The samples to evaluate the physics loss.

    • equation (EquationInterface) – The governing equation representing the physics.

    -
    Returns
    +
    Returns:

    The physics loss calculated based on given samples and equation.

    -
    Return type
    +
    Return type:

    LabelTensor

    -
property eps
    The exponential decay parameter.

    -
    +
-
- - - - - + - + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/_rst/solvers/competitivepinn.html b/_rst/solvers/competitivepinn.html index e941a296..68fad5d7 100644 --- a/_rst/solvers/competitivepinn.html +++ b/_rst/solvers/competitivepinn.html @@ -1,128 +1,574 @@ + - - - - - CompetitivePINN — PINA 0.1.1.post2407 documentation - - - - - - - - - - - - - - + + + + + + + + + CompetitivePINN — PINA 0.1.2 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + +
+
+ + + +
+ + + + + + + + + + + + -
- - -
- -
-
-
-
    -
  • - - -
  • - View page source -
  • + +
    + + +
    +
    + +
    +
    + +
    + +
    + + + + +
    + +
    + +
    -
    -
    - -
    -

    CompetitivePINN

    -
    -
    -class CompetitivePINN(problem, model, discriminator=None, loss=MSELoss(), optimizer_model=<class 'torch.optim.adam.Adam'>, optimizer_model_kwargs={'lr': 0.001}, optimizer_discriminator=<class 'torch.optim.adam.Adam'>, optimizer_discriminator_kwargs={'lr': 0.001}, scheduler_model=<class 'torch.optim.lr_scheduler.ConstantLR'>, scheduler_model_kwargs={'factor': 1, 'total_iters': 0}, scheduler_discriminator=<class 'torch.optim.lr_scheduler.ConstantLR'>, scheduler_discriminator_kwargs={'factor': 1, 'total_iters': 0})[source]
    -

    Bases: pina.solvers.pinns.basepinn.PINNInterface

    +
    + + + + + +
    + +
    +

CompetitivePINN

class CompetitivePINN(problem, model, discriminator=None, loss=MSELoss(), optimizer_model=<class 'torch.optim.adam.Adam'>, optimizer_model_kwargs={'lr': 0.001}, optimizer_discriminator=<class 'torch.optim.adam.Adam'>, optimizer_discriminator_kwargs={'lr': 0.001}, scheduler_model=<class 'torch.optim.lr_scheduler.ConstantLR'>, scheduler_model_kwargs={'factor': 1, 'total_iters': 0}, scheduler_discriminator=<class 'torch.optim.lr_scheduler.ConstantLR'>, scheduler_discriminator_kwargs={'factor': 1, 'total_iters': 0})[source]

Bases: PINNInterface

Competitive Physics Informed Neural Network (PINN) solver class. This class implements Competitive Physics Informed Neural Network solvers, using a user specified model to solve a specific problem.

This class does not currently support the possibility to pass extra_feature.

Parameters:
• problem (AbstractProblem) – The formulation of the problem.
• model (torch.nn.Module) – The neural network model to use for the model.
• discriminator (torch.nn.Module) – The neural network model to use for the discriminator. If None, the discriminator network will have the same architecture as the model network.
• loss (torch.nn.Module) – The loss function used as minimizer, default torch.nn.MSELoss.
• optimizer_model (torch.optim.Optimizer) – The neural network optimizer to use for the model network, default is torch.optim.Adam.
• optimizer_model_kwargs (dict) – Optimizer constructor keyword args. for the model.
• optimizer_discriminator (torch.optim.Optimizer) – The neural network optimizer to use for the discriminator network, default is torch.optim.Adam.
• optimizer_discriminator_kwargs (dict) – Optimizer constructor keyword args. for the discriminator.
• scheduler_model (torch.optim.LRScheduler) – Learning rate scheduler for the model.
• scheduler_model_kwargs (dict) – LR scheduler constructor keyword args. for the model.
• scheduler_discriminator (torch.optim.LRScheduler) – Learning rate scheduler for the discriminator.
• scheduler_discriminator_kwargs (dict) – LR scheduler constructor keyword args. for the discriminator.
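A hedged construction sketch (problem and model are assumed defined as in the other solver examples):

>>> from pina.solvers import CompetitivePINN
>>> # discriminator=None: a discriminator with the model's architecture is used
>>> solver = CompetitivePINN(problem=problem, model=model)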

forward(x)[source]

      Forward pass implementation for the PINN solver. It returns the function evaluation \(\mathbf{u}(\mathbf{x})\) at the control points \(\mathbf{x}\).

Parameters:
x (LabelTensor) – Input tensor for the PINN solver. It expects a tensor \(N \times D\), where \(N\) is the number of points in the mesh and \(D\) the dimension of the problem.

Returns:
PINN solution evaluated at the control points.

Return type:
LabelTensor

loss_phys(samples, equation)[source]

      Computes the physics loss for the Competitive PINN solver based on given samples and equation.

Parameters:
• samples (LabelTensor) – The samples to evaluate the physics loss.
• equation (EquationInterface) – The governing equation representing the physics.

Returns:
The physics loss calculated based on given samples and equation.

Return type:
LabelTensor

loss_data(input_tensor, output_tensor)[source]

      The data loss for the PINN solver. It computes the loss between the network output against the true solution.

Parameters:
• input_tensor (LabelTensor) – The input to the neural networks.
• output_tensor (LabelTensor) – The true solution to compare with the network solution.

Returns:
The computed data loss.

Return type:
torch.Tensor

configure_optimizers()[source]

      Optimizer configuration for the Competitive PINN solver.

Returns:
The optimizers and the schedulers.

Return type:
tuple(list, list)

on_train_batch_end(outputs, batch, batch_idx)[source]

This method is called at the end of each training batch, and overrides the PyTorch Lightning implementation for logging the checkpoints.

Parameters:
• outputs (torch.Tensor) – The output from the model for the current batch.
• batch (tuple) – The current batch of data.
• batch_idx (int) – The index of the current batch.

Returns:
Whatever is returned by the parent method on_train_batch_end.

Return type:
Any

property neural_net

Returns the neural network model.

Returns:
The neural network model.

Return type:
torch.nn.Module

property discriminator

Returns the discriminator model (if applicable).

Returns:
The discriminator model.

Return type:
torch.nn.Module

property optimizer_model

Returns the optimizer associated with the neural network model.

Returns:
The optimizer for the neural network model.

Return type:
torch.optim.Optimizer

property optimizer_discriminator

Returns the optimizer associated with the discriminator (if applicable).

Returns:
The optimizer for the discriminator.

Return type:
torch.optim.Optimizer

property scheduler_model

Returns the scheduler associated with the neural network model.

Returns:
The scheduler for the neural network model.

Return type:
torch.optim.lr_scheduler._LRScheduler

property scheduler_discriminator

Returns the scheduler associated with the discriminator (if applicable).

Returns:
The scheduler for the discriminator.

Return type:
torch.optim.lr_scheduler._LRScheduler

      @@ -380,39 +826,110 @@

diff --git a/_rst/solvers/garom.html b/_rst/solvers/garom.html
GAROM

class GAROM(problem, generator, discriminator, loss=None, optimizer_generator=<class 'torch.optim.adam.Adam'>, optimizer_generator_kwargs={'lr': 0.001}, optimizer_discriminator=<class 'torch.optim.adam.Adam'>, optimizer_discriminator_kwargs={'lr': 0.001}, scheduler_generator=<class 'torch.optim.lr_scheduler.ConstantLR'>, scheduler_generator_kwargs={'factor': 1, 'total_iters': 0}, scheduler_discriminator=<class 'torch.optim.lr_scheduler.ConstantLR'>, scheduler_discriminator_kwargs={'factor': 1, 'total_iters': 0}, gamma=0.3, lambda_k=0.001, regularizer=False)[source]

Bases: SolverInterface

GAROM solver class. This class implements the Generative Adversarial Reduced Order Model solver, using user specified models to solve a specific order reduction problem.

Original reference: arXiv preprint arXiv:2305.15881.

Parameters:
• problem (AbstractProblem) – The formulation of the problem.
• generator (torch.nn.Module) – The neural network model to use for the generator.
• discriminator (torch.nn.Module) – The neural network model to use for the discriminator.
• loss (torch.nn.Module) – The loss function used as minimizer, default None.
• optimizer_generator (torch.optim.Optimizer) – The optimizer to use for the generator, default is torch.optim.Adam.
• optimizer_generator_kwargs (dict) – Optimizer constructor keyword args. for the generator.
• optimizer_discriminator (torch.optim.Optimizer) – The optimizer to use for the discriminator, default is torch.optim.Adam.
• optimizer_discriminator_kwargs (dict) – Optimizer constructor keyword args. for the discriminator.
• scheduler_generator (torch.optim.LRScheduler) – Learning rate scheduler for the generator.
• scheduler_generator_kwargs (dict) – LR scheduler constructor keyword args. for the generator.
• scheduler_discriminator (torch.optim.LRScheduler) – Learning rate scheduler for the discriminator.
• scheduler_discriminator_kwargs (dict) – LR scheduler constructor keyword args. for the discriminator.
• gamma (float) – Ratio of expected loss for generator and discriminator, defaults to 0.3.
• lambda_k (float) – Learning rate for control theory optimization, defaults to 0.001.
• regularizer (bool) – Flag to enable a regularization term in the GAROM loss, defaults to False.
forward(x, mc_steps=20, variance=False)[source]
    Forward step for GAROM solver

    -
    Parameters
    +
    Parameters:
      -
    • x (torch.Tensor) – The input tensor.

    • +
    • x (torch.Tensor) – The input tensor.

    • mc_steps (int) – Number of montecarlo samples to approximate the expected value, defaults to 20.

    • variance (bool) – Returining also the sample variance of the solution, defaults to False.

    -
    Returns
    +
    Returns:

    The expected value of the generator distribution. If variance=True also the sample variance is returned.

    -
    Return type
    -

    torch.Tensor | tuple(torch.Tensor, torch.Tensor)

    +
    Return type:
    +

    torch.Tensor | tuple(torch.Tensor, torch.Tensor)

    -
configure_optimizers()[source]

Optimizer configuration for the GAROM solver.

Returns:
The optimizers and the schedulers.

Return type:
tuple(list, list)
    -
training_step(batch, batch_idx)[source]

GAROM solver training step.

Parameters:
• batch (tuple) – The batch element in the dataloader.
• batch_idx (int) – The batch index.

Returns:
The sum of the loss functions.

Return type:
LabelTensor
    @@ -231,39 +676,102 @@

    GAROM

    -
    +
-
- - - - - + - + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/_rst/solvers/gpinn.html b/_rst/solvers/gpinn.html index 3640d95b..a5b7663c 100644 --- a/_rst/solvers/gpinn.html +++ b/_rst/solvers/gpinn.html @@ -1,128 +1,574 @@ + - - - - - GPINN — PINA 0.1.1.post2407 documentation - - - - - - - - - - - - - - + + + + + + + + + GPINN — PINA 0.1.2 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + +
+
+ + + +
+ + + + + + + + + + + + -
- - -
- -
-
-
-
    -
  • - - -
  • - View page source -
  • + +
    + + +
    +
    + +
    +
    + +
    + +
    + + + + +
    + +
    + +
    -
    -
    - -
    -

    GPINN

    -
    -
    -class GPINN(problem, model, extra_features=None, loss=MSELoss(), optimizer=<class 'torch.optim.adam.Adam'>, optimizer_kwargs={'lr': 0.001}, scheduler=<class 'torch.optim.lr_scheduler.ConstantLR'>, scheduler_kwargs={'factor': 1, 'total_iters': 0})[source]
    -

    Bases: pina.solvers.pinns.pinn.PINN

    +
    + + + + + +
    + +
    +

GPINN

class GPINN(problem, model, extra_features=None, loss=MSELoss(), optimizer=<class 'torch.optim.adam.Adam'>, optimizer_kwargs={'lr': 0.001}, scheduler=<class 'torch.optim.lr_scheduler.ConstantLR'>, scheduler_kwargs={'factor': 1, 'total_iters': 0})[source]

Bases: PINN

Gradient Physics Informed Neural Network (GPINN) solver class. This class implements Gradient Physics Informed Neural Network solvers, using a user specified model to solve a specific problem.

This class is only compatible with problems inherited from the SpatialProblem class.

Parameters:
• problem (AbstractProblem) – The formulation of the problem; it must inherit from SpatialProblem.
• model (torch.nn.Module) – The neural network model to use.
• extra_features (torch.nn.Module) – The additional input features to use as augmented input.
• loss (torch.nn.Module) – The loss function used as minimizer, default torch.nn.MSELoss.
• optimizer (torch.optim.Optimizer) – The neural network optimizer to use; default is torch.optim.Adam.
• optimizer_kwargs (dict) – Optimizer constructor keyword args.
• scheduler (torch.optim.LRScheduler) – Learning rate scheduler.
• scheduler_kwargs (dict) – LR scheduler constructor keyword args.
loss_phys(samples, equation)[source]
    Computes the physics loss for the GPINN solver based on given samples and equation.

    -
    Parameters
    +
    Parameters:
    • samples (LabelTensor) – The samples to evaluate the physics loss.

    • equation (EquationInterface) – The governing equation representing the physics.

    -
    Returns
    +
    Returns:

    The physics loss calculated based on given samples and equation.

    -
    Return type
    +
    Return type:

    LabelTensor

    @@ -209,39 +655,100 @@

    GPINN

    -
    +
-
- - - - - + - + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/_rst/solvers/pinn.html b/_rst/solvers/pinn.html index 4313e3e4..bde367e5 100644 --- a/_rst/solvers/pinn.html +++ b/_rst/solvers/pinn.html @@ -1,128 +1,574 @@ + - - - - - PINN — PINA 0.1.1.post2407 documentation - - - - - - - - - - - - - - + + + + + + + + + PINN — PINA 0.1.2 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + +
+
+ + + +
+ + + + + + + + + + + + -
- - -
- -
-
-
-
    -
  • - - -
  • - View page source -
  • + +
    + + +
    +
    + +
    +
    + +
    + +
    + + + + +
    + +
    + +
    -
    -
    - -
    -

    PINN

    -
    -
    -class PINN(problem, model, extra_features=None, loss=MSELoss(), optimizer=<class 'torch.optim.adam.Adam'>, optimizer_kwargs={'lr': 0.001}, scheduler=<class 'torch.optim.lr_scheduler.ConstantLR'>, scheduler_kwargs={'factor': 1, 'total_iters': 0})[source]
    -

    Bases: pina.solvers.pinns.basepinn.PINNInterface

    +
    + + + + + +
    + +
    +

PINN

class PINN(problem, model, extra_features=None, loss=MSELoss(), optimizer=<class 'torch.optim.adam.Adam'>, optimizer_kwargs={'lr': 0.001}, scheduler=<class 'torch.optim.lr_scheduler.ConstantLR'>, scheduler_kwargs={'factor': 1, 'total_iters': 0})[source]

Bases: PINNInterface

Physics Informed Neural Network (PINN) solver class. This class implements Physics Informed Neural Network solvers, using a user specified model to solve a specific problem.

Original reference: DOI: 10.1038.

Parameters:
• problem (AbstractProblem) – The formulation of the problem.
• model (torch.nn.Module) – The neural network model to use.
• extra_features (torch.nn.Module) – The additional input features to use as augmented input.
• loss (torch.nn.Module) – The loss function used as minimizer, default torch.nn.MSELoss.
• optimizer (torch.optim.Optimizer) – The neural network optimizer to use; default is torch.optim.Adam.
• optimizer_kwargs (dict) – Optimizer constructor keyword args.
• scheduler (torch.optim.LRScheduler) – Learning rate scheduler.
• scheduler_kwargs (dict) – LR scheduler constructor keyword args.
forward(x)[source]
    Forward pass implementation for the PINN solver. It returns the function evaluation \(\mathbf{u}(\mathbf{x})\) at the control points \(\mathbf{x}\).

    -
    Parameters
    +
    Parameters:

    x (LabelTensor) – Input tensor for the PINN solver. It expects a tensor \(N \times D\), where \(N\) the number of points in the mesh, \(D\) the dimension of the problem,

    -
    Returns
    +
    Returns:

    PINN solution evaluated at contro points.

    -
    Return type
    +
    Return type:

    LabelTensor

    -
loss_phys(samples, equation)[source]
    Computes the physics loss for the PINN solver based on given samples and equation.

    -
    Parameters
    +
    Parameters:
    • samples (LabelTensor) – The samples to evaluate the physics loss.

    • equation (EquationInterface) – The governing equation representing the physics.

    -
    Returns
    +
    Returns:

    The physics loss calculated based on given samples and equation.

    -
    Return type
    +
    Return type:

    LabelTensor

    -
configure_optimizers()[source]

Optimizer configuration for the PINN solver.

Returns:
The optimizers and the schedulers.

Return type:
tuple(list, list)
    -
property scheduler

Scheduler for the PINN training.

property neural_net

Neural network for the PINN training.
    -
    +
-
- - - - - + - + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/_rst/solvers/rba_pinn.html b/_rst/solvers/rba_pinn.html index 10231ff1..5f083d1e 100644 --- a/_rst/solvers/rba_pinn.html +++ b/_rst/solvers/rba_pinn.html @@ -1,128 +1,574 @@ + - - - - - RBAPINN — PINA 0.1.1.post2407 documentation - - - - - - - - - - - - - - + + + + + + + + + RBAPINN — PINA 0.1.2 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + +
+
+ + + +
+ + + + + + + + + + + + -
- - -
- -
-
-
-
    -
  • - - -
  • - View page source -
  • + +
    + + +
    +
    + +
    +
    + +
    + +
    + + + + +
    + +
    + +
    -
    -
    - -
    -

    RBAPINN

    -
    -
    -class RBAPINN(problem, model, extra_features=None, loss=MSELoss(), optimizer=<class 'torch.optim.adam.Adam'>, optimizer_kwargs={'lr': 0.001}, scheduler=<class 'torch.optim.lr_scheduler.ConstantLR'>, scheduler_kwargs={'factor': 1, 'total_iters': 0}, eta=0.001, gamma=0.999)[source]
    -

    Bases: pina.solvers.pinns.pinn.PINN

    +
    + + + + + +
    + +
    +

    RBAPINN#

    +
    +
    +class RBAPINN(problem, model, extra_features=None, loss=MSELoss(), optimizer=<class 'torch.optim.adam.Adam'>, optimizer_kwargs={'lr': 0.001}, scheduler=<class 'torch.optim.lr_scheduler.ConstantLR'>, scheduler_kwargs={'factor': 1, 'total_iters': 0}, eta=0.001, gamma=0.999)[source]#
    +

    Bases: PINN

Residual-based Attention PINN (RBAPINN) solver class. This class implements Residual-based Attention Physics Informed Neural Network solvers, using a user-specified model to solve a specific @@ -169,45 +615,45 @@

    RBAPINN -
    Parameters
    +
    Parameters:

    -
    -
    -loss_phys(samples, equation)[source]
    +
    +
    +loss_phys(samples, equation)[source]#

    Computes the physics loss for the residual-based attention PINN solver based on given samples and equation.

    -
    Parameters
    +
    Parameters:
    • samples (LabelTensor) – The samples to evaluate the physics loss.

    • equation (EquationInterface) – The governing equation representing the physics.

    -
    Returns
    +
    Returns:

    The physics loss calculated based on given samples and equation.

    -
    Return type
    +
    Return type:

    LabelTensor
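As a sketch, the two attention hyperparameters in the signature above (eta and gamma) are set at construction time; problem and model are assumed to be defined as for a plain PINN:

.. code:: python

   from pina.solvers import RBAPINN

   # eta scales the residual-based weight update, gamma is its decay factor
   solver = RBAPINN(problem=problem, model=model, eta=0.001, gamma=0.999)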

    @@ -215,39 +661,100 @@

    RBAPINN - - -

    + + + + + + +
    + + + +
+ - +
+ + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/_rst/solvers/rom.html b/_rst/solvers/rom.html index 80c77bce..355371b2 100644 --- a/_rst/solvers/rom.html +++ b/_rst/solvers/rom.html @@ -1,128 +1,574 @@ + - - - - - ReducedOrderModelSolver — PINA 0.1.1.post2407 documentation - - - - - - - - - - - - - - + + + + + + + + + ReducedOrderModelSolver — PINA 0.1.2 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + +
+
+ + + +
+ + + + + + + + + + + + -
- - -
- -
-
-
-
    -
  • - - -
  • - View page source -
  • + +
    + + +
    +
    + +
    +
    + +
    + +
    + + + + +
    + +
    + +
    -
    -
    - -
    -

    ReducedOrderModelSolver

    -
    -
    -class ReducedOrderModelSolver(problem, reduction_network, interpolation_network, loss=MSELoss(), optimizer=<class 'torch.optim.adam.Adam'>, optimizer_kwargs={'lr': 0.001}, scheduler=<class 'torch.optim.lr_scheduler.ConstantLR'>, scheduler_kwargs={'factor': 1, 'total_iters': 0})[source]
    -

    Bases: pina.solvers.supervised.SupervisedSolver

    +
    + + + + + +
    + +
    +

    ReducedOrderModelSolver#

    +
    +
    +class ReducedOrderModelSolver(problem, reduction_network, interpolation_network, loss=MSELoss(), optimizer=<class 'torch.optim.adam.Adam'>, optimizer_kwargs={'lr': 0.001}, scheduler=<class 'torch.optim.lr_scheduler.ConstantLR'>, scheduler_kwargs={'factor': 1, 'total_iters': 0})[source]#
    +

    Bases: SupervisedSolver

ReducedOrderModelSolver solver class. This class implements a Reduced Order Model solver, using user-specified reduction_network and interpolation_network to solve a specific problem.

    @@ -191,22 +637,22 @@

    ReducedOrderModelSolverextra_feature.

    -
    Parameters
    +
    Parameters:
• problem (AbstractProblem) – The formulation of the problem.

    • -
    • reduction_network (torch.nn.Module) – The reduction network used +

    • reduction_network (torch.nn.Module) – The reduction network used for reducing the input space. It must contain two methods, namely encode for input encoding and decode for decoding the former result.

    • -
    • interpolation_network (torch.nn.Module) – The interpolation network +

    • interpolation_network (torch.nn.Module) – The interpolation network for interpolating the control parameters to latent space obtain by the reduction_network encoding.

    • -
    • loss (torch.nn.Module) – The loss function used as minimizer, -default torch.nn.MSELoss.

    • -
    • extra_features (torch.nn.Module) – The additional input +

    • loss (torch.nn.Module) – The loss function used as minimizer, +default torch.nn.MSELoss.

    • +
    • extra_features (torch.nn.Module) – The additional input features to use as augmented input.

    • -
    • optimizer (torch.optim.Optimizer) – The neural network optimizer to -use; default is torch.optim.Adam.

    • +
    • optimizer (torch.optim.Optimizer) – The neural network optimizer to +use; default is torch.optim.Adam.

    • optimizer_kwargs (dict) – Optimizer constructor keyword args.

    • lr (float) – The learning rate; default is 0.001.

    • scheduler (torch.optim.LRScheduler) – Learning @@ -215,92 +661,155 @@

      ReducedOrderModelSolver

    -
    -
    -forward(x)[source]
    +
    +
    +forward(x)[source]#

    Forward pass implementation for the solver. It finds the encoder representation by calling interpolation_network.forward on the input, and maps this representation to output space by calling reduction_network.decode.

    -
    Parameters
    -

    x (torch.Tensor) – Input tensor.

    +
    Parameters:
    +

    x (torch.Tensor) – Input tensor.

    -
    Returns
    +
    Returns:

    Solver solution.

    -
    Return type
    -

    torch.Tensor

    +
    Return type:
    +

    torch.Tensor

    -
    -
    -loss_data(input_pts, output_pts)[source]
    +
    +
    +loss_data(input_pts, output_pts)[source]#

The data loss for the ReducedOrderModelSolver solver. It computes the loss between the network output and the true solution. This function should not be overridden unless intentionally.

    -
    Parameters
    +
    Parameters:
    • input_tensor (LabelTensor) – The input to the neural networks.

    • output_tensor (LabelTensor) – The true solution to compare the network solution.

    -
    Returns
    +
    Returns:

    The residual loss averaged on the input coordinates

    -
    Return type
    -

    torch.Tensor

    +
    Return type:
    +

    torch.Tensor

    -
    -
    -property neural_net
    -

    Neural network for training. It returns a ModuleDict +

    +
    +property neural_net#
    +

    Neural network for training. It returns a ModuleDict containing the reduction_network and interpolation_network.
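The encode/decode contract described above can be met by any module exposing those two methods; a hypothetical minimal sketch:

.. code:: python

   import torch

   class LinearReduction(torch.nn.Module):
       # hypothetical reduction_network: any module exposing
       # `encode` and `decode` satisfies the documented contract
       def __init__(self, dim, latent_dim):
           super().__init__()
           self._encoder = torch.nn.Linear(dim, latent_dim)
           self._decoder = torch.nn.Linear(latent_dim, dim)

       def encode(self, x):
           return self._encoder(x)

       def decode(self, x):
           return self._decoder(x)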

    -
    +
-
- - - - - + - + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/_rst/solvers/sapinn.html b/_rst/solvers/sapinn.html index 95604407..523986b6 100644 --- a/_rst/solvers/sapinn.html +++ b/_rst/solvers/sapinn.html @@ -1,128 +1,574 @@ + - - - - - SAPINN — PINA 0.1.1.post2407 documentation - - - - - - - - - - - - - - + + + + + + + + + SAPINN — PINA 0.1.2 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + +
+
+ + + +
+ + + + + + + + + + + + -
- - -
- -
-
-
-
    -
  • - - -
  • - View page source -
  • + +
    + + +
    +
    + +
    +
    + +
    + +
    + + + + +
    + +
    + +
    -
    -
    - -
    -

    SAPINN

    -
    -
    -class SAPINN(problem, model, weights_function=Sigmoid(), extra_features=None, loss=MSELoss(), optimizer_model=<class 'torch.optim.adam.Adam'>, optimizer_model_kwargs={'lr': 0.001}, optimizer_weights=<class 'torch.optim.adam.Adam'>, optimizer_weights_kwargs={'lr': 0.001}, scheduler_model=<class 'torch.optim.lr_scheduler.ConstantLR'>, scheduler_model_kwargs={'factor': 1, 'total_iters': 0}, scheduler_weights=<class 'torch.optim.lr_scheduler.ConstantLR'>, scheduler_weights_kwargs={'factor': 1, 'total_iters': 0})[source]
    -

    Bases: pina.solvers.pinns.basepinn.PINNInterface

    +
    + + + + + +
    + +
    +

    SAPINN#

    +
    +
    +class SAPINN(problem, model, weights_function=Sigmoid(), extra_features=None, loss=MSELoss(), optimizer_model=<class 'torch.optim.adam.Adam'>, optimizer_model_kwargs={'lr': 0.001}, optimizer_weights=<class 'torch.optim.adam.Adam'>, optimizer_weights_kwargs={'lr': 0.001}, scheduler_model=<class 'torch.optim.lr_scheduler.ConstantLR'>, scheduler_model_kwargs={'factor': 1, 'total_iters': 0}, scheduler_weights=<class 'torch.optim.lr_scheduler.ConstantLR'>, scheduler_weights_kwargs={'factor': 1, 'total_iters': 0})[source]#
    +

    Bases: PINNInterface

Self Adaptive Physics Informed Neural Network (SAPINN) solver class. This class implements Self-Adaptive Physics Informed Neural Network solvers, using a user-specified model to solve a specific @@ -172,28 +618,28 @@

    SAPINN -
    Parameters
    +
    Parameters:
• problem (AbstractProblem) – The formulation of the problem.

    • -
    • model (torch.nn.Module) – The neural network model to use +

• model (torch.nn.Module) – The neural network model to use.

    • -
    • weights_function (torch.nn.Module) – The neural network model +

    • weights_function (torch.nn.Module) – The neural network model related to the mask of SAPINN. -default Sigmoid.

    • -
    • extra_features (list(torch.nn.Module)) – The additional input +default Sigmoid.

    • +
    • extra_features (list(torch.nn.Module)) – The additional input features to use as augmented input. If None no extra features -are passed. If it is a list of torch.nn.Module, +are passed. If it is a list of torch.nn.Module, the extra feature list is passed to all models. If it is a list of extra features’ lists, each single list of extra feature is passed to a model.

    • -
    • loss (torch.nn.Module) – The loss function used as minimizer, -default torch.nn.MSELoss.

    • -
    • optimizer_model (torch.optim.Optimizer) – The neural +

    • loss (torch.nn.Module) – The loss function used as minimizer, +default torch.nn.MSELoss.

    • +
    • optimizer_model (torch.optim.Optimizer) – The neural network optimizer to use for the model network , default is torch.optim.Adam.

    • optimizer_model_kwargs (dict) – Optimizer constructor keyword args. for the model.

    • -
    • optimizer_weights (torch.optim.Optimizer) – The neural +

    • optimizer_weights (torch.optim.Optimizer) – The neural network optimizer to use for mask model model, default is torch.optim.Adam.

    • optimizer_weights_kwargs (dict) – Optimizer constructor @@ -209,263 +655,336 @@

      SAPINN -
      -forward(x)[source]
      +
      +
      +forward(x)[source]#

      Forward pass implementation for the PINN solver. It returns the function evaluation \(\mathbf{u}(\mathbf{x})\) at the control points \(\mathbf{x}\).

      -
      Parameters
      +
      Parameters:

x (LabelTensor) – Input tensor for the SAPINN solver. It expects a tensor \(N \times D\), where \(N\) is the number of points in the mesh and \(D\) is the dimension of the problem.

      -
      Returns
      +
      Returns:

      PINN solution.

      -
      Return type
      +
      Return type:

      LabelTensor

      -
      -
      -loss_phys(samples, equation)[source]
      +
      +
      +loss_phys(samples, equation)[source]#

      Computes the physics loss for the SAPINN solver based on given samples and equation.

      -
      Parameters
      +
      Parameters:
      • samples (LabelTensor) – The samples to evaluate the physics loss.

      • equation (EquationInterface) – The governing equation representing the physics.

      -
      Returns
      +
      Returns:

      The physics loss calculated based on given samples and equation.

      -
      Return type
      -

      torch.Tensor

      +
      Return type:
      +

      torch.Tensor

      -
      -
      -loss_data(input_tensor, output_tensor)[source]
      +
      +
      +loss_data(input_tensor, output_tensor)[source]#

Computes the data loss for the SAPINN solver based on input and output. It computes the loss between the network output and the true solution.

      -
      Parameters
      +
      Parameters:
      • input_tensor (LabelTensor) – The input to the neural networks.

      • output_tensor (LabelTensor) – The true solution to compare the network solution.

      -
      Returns
      +
      Returns:

      The computed data loss.

      -
      Return type
      -

      torch.Tensor

      +
      Return type:
      +

      torch.Tensor

      -
      -
      -configure_optimizers()[source]
      +
      +
      +configure_optimizers()[source]#

      Optimizer configuration for the SAPINN solver.

      -
      Returns
      +
      Returns:

      The optimizers and the schedulers

      -
      Return type
      +
      Return type:

      tuple(list, list)

      -
      -
      -on_train_batch_end(outputs, batch, batch_idx)[source]
      +
      +
      +on_train_batch_end(outputs, batch, batch_idx)[source]#

This method is called at the end of each training batch, and overrides the PyTorch Lightning implementation for logging the checkpoints.

      -
      Parameters
      +
      Parameters:
        -
      • outputs (torch.Tensor) – The output from the model for the +

      • outputs (torch.Tensor) – The output from the model for the current batch.

      • batch (tuple) – The current batch of data.

      • batch_idx (int) – The index of the current batch.

      -
      Returns
      +
      Returns:

      Whatever is returned by the parent method on_train_batch_end.

      -
      Return type
      +
      Return type:

      Any

      -
      -
      -on_train_start()[source]
      +
      +
      +on_train_start()[source]#

      This method is called at the start of the training for setting the self adaptive weights as parameters of the mask model.

      -
      Returns
      +
      Returns:

      Whatever is returned by the parent method on_train_start.

      -
      Return type
      +
      Return type:

      Any

      -
      -
      -on_load_checkpoint(checkpoint)[source]
      +
      +
      +on_load_checkpoint(checkpoint)[source]#

Overriding the PyTorch Lightning on_load_checkpoint to handle checkpoints for Self Adaptive Weights. This method should not be overridden unless intentionally.

      -
      Parameters
      +
      Parameters:

      checkpoint (dict) – Pytorch Lightning checkpoint dict.

      -
      -
      -property neural_net
      +
      +
      +property neural_net#

      Returns the neural network model.

      -
      Returns
      +
      Returns:

      The neural network model.

      -
      Return type
      -

      torch.nn.Module

      +
      Return type:
      +

      torch.nn.Module

      -
      -
      -property weights_dict
      +
      +
      +property weights_dict#

Return the mask models associated with the application of the mask to the self-adaptive weights, for each loss that composes the global loss of the problem.

      -
      Returns
      +
      Returns:

      The ModuleDict for mask models.

      -
      Return type
      -

      torch.nn.ModuleDict

      +
      Return type:
      +

      torch.nn.ModuleDict

      -
      -
      -property scheduler_model
      +
      +
      +property scheduler_model#

      Returns the scheduler associated with the neural network model.

      -
      Returns
      +
      Returns:

      The scheduler for the neural network model.

      -
      Return type
      +
      Return type:

      torch.optim.lr_scheduler._LRScheduler

      -
      -
      -property scheduler_weights
      +
      +
      +property scheduler_weights#

      Returns the scheduler associated with the mask model (if applicable).

      -
      Returns
      +
      Returns:

      The scheduler for the mask model.

      -
      Return type
      +
      Return type:

      torch.optim.lr_scheduler._LRScheduler

      -
      -
      -property optimizer_model
      +
      +
      +property optimizer_model#

      Returns the optimizer associated with the neural network model.

      -
      Returns
      +
      Returns:

      The optimizer for the neural network model.

      -
      Return type
      -

      torch.optim.Optimizer

      +
      Return type:
      +

      torch.optim.Optimizer

      -
      -
      -property optimizer_weights
      +
      +
      +property optimizer_weights#

      Returns the optimizer associated with the mask model (if applicable).

      -
      Returns
      +
      Returns:

      The optimizer for the mask model.

      -
      Return type
      -

      torch.optim.Optimizer

      +
      Return type:
      +

      torch.optim.Optimizer
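A minimal construction sketch showing the two optimizer groups documented above (one for the model, one for the self-adaptive weights); problem and model are assumed to be defined elsewhere:

.. code:: python

   from pina.solvers import SAPINN

   solver = SAPINN(
       problem=problem,
       model=model,
       optimizer_model_kwargs={'lr': 0.001},    # network optimizer
       optimizer_weights_kwargs={'lr': 0.01},   # self-adaptive weights optimizer
   )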

    -
    +
-
- - - - - + - + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/_rst/solvers/solver_interface.html b/_rst/solvers/solver_interface.html index 152abfba..84023928 100644 --- a/_rst/solvers/solver_interface.html +++ b/_rst/solvers/solver_interface.html @@ -1,181 +1,626 @@ + - - - - - SolverInterface — PINA 0.1.1.post2407 documentation - - - - - - - - - - - - - - + + + + + + + + + SolverInterface — PINA 0.1.2 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + +
+
+ + + +
+ + + + + + + + + + + + -
- - -
- -
-
-
-
    -
  • - - -
  • - View page source -
  • + +
    + + +
    +
    + +
    +
    + +
    + +
    + + + + +
    + +
    + +
    -
    -
    - -
    -

    SolverInterface

    -
    -
    -class SolverInterface(models, problem, optimizers, optimizers_kwargs, extra_features=None)[source]
    -

    Bases: pytorch_lightning.core.module.LightningModule

    +
    + + + + + +
    + +
    +

    SolverInterface#

    +
    +
    +class SolverInterface(models, problem, optimizers, optimizers_kwargs, extra_features=None)[source]#
    +

    Bases: LightningModule

Solver base class. This class is a wrapper of the LightningModule class, inheriting all the LightningModule methods.

    -
    Parameters
    +
    Parameters:
      -
    • models (torch.nn.Module) – A torch neural network model instance.

    • +
    • models (torch.nn.Module) – A torch neural network model instance.

    • problem (AbstractProblem) – A problem definition instance.

    • -
    • optimizer (list(torch.optim.Optimizer)) – A list of neural network optimizers to +

    • optimizer (list(torch.optim.Optimizer)) – A list of neural network optimizers to use.

    • optimizer_kwargs (list(dict)) – A list of optimizer constructor keyword args.

    • -
    • extra_features (list(torch.nn.Module)) – The additional input +

    • extra_features (list(torch.nn.Module)) – The additional input features to use as augmented input. If None no extra features -are passed. If it is a list of torch.nn.Module, the extra feature +are passed. If it is a list of torch.nn.Module, the extra feature list is passed to all models. If it is a list of extra features’ lists, each single list of extra feature is passed to a model.

    -
    -
    -abstract forward(*args, **kwargs)[source]
    -

    Same as torch.nn.Module.forward().

    +
    +
    +abstract forward(*args, **kwargs)[source]#
    +

    Same as torch.nn.Module.forward().

    -
    Parameters
    +
    Parameters:
    • *args – Whatever you decide to pass into the forward method.

    • **kwargs – Keyword arguments are also possible.

    -
    Returns
    +
    Returns:

    Your model’s output
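A skeleton of a custom solver implementing the abstract methods documented here; the loss is a placeholder, and the models attribute is assumed to expose the networks passed to the constructor:

.. code:: python

   from pina.solvers import SolverInterface

   class MySolver(SolverInterface):
       def forward(self, x):
           # evaluate the (single) wrapped model; `self.models` is assumed
           # to hold the networks passed to the constructor
           return self.models[0](x)

       def training_step(self, batch, batch_idx):
           # placeholder data-fitting loss, not PINA's actual logic
           pts, target = batch
           return (self.forward(pts) - target).pow(2).mean()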

    -
    -
    -abstract training_step()[source]
    +
    +
    +abstract training_step()[source]#

Here you compute and return the training loss and some additional metrics, e.g. for the progress bar or logger.

    -
    Parameters
    +
    Parameters:
      -
    • batch – The output of your data iterable, normally a DataLoader.

    • +
    • batch – The output of your data iterable, normally a DataLoader.

    • batch_idx – The index of this batch.

    • dataloader_idx – The index of the dataloader that produced this batch. (only if multiple dataloaders used)

    -
    Returns
    +
    Returns:

    +
-
- - - - - + - + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/_rst/solvers/supervised.html b/_rst/solvers/supervised.html index 9922237d..2e5b9ab8 100644 --- a/_rst/solvers/supervised.html +++ b/_rst/solvers/supervised.html @@ -1,128 +1,574 @@ + - - - - - SupervisedSolver — PINA 0.1.1.post2407 documentation - - - - - - - - - - - - - - + + + + + + + + + SupervisedSolver — PINA 0.1.2 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + +
+
+ + + +
+ + + + + + + + + + + + -
- - -
- -
-
-
-
    -
  • - - -
  • - View page source -
  • + +
    + + +
    +
    + +
    +
    + +
    + +
    + + + + +
    + +
    + +
    -
    -
    - -
    -

    SupervisedSolver

    -
    -
    -class SupervisedSolver(problem, model, extra_features=None, loss=MSELoss(), optimizer=<class 'torch.optim.adam.Adam'>, optimizer_kwargs={'lr': 0.001}, scheduler=<class 'torch.optim.lr_scheduler.ConstantLR'>, scheduler_kwargs={'factor': 1, 'total_iters': 0})[source]
    -

    Bases: pina.solvers.solver.SolverInterface

    +
    + + + + + +
    + +
    +

    SupervisedSolver#

    +
    +
    +class SupervisedSolver(problem, model, extra_features=None, loss=MSELoss(), optimizer=<class 'torch.optim.adam.Adam'>, optimizer_kwargs={'lr': 0.001}, scheduler=<class 'torch.optim.lr_scheduler.ConstantLR'>, scheduler_kwargs={'factor': 1, 'total_iters': 0})[source]#
    +

    Bases: SolverInterface

SupervisedSolver solver class. This class implements a SupervisedSolver, using a user-specified model to solve a specific problem.

    The Supervised Solver class aims to find @@ -143,16 +589,16 @@

    SupervisedSolver -
    Parameters
    +
    Parameters:
• problem (AbstractProblem) – The formulation of the problem.

    • -
    • model (torch.nn.Module) – The neural network model to use.

    • -
    • loss (torch.nn.Module) – The loss function used as minimizer, -default torch.nn.MSELoss.

    • -
    • extra_features (torch.nn.Module) – The additional input +

    • model (torch.nn.Module) – The neural network model to use.

    • +
    • loss (torch.nn.Module) – The loss function used as minimizer, +default torch.nn.MSELoss.

    • +
    • extra_features (torch.nn.Module) – The additional input features to use as augmented input.

    • -
    • optimizer (torch.optim.Optimizer) – The neural network optimizer to -use; default is torch.optim.Adam.

    • +
    • optimizer (torch.optim.Optimizer) – The neural network optimizer to +use; default is torch.optim.Adam.

    • optimizer_kwargs (dict) – Optimizer constructor keyword args.

    • lr (float) – The learning rate; default is 0.001.

    • scheduler (torch.optim.LRScheduler) – Learning @@ -161,133 +607,200 @@

      SupervisedSolver -
      -forward(x)[source]
      +
      +
      +forward(x)[source]#

      Forward pass implementation for the solver.

      -
      Parameters
      -

      x (torch.Tensor) – Input tensor.

      +
      Parameters:
      +

      x (torch.Tensor) – Input tensor.

      -
      Returns
      +
      Returns:

      Solver solution.

      -
      Return type
      -

      torch.Tensor

      +
      Return type:
      +

      torch.Tensor

      -
      -
      -configure_optimizers()[source]
      +
      +
      +configure_optimizers()[source]#

      Optimizer configuration for the solver.

      -
      Returns
      +
      Returns:

      The optimizers and the schedulers

      -
      Return type
      +
      Return type:

      tuple(list, list)

      -
      -
      -training_step(batch, batch_idx)[source]
      +
      +
      +training_step(batch, batch_idx)[source]#

      Solver training step.

      -
      Parameters
      +
      Parameters:
      • batch (tuple) – The batch element in the dataloader.

      • batch_idx (int) – The batch index.

      -
      Returns
      +
      Returns:

      The sum of the loss functions.

      -
      Return type
      +
      Return type:

      LabelTensor

      -
      -
      -loss_data(input_pts, output_pts)[source]
      +
      +
      +loss_data(input_pts, output_pts)[source]#

The data loss for the Supervised solver. It computes the loss between the network output and the true solution. This function should not be overridden unless intentionally.

      -
      Parameters
      +
      Parameters:
      • input_tensor (LabelTensor) – The input to the neural networks.

      • output_tensor (LabelTensor) – The true solution to compare the network solution.

      -
      Returns
      +
      Returns:

      The residual loss averaged on the input coordinates

      -
      Return type
      -

      torch.Tensor

      +
      Return type:
      +

      torch.Tensor

      -
      -
      -property scheduler
      +
      +
      +property scheduler#

      Scheduler for training.

      -
      -
      -property neural_net
      +
      +
      +property neural_net#

      Neural network for training.

      -
      -
      -property loss
      +
      +
      +property loss#

      Loss for training.
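A minimal end-to-end sketch (assuming a problem holding an input/output Condition and any torch.nn.Module as model):

.. code:: python

   from pina.solvers import SupervisedSolver
   from pina.trainer import Trainer

   solver = SupervisedSolver(problem=problem, model=model)
   trainer = Trainer(solver=solver, max_epochs=100, batch_size=32)
   trainer.train()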

    -
    +
-
- - - - - + - + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/_rst/trainer.html b/_rst/trainer.html index 8a648c5f..7b6eaf1d 100644 --- a/_rst/trainer.html +++ b/_rst/trainer.html @@ -1,184 +1,698 @@ + - - - - - Trainer — PINA 0.1.1.post2407 documentation - - - - - - - - - - - - - - + + + + + + + + + Trainer — PINA 0.1.2 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + +
+
+ + + +
+ + + + + + + + + + + + -
- - -
- -
-
-
-
    -
  • - - -
  • - View page source -
  • + +
    + + +
    +
    + +
    +
    + +
    + +
    + + + + +
    + +
    + +
    -
    -
    - -
    -

    Trainer

    +
    + + + + + +
    + +
    +

    Trainer#

    Trainer module.

    -
    -
    -class Trainer(solver, batch_size=None, **kwargs)[source]
    -

    Bases: pytorch_lightning.trainer.trainer.Trainer

    +
    +
    +class Trainer(solver, batch_size=None, **kwargs)[source]#
    +

    Bases: Trainer

PINA Trainer class for customizing every aspect of training via flags.

    -
    Parameters
    +
    Parameters:
• solver (SolverInterface) – A SolverInterface solver for the differential problem.

    • -
    • batch_size (int | None) – How many samples per batch to load. If batch_size=None all +

    • batch_size (int | None) – How many samples per batch to load. If batch_size=None all samples are loaded and data are not batched, defaults to None.

    -
    Keyword Arguments
    +
    Keyword Arguments:

The additional keyword arguments specify the training setup and can be chosen from the pytorch-lightning Trainer API

    -
    -
    -train(**kwargs)[source]
    +
    +
    +train(**kwargs)[source]#

    Train the solver method.

    -
    -
    -property solver
    +
    +
    +property solver#

Returns the trainer solver.
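Since keyword arguments are forwarded to the underlying pytorch-lightning Trainer, a typical construction looks like the following sketch (the solver is assumed to be any SolverInterface instance):

.. code:: python

   from pina.trainer import Trainer

   trainer = Trainer(
       solver=solver,      # any PINA solver
       batch_size=None,    # None: all samples are loaded, data not batched
       max_epochs=500,     # forwarded to pytorch-lightning
       accelerator='cpu',  # forwarded to pytorch-lightning
   )
   trainer.train()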

    -
    +
-
- - - - - + - + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/_rst/tutorials/tutorial1/tutorial.html b/_rst/tutorials/tutorial1/tutorial.html index fd82109f..093a2a45 100644 --- a/_rst/tutorials/tutorial1/tutorial.html +++ b/_rst/tutorials/tutorial1/tutorial.html @@ -1,116 +1,497 @@ + - - - - - Tutorial: Physics Informed Neural Networks on PINA — PINA 0.1.1.post2407 documentation - - - - - - - - - - - - - - + + + + + + + + + Tutorial: Physics Informed Neural Networks on PINA — PINA 0.1.2 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + +
+
+ + + +
- - -
- -
-
-
- -
-
+
+

Write the problem class#

Once the Problem class is initialized, we need to represent the differential equation in PINA. In order to do this, we need to load the PINA operators from the pina.operators module. Again, we’ll @@ -257,10 +638,10 @@
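As an illustration of the operators, a Poisson-type residual can be written as follows (the names and the forcing term are illustrative, not this tutorial's exact problem):

.. code:: python

   import torch
   from pina.operators import laplacian
   from pina.equation import Equation

   def laplace_equation(input_, output_):
       # residual of -Δu = f with f(x, y) = sin(pi x) sin(pi y)
       force_term = (torch.sin(input_.extract(['x']) * torch.pi) *
                     torch.sin(input_.extract(['y']) * torch.pi))
       return laplacian(output_, input_, components=['u'], d=['x', 'y']) + force_term

   equation = Equation(laplace_equation)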

Write the problem classtruth_solution function is a method of the PINN class, but it is not mandatory for problem definition.

-

- -
-

Generate data

+ + +
+

Generate data#

Data for training can come in the form of direct numerical simulation results, or points in the domains. In case we perform unsupervised learning, we just need the collocation points for training, i.e. points @@ -324,9 +705,9 @@
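For collocation-based training, points can be sampled directly from the problem's domains; a sketch (the location names depend on the conditions defined in the problem, and the method signature should be checked against the API reference):

.. code:: python

   # sample interior and boundary collocation points (location names assumed)
   problem.discretise_domain(n=20, mode='grid', locations=['D'])
   problem.discretise_domain(n=100, mode='random', locations=['gamma1', 'gamma2'])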

Generate data -

-
-

Perform a small training

+ +
+

Perform a small training#

Once we have defined the problem and generated the data we can start the modelling. Here we will choose a FeedForward neural network available in pina.model, and we will train using the PINN solver @@ -394,9 +775,9 @@
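Putting the pieces together, a small training run might look like this sketch (layer sizes and epoch counts are illustrative):

.. code:: python

   from pina import Trainer
   from pina.model import FeedForward
   from pina.solvers import PINN

   model = FeedForward(
       input_dimensions=len(problem.input_variables),
       output_dimensions=len(problem.output_variables),
       layers=[10, 10],
   )
   solver = PINN(problem=problem, model=model)
   trainer = Trainer(solver=solver, max_epochs=1000)
   trainer.train()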

Perform a small training

As we can see, the loss has not reached a minimum, suggesting that we could train for longer.

-

-
-

What’s next?

+ +
+

What’s next?#

Congratulations on completing the introductory tutorial of PINA! There are several directions you can go now:

    @@ -406,40 +787,105 @@

    What’s next? - - -

+ + + + + + + + + + +
+ +
+ + + + + + + - + + + + + + + \ No newline at end of file diff --git a/_rst/tutorials/tutorial10/tutorial.html b/_rst/tutorials/tutorial10/tutorial.html index 0ccc5e95..b4ffb156 100644 --- a/_rst/tutorials/tutorial10/tutorial.html +++ b/_rst/tutorials/tutorial10/tutorial.html @@ -1,114 +1,497 @@ + - - - - - Tutorial: Averaging Neural Operator for solving Kuramoto Sivashinsky equation — PINA 0.1.1.post2407 documentation - - - - - - - - - - - - - - + + + + + + + + + Tutorial: Averaging Neural Operator for solving Kuramoto Sivashinsky equation — PINA 0.1.2 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + +
+
+ + + +
- - -
- -
-
-
-
    -
  • - - -
  • - View page source -
  • + +
    + + +
    +
    + +
    +
    + +
    + +
    + + + +
    -
    -
    - -
    -

    Tutorial: Averaging Neural Operator for solving Kuramoto Sivashinsky equation

    + +
    + + +
    +
    + + + + + +
    + +
    +

    Tutorial: Averaging Neural Operator for solving Kuramoto Sivashinsky equation#

    In this tutorial we will build a Neural Operator using the AveragingNeuralOperator model and the SupervisedSolver. At the end of the tutorial you will be able to train a Neural Operator for @@ -125,8 +508,8 @@

    Tutorial: Averaging Neural Operator for solving Kuramoto Sivashinsky equatio from pina.trainer import Trainer

    -
    -

    Data Generation

    +
    +

    Data Generation#

    We will focus on solving a specific PDE, the Kuramoto Sivashinsky (KS) equation. The KS PDE is a fourth-order nonlinear PDE with the following form:
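In its standard one-dimensional form, the KS equation reads

\[\frac{\partial u}{\partial t} + u\frac{\partial u}{\partial x} + \frac{\partial^2 u}{\partial x^2} + \frac{\partial^4 u}{\partial x^4} = 0,\]

with \(u = u(x, t)\) and suitable initial and boundary conditions.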

    @@ -221,9 +604,9 @@

    Data GenerationSupervisedSolver class to tackle the problem.

    -

    -
+
+

Averaging Neural Operator#

We will build a neural operator \(\texttt{NO}\) which takes the solution at time \(t=0\) for any \(x\in\Omega\), the time \((t)\) at which we want to compute the solution, and gives back the @@ -291,9 +674,9 @@

Averaging Neural OperatorSIREN activation function, more on Implicit Neural Representations with Periodic Activation Functions.
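A construction sketch; the argument names follow the AveragingNeuralOperator documentation, but the exact sizes and signature should be checked against the API reference:

.. code:: python

   from pina.model import AveragingNeuralOperator, FeedForward

   # lift (u0, x, t) to a latent space, average, then project back to u;
   # dimensions here are illustrative
   lifting_net = FeedForward(input_dimensions=3, output_dimensions=24)
   projecting_net = FeedForward(input_dimensions=24 + 2, output_dimensions=1)
   model = AveragingNeuralOperator(
       lifting_net=lifting_net,
       projecting_net=projecting_net,
       coordinates_indices=['x', 't'],
       field_indices=['u0'],
   )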

-

-
-

Solving the KS problem

+ +
+

Solving the KS problem#

We will now focus on solving the KS equation using the SupervisedSolver class and the AveragingNeuralOperator model. As done in the FNO @@ -362,9 +745,9 @@

Solving the KS problem

As we can see, the error is quite small, which agrees with what we observed in the previous plots.

-

-
-

What’s next?

+ +
+

What’s next?#

Now you know how to solve a time dependent neural operator problem in PINA! There are multiple directions you can go now:

    @@ -377,40 +760,101 @@

    What’s next? - - -

+ + + + + + + + + + +
+ +
+ + + + + + + + + + + + + + + \ No newline at end of file diff --git a/_rst/tutorials/tutorial11/tutorial.html b/_rst/tutorials/tutorial11/tutorial.html index 6c4d1ce5..bde6b807 100644 --- a/_rst/tutorials/tutorial11/tutorial.html +++ b/_rst/tutorials/tutorial11/tutorial.html @@ -1,117 +1,496 @@ + - - - - - Tutorial: PINA and PyTorch Lightning, training tips and visualizations — PINA 0.1.1.post2407 documentation - - - - - - - - - - - - - - + + + + + + + + + Tutorial: PINA and PyTorch Lightning, training tips and visualizations — PINA 0.1.2 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + +
+
+ + + +
- - -
- -
-
-
-
+
+

Trainer Logging#

In PINA you can log metrics in different ways. The simplest approach is to use the MetricTracker class from pina.callbacks as seen in the Introduction to PINA for Physics Informed Neural Networks @@ -284,9 +663,9 @@
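A sketch of the simplest setup (the solver is assumed to be defined already):

.. code:: python

   from pina.callbacks import MetricTracker
   from pina.trainer import Trainer

   trainer = Trainer(solver=solver, max_epochs=100, callbacks=[MetricTracker()])
   trainer.train()
   # the tracked metrics can then be inspected, e.g. for plotting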

Trainer Loggingmore on hooks).

-

-
-

Trainer Callbacks

+ +
+

Trainer Callbacks#

Whenever we need to access certain steps of the training for logging, do static modifications (i.e. not changing the Solver) or updating Problem hyperparameters (static variables), we can use @@ -403,9 +782,9 @@
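For instance, a minimal pytorch-lightning callback hooking into the end of each epoch could look like this sketch (the logged metric name depends on the solver):

.. code:: python

   from pytorch_lightning.callbacks import Callback

   class PrintLoss(Callback):
       # hypothetical callback: print the running loss once per epoch
       def on_train_epoch_end(self, trainer, pl_module):
           print(trainer.current_epoch, trainer.logged_metrics.get('mean_loss'))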

Trainer Callbacks -

Trainer Tips to Boost Accuracy, Save Memory and Speed Up Training

+

+
+

Trainer Tips to Boost Accuracy, Save Memory and Speed Up Training#

Until now we have seen how to choose the right accelerator, how to log and visualize the results, and how to interface with the program in order to add specific parts of code at specific points via callbacks. @@ -547,9 +926,9 @@

Trainer Tips to Boost Accuracy, Save Memory and Speed Up Training -

What’s next?

+

+
+

What’s next?#

Now you know how to use efficiently the Trainer class PINA! There are multiple directions you can go now:

    @@ -558,40 +937,102 @@

    What’s next?Trainer speed for different precisions.

-
- + + - - - - - - + - + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/_rst/tutorials/tutorial12/tutorial.html b/_rst/tutorials/tutorial12/tutorial.html index ef66ae52..a924627c 100644 --- a/_rst/tutorials/tutorial12/tutorial.html +++ b/_rst/tutorials/tutorial12/tutorial.html @@ -1,120 +1,502 @@ + - - - - - Tutorial: The Equation Class — PINA 0.1.1.post2407 documentation - - - - - - - - - - - - - - + + + + + + + + + Tutorial: The Equation Class — PINA 0.1.2 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + + + + + + + + + + + + +
+ + - -
-
+ + + + + + + + + + + + + + + + + - - - -
- -
-
-
-
+ - - - - - - + - + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/_rst/tutorials/tutorial13/tutorial.html b/_rst/tutorials/tutorial13/tutorial.html index 5d2ae0f1..93336acd 100644 --- a/_rst/tutorials/tutorial13/tutorial.html +++ b/_rst/tutorials/tutorial13/tutorial.html @@ -1,116 +1,497 @@ + - - - - - Tutorial: Multiscale PDE learning with Fourier Feature Network — PINA 0.1.1.post2407 documentation - - - - - - - - - - - - - - + + + + + + + + + Tutorial: Multiscale PDE learning with Fourier Feature Network — PINA 0.1.2 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + + + + + + + + + + + + +
+ + - -
-
+ + + + + + + + + + + + + + + + + - - - -
- -
-
-
-
+ - + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/_rst/tutorials/tutorial2/tutorial.html b/_rst/tutorials/tutorial2/tutorial.html index d5113ceb..29a6f50b 100644 --- a/_rst/tutorials/tutorial2/tutorial.html +++ b/_rst/tutorials/tutorial2/tutorial.html @@ -1,118 +1,497 @@ + - - - - - Tutorial: Two dimensional Poisson problem using Extra Features Learning — PINA 0.1.1.post2407 documentation - - - - - - - - - - - - - - + + + + + + + + + Tutorial: Two dimensional Poisson problem using Extra Features Learning — PINA 0.1.2 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + +
+
+ + + +
- - -
- -
-
-
-
    -
  • - - -
  • - View page source -
  • + +
    + + +
    +
    + +
    +
    + +
    + +
    + + + +
    -
    -
    - -
    -

    Tutorial: Two dimensional Poisson problem using Extra Features Learning

    + +
    + + +
    +
    + + + + + +
    + +
    +

    Tutorial: Two dimensional Poisson problem using Extra Features Learning#

This tutorial presents how to solve a 2D Poisson problem with Dirichlet boundary conditions using Physics-Informed Neural Networks (PINNs). We will train with standard PINN training, and with @@ -136,8 +515,8 @@

    Tutorial: Two dimensional Poisson problem using Extra Features Learning

    -
    -

    The problem definition

    +
    +

    The problem definition#

    The two-dimensional Poisson problem is mathematically written as:

    \[\begin{split}\begin{equation} @@ -187,9 +566,9 @@

    The problem definition

    -
    -
+
+

Solving the problem with standard PINNs#

After the problem, the feed-forward neural network is defined, through the class FeedForward. This neural network takes as input the coordinates (in this case \(x\) and \(y\)) and provides the @@ -229,9 +608,9 @@

Solving the problem with standard PINNs -

-
-

Solving the problem with extra-features PINNs

+ +
+

Solving the problem with extra-features PINNs#

Now, the same problem is solved in a different way. A new neural network is now defined, with an additional input variable, named extra-feature, which coincides with the forcing term in the Laplace equation. The set @@ -289,9 +668,9 @@
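A sketch of such an extra feature as a torch module returning the forcing term (the output label is illustrative):

.. code:: python

   import torch
   from pina import LabelTensor

   class SinSinFeature(torch.nn.Module):
       """Extra feature equal to the forcing term sin(pi x) sin(pi y)."""
       def forward(self, x):
           t = (torch.sin(x.extract(['x']) * torch.pi) *
                torch.sin(x.extract(['y']) * torch.pi))
           return LabelTensor(t, ['k0'])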

Solving the problem with extra-features PINNs -

-
-

Solving the problem with learnable extra-features PINNs

+ +
+

Solving the problem with learnable extra-features PINNs#

We can still do better!

Another way to exploit the extra features is the addition of learnable parameters inside them. In this way, the added parameters are learned @@ -387,9 +766,9 @@

Solving the problem with learnable extra-features PINNs

../../../_images/tutorial_23_0.png - -
-

What’s next?

+ +
+

What’s next?#

Nice you have completed the two dimensional Poisson tutorial of PINA! There are multiple directions you can go now:

    @@ -400,40 +779,102 @@

    What’s next? - - -

+ + + + + + + + + + +
+ +
+ + + + + + + + + + + + + + + \ No newline at end of file diff --git a/_rst/tutorials/tutorial3/tutorial.html b/_rst/tutorials/tutorial3/tutorial.html index 41ebd0ed..159b82b1 100644 --- a/_rst/tutorials/tutorial3/tutorial.html +++ b/_rst/tutorials/tutorial3/tutorial.html @@ -1,117 +1,497 @@ + - - - - - Tutorial: Two dimensional Wave problem with hard constraint — PINA 0.1.1.post2407 documentation - - - - - - - - - - - - - - + + + + + + + + + Tutorial: Two dimensional Wave problem with hard constraint — PINA 0.1.2 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + +
+
+ + + +
- - -
- -
-
-
-
    -
  • - - -
  • - View page source -
  • + +
    + + +
    +
    + +
    +
    + +
    + +
    + + + +
    -
    -
    - -
    -

    Tutorial: Two dimensional Wave problem with hard constraint

    + +
    + + +
    +
    + + + + + +
    + +
    +

    Tutorial: Two dimensional Wave problem with hard constraint#

In this tutorial we present how to solve the wave equation using hard constraint PINNs. To do so, we will build a custom torch model and pass it to the PINN solver.

    @@ -128,8 +508,8 @@

    Tutorial: Two dimensional Wave problem with hard constraint -

    The problem definition

    +
    +

    The problem definition#

    The problem is written in the following form:

    \[\begin{split}\begin{equation} @@ -183,9 +563,9 @@

    The problem definition

    -
    -
+
+

Hard Constraint Model#

After defining the problem, a torch model is needed to solve the PINN. Usually, many models are already implemented in PINA, but the user also has the possibility to build their own model in torch. The hard @@ -217,9 +597,9 @@
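A hard-constraint wrapper can be sketched as follows (the multiplier vanishes on the spatial boundary of the unit square, so Dirichlet conditions hold by construction; the exact model used in this tutorial may differ):

.. code:: python

   import torch

   class HardConstraintModel(torch.nn.Module):
       def __init__(self, model):
           super().__init__()
           self.model = model  # any torch.nn.Module, e.g. a FeedForward

       def forward(self, x):
           # x(1-x)y(1-y) is zero on the boundary of the unit square
           hard = (x.extract(['x']) * (1 - x.extract(['x'])) *
                   x.extract(['y']) * (1 - x.extract(['y'])))
           return hard * self.model(x)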

Hard Constraint Model

- -
-

Train and Inference

+ +
+

Train and Inference#

In this tutorial, the neural network is trained for 1000 epochs with a learning rate of 0.001 (default in PINN). Training takes approximately 3 minutes.

@@ -343,9 +723,9 @@

Train and Inference -

What’s next?

+

+
+

What’s next?#

Nice you have completed the two dimensional Wave tutorial of PINA! There are multiple directions you can go now:

    @@ -358,40 +738,101 @@

    What’s next? - - -

+ + + + + + + + + + +
+ +
+ + + + + + + + + + + + + + + \ No newline at end of file diff --git a/_rst/tutorials/tutorial4/tutorial.html b/_rst/tutorials/tutorial4/tutorial.html index c5ac6a5a..9baacc38 100644 --- a/_rst/tutorials/tutorial4/tutorial.html +++ b/_rst/tutorials/tutorial4/tutorial.html @@ -1,114 +1,497 @@ + - - - - - Tutorial: Unstructured convolutional autoencoder via continuous convolution — PINA 0.1.1.post2407 documentation - - - - - - - - - - - - - - + + + + + + + + + Tutorial: Unstructured convolutional autoencoder via continuous convolution — PINA 0.1.2 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + +
+
+ + + +
- - -
- -
-
-
- -
-

Continuous filter background

+
+

Continuous filter background#

As reported by the authors in the original paper: in contrast to discrete convolution, continuous convolution is mathematically defined as:
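In its textbook form, the continuous convolution of an input function \(f\) with a filter \(g\) reads

\[(f * g)(\mathbf{x}) = \int_{\Omega} f(\boldsymbol{\tau})\, g(\mathbf{x} - \boldsymbol{\tau})\, d\boldsymbol{\tau},\]

which the continuous filter approximates from samples of \(f\) at (possibly unstructured) points.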

@@ -166,8 +549,8 @@

Continuous filter background\(\rightarrow\) stride variable in ContinuousConv 3. The filter rectangular domain \(\rightarrow\) filter_dim variable in ContinuousConv

-
-

Input function

+
+

Input function#

The input function for the continuous filter is defined as a tensor of shape:

-

-
+
+

Stride#

The stride is passed as a dictionary stride which tells the filter where to go. Here is an example for the \([0,1]\times[0,5]\) domain:

# stride definition
@@ -246,9 +629,9 @@ 

Stride -

Filter definition

+

+
+

Filter definition#

Having defined all the previous blocks, we are able to construct the continuous filter. Suppose we would like to get an output with only one field, and let us fix the filter dimension to be \([0.1, 0.1]\).
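An instantiation sketch; the argument names follow the variables introduced above (filter_dim, stride), but the import path and full signature should be checked against the ContinuousConv API reference:

.. code:: python

   from pina.model.layers import ContinuousConvBlock

   conv = ContinuousConvBlock(
       input_numb_field=1,     # fields in the input function
       output_numb_field=1,    # one output field, as stated above
       filter_dim=[0.1, 0.1],  # rectangular filter domain
       stride=stride,          # the stride dictionary defined earlier
   )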

@@ -322,10 +705,10 @@

Filter definition -

Building a MNIST Classifier

+

+ +
+

Building a MNIST Classifier#

Let’s see how we can build an MNIST classifier using a continuous convolutional filter. We will use the MNIST dataset from PyTorch. In order to keep training times small we use only 6000 samples for training @@ -506,9 +889,9 @@

Building a MNIST Classifier -

Building a Continuous Convolutional Autoencoder

+

+
+

Building a Continuous Convolutional Autoencoder#

Just as a toy problem, we will now build an autoencoder for the following function \(f(x,y)=\sin(\pi x)\sin(\pi y)\) on the unit circle domain centered in \((0.5, 0.5)\). We will also see the ability to @@ -692,8 +1075,8 @@

Building a Continuous Convolutional Autoencoder -

Filter for upsampling

+
+

Filter for upsampling#

Suppose we already have the hidden dimension and we want to upsample on a different grid with more points. Let’s see how to do it:

# setting the seed
@@ -734,9 +1117,9 @@ 

Filter for upsampling
l2 error: 8.49%
 

-
-
+
+

Autoencoding at different resolution#

In the previous example we already had the hidden dimension (of the original input) and we used it to upsample. Sometimes, however, we have a finer mesh solution and we simply want to encode it. This can be done without @@ -779,10 +1162,10 @@

Autoencoding at different resolution
l2 error: 8.59%
 

- - -
-

What’s next?

+ + +
+

What’s next?#

We have shown the basic usage of a convolutional filter. There are additional extensions possible:

    @@ -791,40 +1174,110 @@

    What’s next? - - -

+ + + + + + + + + + +
+ +
+ + + + + + + - + + + + + + + \ No newline at end of file diff --git a/_rst/tutorials/tutorial5/tutorial.html b/_rst/tutorials/tutorial5/tutorial.html index 4ad3fbdc..68cbf3ea 100644 --- a/_rst/tutorials/tutorial5/tutorial.html +++ b/_rst/tutorials/tutorial5/tutorial.html @@ -1,114 +1,497 @@ + - - - - - Tutorial: Two dimensional Darcy flow using the Fourier Neural Operator — PINA 0.1.1.post2407 documentation - - - - - - - - - - - - - - + + + + + + + + + Tutorial: Two dimensional Darcy flow using the Fourier Neural Operator — PINA 0.1.2 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + +
+
+ + + +
- - -
- -
-
-
-
+
+

Solving the problem with a Fourier Neural Operator (FNO)#

We will now move to solve the problem using an FNO. Since we are learning an operator, this approach is better suited, as we shall see.

# make model
@@ -275,47 +658,108 @@ 

Solving the problem with a Fuorier Neural Operator (FNO) -

What’s next?

+

+
+

What’s next?#

We have made a very simple example of how to use the FNO for learning a neural operator. Currently in PINA we implement 1D/2D/3D cases. We suggest extending the tutorial with more complex problems and training for longer, to see the full potential of neural operators.

-
- + + - - - - - - + - + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/_rst/tutorials/tutorial6/tutorial.html b/_rst/tutorials/tutorial6/tutorial.html index 110a94b4..c21cd875 100644 --- a/_rst/tutorials/tutorial6/tutorial.html +++ b/_rst/tutorials/tutorial6/tutorial.html @@ -1,116 +1,497 @@ + - - - - - Tutorial: Building custom geometries with PINA Location class — PINA 0.1.1.post2407 documentation - - - - - - - - - - - - - - + + + + + + + + + Tutorial: Building custom geometries with PINA Location class — PINA 0.1.2 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
+ +
+ +
+ + + + + +
+
+ + + +
- - -
- -
-
-
-
    -
  • - - -
  • - View page source -
  • + +
    + + +
    +
    + +
    +
    + +
    + +
    + + + +
    -
    -
    - -
    -

    Tutorial: Building custom geometries with PINA Location class

    + +
    + + +
    +
    + + + + + +
    + +
    +

    Tutorial: Building custom geometries with PINA Location class#

    In this tutorial we will show how to use geometries in PINA. Specifically, the tutorial will include how to create geometries and how to visualize them. The topics covered are:

    @@ -129,8 +510,8 @@

    Tutorial: Building custom geometries with PINA -

    Built-in Geometries

    +
    +

    Built-in Geometries#

We will create one Cartesian domain and two ellipsoids. For the sake of simplicity, we show here the 2-dimensional case, but the extension to 3D (and higher) cases is trivial. The geometries also allow the @@ -207,8 +588,8 @@
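For instance (a sketch, with illustrative bounds):

.. code:: python

   from pina.geometry import CartesianDomain, EllipsoidDomain

   # illustrative 2D domains; sample() draws collocation points
   square = CartesianDomain({'x': [0, 1], 'y': [0, 1]})
   ellipse = EllipsoidDomain({'x': [-1, 1], 'y': [-1, 1]})
   points = square.sample(n=100, mode='random')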

    Built-in GeometriesEllipsoidDomain without the border is just the ellipse. We can also see that the CartesianDomain is just a square.

    -
    -

    Simplex Domain

    +
    +

    Simplex Domain#

    Among the built-in shapes, we quickly show here the usage of SimplexDomain, which can be used for polygonal domains!

    import torch
    @@ -236,10 +617,10 @@ 

    Simplex Domain -

    -
    -
    +

    +
    +

    Boolean Operations#

To create complex shapes we can use boolean operations, for example to merge two default geometries. We simply need to use the Union class: it takes a list of geometries and returns their union.
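A sketch, reusing the two domains defined above (assumed in scope):

.. code:: python

   from pina.geometry import Union

   merged = Union([square, ellipse])
   pts = merged.sample(n=200, mode='random')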

    @@ -280,9 +661,9 @@

    Boolean Operations -

    -
+
+

Create Custom Location#

We will take a look at how to create our own geometry. The one we will try to make is a heart defined by the function @@ -354,46 +735,110 @@

@@ -354,46 +735,110 @@

Create Custom Location

../../../_images/tutorial_36_0.png - -
-

What’s next?

+ +
+

What’s next?#

We have made a very simple tutorial on how to build custom geometries and use domain operations to compose base geometries. Now you can play around with different geometries and build your own!

-
- + + - - - - - - + - + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/_rst/tutorials/tutorial7/tutorial.html b/_rst/tutorials/tutorial7/tutorial.html index 656d3e29..9642bfbf 100644 --- a/_rst/tutorials/tutorial7/tutorial.html +++ b/_rst/tutorials/tutorial7/tutorial.html @@ -1,117 +1,499 @@ + - - - - - Tutorial: Resolution of an inverse problem — PINA 0.1.1.post2407 documentation - - - - - - - - - - - - - - + + + + + + + + + Tutorial: Resolution of an inverse problem — PINA 0.1.2 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + + + + + + + + + + + + +
+ + - -
-
+ + + + + + + + + + + + + + + + + - - - -
- -
-
-
-
    -
  • - - -
  • - View page source -
  • + +
    + + +
    +
    + +
    +
    + +
    + +
    + + + + +
    + +
    + +
    -
    -
    - -
    -

    Tutorial: Resolution of an inverse problem

    -
    -

    Introduction to the inverse problem

    +
    + + + + + +
    + +
    +

    Tutorial: Resolution of an inverse problem#

    +
    +

    Introduction to the inverse problem#

    This tutorial shows how to solve an inverse Poisson problem with Physics-Informed Neural Networks. The problem definition is that of a Poisson problem with homogeneous boundary conditions and it reads:

    @@ -168,9 +550,9 @@

    Introduction to the inverse problem -

    -
+
+

Inverse problem definition in PINA#

Then, we initialize the Poisson problem, which inherits from the SpatialProblem and InverseProblem classes. Here we have to define all the variables, and the domain where our unknown parameters @@ -296,40 +678,99 @@
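A skeleton of such a problem class (domains, variables and the unknown-parameter attribute name are illustrative; the conditions are omitted):

.. code:: python

   from pina.geometry import CartesianDomain
   from pina.problem import SpatialProblem, InverseProblem

   class InversePoisson(SpatialProblem, InverseProblem):
       output_variables = ['u']
       spatial_domain = CartesianDomain({'x': [-2, 2], 'y': [-2, 2]})
       # domain where the unknown parameters are searched for
       unknown_parameter_domain = CartesianDomain({'mu1': [-1, 1], 'mu2': [-1, 1]})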

Inverse problem definition in PINA - - +

+ - - - - - - + - + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/_rst/tutorials/tutorial8/tutorial.html b/_rst/tutorials/tutorial8/tutorial.html index 9333de2f..710a4d80 100644 --- a/_rst/tutorials/tutorial8/tutorial.html +++ b/_rst/tutorials/tutorial8/tutorial.html @@ -1,124 +1,512 @@ + - - - - - Tutorial: Reduced order model (PODNN) for parametric problems — PINA 0.1.1.post2407 documentation - - - - - - - - - - - - - - + + + + + + + + + Tutorial: Reduced order model (POD-RBF or POD-NN) for parametric problems — PINA 0.1.2 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - + + + + + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
- -
-
+ + + + + + + + + + + + + + + + + - - - -
- -
-
-
-
+ + + + + + + + + + + + +
-
- - - - + + + + + + + + - + + + + + + + \ No newline at end of file diff --git a/_rst/tutorials/tutorial9/tutorial.html b/_rst/tutorials/tutorial9/tutorial.html index 902a975a..904d8477 100644 --- a/_rst/tutorials/tutorial9/tutorial.html +++ b/_rst/tutorials/tutorial9/tutorial.html @@ -1,116 +1,497 @@ + - - - - - Tutorial: One dimensional Helmotz equation using Periodic Boundary Conditions — PINA 0.1.1.post2407 documentation - - - - - - - - - - - - - - + + + + + + + + + Tutorial: One dimensional Helmotz equation using Periodic Boundary Conditions — PINA 0.1.2 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + + + + + + + + + + + + +
+ + - -
-
+ + + + + + + + + + + + + + + + + - - - -
- -
-
-
-
+ - + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/_sources/_rst/_code.rst.txt b/_sources/_rst/_code.rst.txt index 9062b3b1..16a42986 100644 --- a/_sources/_rst/_code.rst.txt +++ b/_sources/_rst/_code.rst.txt @@ -1,7 +1,7 @@ Code Documentation ================== Welcome to PINA documentation! Here you can find the modules of the package divided in different sections. -The high-level structure of the package is depicted in our API. +The high-level structure of the package is depicted in our API. .. figure:: ../index_files/API_color.png :alt: PINA application program interface @@ -15,7 +15,7 @@ The pipeline to solve differential equations with PINA follows just five steps: 2. Generate data using built in `Geometries`_, or load high level simulation results as :doc:`LabelTensor ` 3. Choose or build one or more `Models`_ to solve the problem 4. Choose a solver across PINA available `Solvers`_, or build one using the :doc:`SolverInterface ` - 5. Train the model with the PINA :doc:`Trainer `, enhance the train with `Callbacks_` + 5. Train the model with the PINA :doc:`Trainer `, enhance the train with `Callbacks`_ PINA Features -------------- @@ -33,7 +33,7 @@ Solvers .. toctree:: :titlesonly: - + SolverInterface PINNInterface PINN @@ -59,6 +59,7 @@ Models FeedForward MultiFeedForward ResidualFeedForward + Spline DeepONet MIONet FourierIntegralKernel @@ -82,13 +83,14 @@ Layers Proper Orthogonal Decomposition Periodic Boundary Condition Embedding Fourier Feature Embedding + Radial Basis Function Interpolation Adaptive Activation Functions ------------------------------- .. toctree:: :titlesonly: - + Adaptive Function Interface Adaptive ReLU Adaptive Sigmoid @@ -102,14 +104,14 @@ Adaptive Activation Functions Adaptive Softmax Adaptive SIREN Adaptive Exp - + Equations and Operators ------------------------- .. toctree:: :titlesonly: - + Equations Differential Operators @@ -154,9 +156,9 @@ Callbacks .. toctree:: :titlesonly: - Metric tracking - Optimizer callbacks - Adaptive Refinments + Processing Callbacks + Optimizer Callbacks + Adaptive Refinment Callback Metrics and Losses -------------------- @@ -166,4 +168,4 @@ Metrics and Losses LossInterface LpLoss - PowerLoss \ No newline at end of file + PowerLoss diff --git a/_sources/_rst/_installation.rst.txt b/_sources/_rst/_installation.rst.txt index 0f6fdb9f..eae78dc9 100644 --- a/_sources/_rst/_installation.rst.txt +++ b/_sources/_rst/_installation.rst.txt @@ -1,7 +1,7 @@ Installation ============ -**PINA** requires requires `numpy`, `scipy`, `matplotlib`, `future`, `torch`, `sphinx` (for the documentation) and `pytest` (for local test). The code is tested for Python 3, while compatibility of Python 2 is not guaranteed anymore. It can be installed using `pip` or directly from the source code. +**PINA** requires requires `numpy`, `matplotlib`, `torch`, `lightning`, `sphinx` (for the documentation) and `pytest` (for local test). The code is tested for Python 3, while compatibility of Python 2 is not guaranteed anymore. It can be installed using `pip` or directly from the source code. Installing via PIP __________________ @@ -11,13 +11,13 @@ To install the package just type: .. code-block:: bash - $ pip install git+https://github.com/mathLab/PINA.git + $ pip install pina-mathlab To uninstall the package: .. 
code-block:: bash - $ pip uninstall pina + $ pip uninstall pina-mathlab Installing from source ______________________ diff --git a/_sources/_rst/_tutorial.rst.txt b/_sources/_rst/_tutorial.rst.txt index 756d42e2..4e2d2050 100644 --- a/_sources/_rst/_tutorial.rst.txt +++ b/_sources/_rst/_tutorial.rst.txt @@ -43,4 +43,4 @@ Supervised Learning :titlesonly: Unstructured convolutional autoencoder via continuous convolution - POD-NN for reduced order modeling + POD-RBF and POD-NN for reduced order modeling diff --git a/_sources/_rst/callbacks/processing_callbacks.rst.txt b/_sources/_rst/callbacks/processing_callbacks.rst.txt index e024a49a..bd3bbc84 100644 --- a/_sources/_rst/callbacks/processing_callbacks.rst.txt +++ b/_sources/_rst/callbacks/processing_callbacks.rst.txt @@ -3,5 +3,9 @@ Processing callbacks .. currentmodule:: pina.callbacks.processing_callbacks .. autoclass:: MetricTracker + :members: + :show-inheritance: + +.. autoclass:: PINAProgressBar :members: :show-inheritance: \ No newline at end of file diff --git a/_sources/_rst/layers/orthogonal.rst.txt b/_sources/_rst/layers/orthogonal.rst.txt new file mode 100644 index 00000000..6dfc4009 --- /dev/null +++ b/_sources/_rst/layers/orthogonal.rst.txt @@ -0,0 +1,7 @@ +OrthogonalBlock +====================== +.. currentmodule:: pina.model.layers.orthogonal + +.. autoclass:: OrthogonalBlock + :members: + :show-inheritance: \ No newline at end of file diff --git a/_sources/_rst/layers/rbf_layer.rst.txt b/_sources/_rst/layers/rbf_layer.rst.txt new file mode 100644 index 00000000..8736d1a2 --- /dev/null +++ b/_sources/_rst/layers/rbf_layer.rst.txt @@ -0,0 +1,7 @@ +RBFBlock +====================== +.. currentmodule:: pina.model.layers.rbf_layer + +.. autoclass:: RBFBlock + :members: + :show-inheritance: diff --git a/_sources/_rst/models/spline.rst.txt b/_sources/_rst/models/spline.rst.txt new file mode 100644 index 00000000..aa7450b7 --- /dev/null +++ b/_sources/_rst/models/spline.rst.txt @@ -0,0 +1,7 @@ +Spline +======== +.. currentmodule:: pina.model.spline + +.. autoclass:: Spline + :members: + :show-inheritance: \ No newline at end of file diff --git a/_sources/_rst/tutorials/tutorial8/tutorial.rst.txt b/_sources/_rst/tutorials/tutorial8/tutorial.rst.txt index b160e092..5e6dca93 100644 --- a/_sources/_rst/tutorials/tutorial8/tutorial.rst.txt +++ b/_sources/_rst/tutorials/tutorial8/tutorial.rst.txt @@ -1,18 +1,20 @@ -Tutorial: Reduced order model (PODNN) for parametric problems -=============================================================== +Tutorial: Reduced order model (POD-RBF or POD-NN) for parametric problems +========================================================================= The tutorial aims to show how to employ the **PINA** library in order to apply a reduced order modeling technique [1]. Such methodologies have several similarities with machine learning approaches, since the main -goal consists of predicting the solution of differential equations +goal consists in predicting the solution of differential equations (typically parametric PDEs) in a real-time fashion. In particular we are going to use the Proper Orthogonal Decomposition -with Neural Network (PODNN) [2], which basically perform a dimensional -reduction using the POD approach, approximating the parametric solution -manifold (at the reduced space) using a NN. In this example, we use a -simple multilayer perceptron, but the plenty of different archiutectures -can be plugged as well. 
+with either Radial Basis Function Interpolation (POD-RBF) or Neural +Network (POD-NN) [2]. Here we basically perform a dimensional reduction +using the POD approach, and approximate the parametric solution +manifold (in the reduced space) using an interpolation (RBF) or a +regression technique (NN). In this example, we use a simple multilayer +perceptron, but plenty of different architectures can be plugged in as +well. References ^^^^^^^^^^ @@ -30,25 +32,25 @@ minimum PINA version to run this tutorial is the ``0.1``. .. code:: ipython3 %matplotlib inline - + import matplotlib.pyplot as plt import torch import pina - + from pina.geometry import CartesianDomain - + from pina.problem import ParametricProblem - from pina.model.layers import PODBlock + from pina.model.layers import PODBlock, RBFBlock from pina import Condition, LabelTensor, Trainer from pina.model import FeedForward from pina.solvers import SupervisedSolver - + print(f'We are using PINA version {pina.__version__}') .. parsed-literal:: - We are using PINA version 0.1 + We are using PINA version 0.1.1 We exploit the `Smithers `__ library to @@ -60,26 +62,27 @@ snapshots of the velocity (along :math:`x`, :math:`y`, and the magnitude) and pressure fields, and the corresponding parameter values. To visually check the snapshots, let’s also plot the data points and the -reference solution: this is the expected output of the neural network. +reference solution: this is the expected output of our model. .. code:: ipython3 from smithers.dataset import NavierStokesDataset dataset = NavierStokesDataset() - + fig, axs = plt.subplots(1, 4, figsize=(14, 3)) for ax, p, u in zip(axs, dataset.params[:4], dataset.snapshots['mag(v)'][:4]): ax.tricontourf(dataset.triang, u, levels=16) ax.set_title(f'$\mu$ = {p[0]:.2f}') -.. image:: tutorial_files/tutorial_5_1.png + +.. image:: tutorial_files/tutorial_5_0.png The *snapshots* - aka the numerical solutions computed for several parameters - and the corresponding parameters are the only data we need -to train the model, in order to predict for any new test parameter the -solution. To properly validate the accuracy, we initially split the 500 +to train the model, in order to predict the solution for any new test +parameter. To properly validate the accuracy, we initially split the 500 snapshots into the training dataset (90% of the original data) and the testing one (the remaining 10%). It must be said that, to plug the snapshots into **PINA**, we have to cast them to ``LabelTensor`` @@ -89,10 +92,10 @@ objects. u = torch.tensor(dataset.snapshots['mag(v)']).float() p = torch.tensor(dataset.params).float() - + p = LabelTensor(p, labels=['mu']) u = LabelTensor(u, labels=[f's{i}' for i in range(u.shape[1])]) - + ratio_train_test = 0.9 n = u.shape n_train = int(u.shape[0] * ratio_train_test) @@ -109,17 +112,94 @@ methodology), just defining a simple *input-output* condition. class SnapshotProblem(ParametricProblem): output_variables = [f's{i}' for i in range(u.shape[1])] parameter_domain = CartesianDomain({'mu': [0, 100]}) - + conditions = { - 'io': Condition(input_points=p, output_points=u) + 'io': Condition(input_points=p_train, output_points=u_train) } -Then, we define the model we want to use: basically we have a MLP -architecture that takes in input the parameter and return the *modal -coefficients*, so the reduced dimension representation (the coordinates -in the POD space).
Such latent variable is the projected to the original -space using the POD modes, which are computed and stored in the -``PODBlock`` object. + poisson_problem = SnapshotProblem() + +We can then build a ``PODRBF`` model (using a Radial Basis Function +interpolation as the approximation) and a ``PODNN`` model (using an MLP +architecture as the approximation). + +POD-RBF reduced order model +--------------------------- + +Then, we define the model we want to use, with the POD (``PODBlock``) +and the RBF (``RBFBlock``) objects. + +.. code:: ipython3 + + class PODRBF(torch.nn.Module): + """ + Proper orthogonal decomposition with Radial Basis Function interpolation model. + """ + + def __init__(self, pod_rank, rbf_kernel): + """ + :param int pod_rank: rank of the POD layer. + :param str rbf_kernel: kernel of the RBF layer. + """ + super().__init__() + + self.pod = PODBlock(pod_rank) + self.rbf = RBFBlock(kernel=rbf_kernel) + + + def forward(self, x): + """ + Defines the computation performed at every call. + + :param x: The tensor to apply the forward pass. + :type x: torch.Tensor + :return: the output computed by the model. + :rtype: torch.Tensor + """ + coefficients = self.rbf(x) + return self.pod.expand(coefficients) + + def fit(self, p, x): + """ + Call the :meth:`pina.model.layers.PODBlock.fit` method of the + :attr:`pina.model.layers.PODBlock` attribute to perform the POD, + and the :meth:`pina.model.layers.RBFBlock.fit` method of the + :attr:`pina.model.layers.RBFBlock` attribute to fit the interpolation. + """ + self.pod.fit(x) + self.rbf.fit(p, self.pod.reduce(x)) +We can then fit the model and ask it to predict the required field for +unseen values of the parameters. Note that this model does not need a +``Trainer`` since it does not include any neural network or learnable +parameters. + +.. code:: ipython3 + + pod_rbf = PODRBF(pod_rank=20, rbf_kernel='thin_plate_spline') + pod_rbf.fit(p_train, u_train) + +.. code:: ipython3 + + u_test_rbf = pod_rbf(p_test) + u_train_rbf = pod_rbf(p_train) + + relative_error_train = torch.norm(u_train_rbf - u_train)/torch.norm(u_train) + relative_error_test = torch.norm(u_test_rbf - u_test)/torch.norm(u_test) + + print('Error summary for POD-RBF model:') + print(f' Train: {relative_error_train.item():e}') + print(f' Test: {relative_error_test.item():e}') + + +.. parsed-literal:: + + Error summary for POD-RBF model: + Train: 1.287801e-03 + Test: 1.217041e-03 + + +POD-NN reduced order model +-------------------------- .. code:: ipython3 class PODNN(torch.nn.Module): """ Proper orthogonal decomposition with neural network model. """ - + def __init__(self, pod_rank, layers, func): """ - + """ super().__init__() - + self.pod = PODBlock(pod_rank) self.nn = FeedForward( input_dimensions=1, output_dimensions=pod_rank, layers=layers, func=func ) - - + + def forward(self, x): """ Defines the computation performed at every call. - + :param x: The tensor to apply the forward pass. :type x: torch.Tensor :return: the output computed by the model.
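The PINA layers do all of this for us, but it may help to see what the POD-RBF pipeline boils down to without the wrappers. Below is a minimal, self-contained sketch in plain ``torch``; the toy snapshot matrix and the bare-bones thin-plate-spline solve are illustrative assumptions, not the actual internals of ``PODBlock`` or ``RBFBlock`` (a full thin-plate-spline interpolant, for instance, also carries a low-order polynomial term that is omitted here).

.. code:: ipython3

    import torch

    # Toy snapshot matrix for illustration: 200 snapshots of a
    # 512-dimensional field u(x; mu) = sin(pi * mu * x).
    params = torch.linspace(0.1, 1.0, 200).unsqueeze(1)      # (200, 1)
    grid = torch.linspace(0, 1, 512)
    snapshots = torch.sin(torch.pi * params * grid)          # (200, 512)

    # POD via truncated SVD: the leading right-singular vectors of the
    # snapshot matrix span the reduced (POD) space.
    rank = 20
    _, _, Vh = torch.linalg.svd(snapshots, full_matrices=False)
    modes = Vh[:rank]                                        # (20, 512)

    def reduce(u):   # field -> modal coefficients
        return u @ modes.T

    def expand(c):   # modal coefficients -> field
        return c @ modes

    # Thin-plate-spline kernel phi(r) = r^2 log(r), with phi(0) = 0.
    def tps(r):
        return r.pow(2) * torch.log(r.clamp_min(torch.finfo(r.dtype).tiny))

    # Interpolate the modal coefficients over the parameter space by
    # solving K w = c for the kernel weights (no polynomial tail here).
    coeffs = reduce(snapshots)                               # (200, 20)
    K = tps(torch.cdist(params, params))                     # (200, 200)
    weights = torch.linalg.lstsq(K, coeffs).solution         # (200, 20)

    # Prediction for an unseen parameter: evaluate the kernel against the
    # training parameters, then expand back to the full field via the modes.
    mu_new = torch.tensor([[0.42]])
    u_new = expand(tps(torch.cdist(mu_new, params)) @ weights)
    u_ref = torch.sin(torch.pi * 0.42 * grid)
    print(f'relative error: {(torch.norm(u_new - u_ref) / torch.norm(u_ref)).item():e}')

On this toy problem the reconstruction error is small; with real simulation data the accuracy depends on the POD rank and on the chosen kernel, which is precisely what ``pod_rank`` and ``rbf_kernel`` control in the ``PODRBF`` class above.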
@@ -154,7 +234,7 @@ space using the POD modes, which are computed and stored in the """ coefficients = self.nn(x) return self.pod.expand(coefficients) - + def fit_pod(self, x): """ Just call the :meth:`pina.model.layers.PODBlock.fit` method of the @@ -164,29 +244,27 @@ space using the POD modes, which are computed and stored in the We highlight that the POD modes are directly computed by means of the singular value decomposition (computed over the input data), and not -trained using the back-propagation approach. Only the weights of the MLP +trained using the backpropagation approach. Only the weights of the MLP are actually trained during the optimization loop. .. code:: ipython3 - poisson_problem = SnapshotProblem() - pod_nn = PODNN(pod_rank=20, layers=[10, 10, 10], func=torch.nn.Tanh) - pod_nn.fit_pod(u) - - pinn_stokes = SupervisedSolver( - problem=poisson_problem, - model=pod_nn, + pod_nn.fit_pod(u_train) + + pod_nn_stokes = SupervisedSolver( + problem=poisson_problem, + model=pod_nn, optimizer=torch.optim.Adam, optimizer_kwargs={'lr': 0.0001}) -Now that we set the ``Problem`` and the ``Model``, we have just to train -the model and use it for predict the test snapshots. +Now that we have set the ``Problem`` and the ``Model``, we just have to +train the model and use it to predict the test snapshots. .. code:: ipython3 trainer = Trainer( - solver=pinn_stokes, + solver=pod_nn_stokes, max_epochs=1000, batch_size=100, log_every_n_steps=5, @@ -196,15 +274,41 @@ the model and use it for predict the test snapshots. .. parsed-literal:: - `Trainer.fit` stopped: `max_epochs=1000` reached. + GPU available: True (cuda), used: False + TPU available: False, using: 0 TPU cores + IPU available: False, using: 0 IPUs + HPU available: False, using: 0 HPUs + /u/a/aivagnes/anaconda3/lib/python3.8/site-packages/pytorch_lightning/trainer/setup.py:187: GPU available but not used. You can set it by doing `Trainer(accelerator='gpu')`. + + | Name | Type | Params + ---------------------------------------- + 0 | _loss | MSELoss | 0 + 1 | _neural_net | Network | 460 + ---------------------------------------- + 460 Trainable params + 0 Non-trainable params + 460 Total params + 0.002 Total estimated model params size (MB) + /u/a/aivagnes/anaconda3/lib/python3.8/site-packages/torch/cuda/__init__.py:152: UserWarning: + Found GPU0 Quadro K600 which is of cuda capability 3.0. + PyTorch no longer supports this GPU because it is too old. + The minimum cuda capability supported by this library is 3.7. + + warnings.warn(old_gpu_warn % (d, name, major, minor, min_arch // 10, min_arch % 10)) + + + +.. parsed-literal:: + + Training: | | 0/? [00:00
-=================================================== +:html_theme.sidebar_secondary.remove: -Physics Informed Neural network for Advanced modeling (**PINA**) is -an open-source Python library providing an intuitive interface for -solving differential equations using PINNs, NOs or both together. -Based on `PyTorch `_ and `PyTorchLightning `_, -PINA offers a simple and intuitive way to formalize a specific (differential) problem -and solve it using neural networks . The approximated solution of a differential equation -can be implemented using PINA in a few lines of code thanks to the intuitive and user-friendly interface. +Welcome to PINA’s documentation! +======================================= -`PyTorchLightning `_ as backhand is done to offer -professional AI researchers and machine learning engineers the possibility of using advancement -training strategies provided by the library, such as multiple device training, modern model compression techniques, -gradient accumulation, and so on. In addition, it provides the possibility to add arbitrary -self-contained routines (callbacks) to the training for easy extensions without the need to touch the -underlying code. +.. grid:: 6 + :gutter: 1 -The high-level structure of the package is depicted in our API. The pipeline to solve differential equations -with PINA follows just five steps: problem definition, model selection, data generation, solver selection, and training. + .. grid-item:: -.. figure:: index_files/API_color.png - :alt: PINA application program interface - :align: center - :width: 600 + .. image:: index_files/tutorial_13_3.png + :target: _rst/tutorials/tutorial2/tutorial.html -| + .. grid-item:: -Physics-informed neural network -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + .. image:: index_files/tutorial_32_0.png + :target: _rst/tutorials/tutorial4/tutorial.html -`PINN `_ is a novel approach that -involves neural networks to solve differential equations in an unsupervised manner, while respecting -any given law of physics described by general differential equations. Proposed in "*Physics-informed neural -networks: A deep learning framework for solving forward and inverse problems -involving nonlinear partial differential equations*", such framework aims to -solve problems in a continuous and nonlinear settings. + .. grid-item:: -Neural operator learning -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + .. image:: index_files/tutorial_13_01.png + :target: _rst/tutorials/tutorial9/tutorial.html -`Neural Operators `_ is a novel approach involving neural networks -to learn differential operators using supervised learning strategies. By learning the differential operator, the -neural network is able to generalize across different instances of the differential equations (e.g. different forcing -terms), without the need of re-training. + .. grid-item:: + .. image:: index_files/tutorial_36_0.png + :target: _rst/tutorials/tutorial6/tutorial.html + .. grid-item:: + + .. image:: index_files/tutorial_15_0.png + :target: _rst/tutorials/tutorial13/tutorial.html + + .. grid-item:: + + .. image:: index_files/tutorial_5_0.png + :target: _rst/tutorials/tutorial10/tutorial.html + +.. grid:: 1 1 3 3 + + .. grid-item:: + :columns: 12 12 8 8 + + Physics Informed Neural network for Advanced modeling (**PINA**) is + an open-source Python library providing an intuitive interface for + solving differential equations using PINNs, NOs or both together. 
+ + Based on `PyTorch `_ and `PyTorchLightning `_, **PINA** offers a simple and intuitive way to formalize a specific (differential) problem + and solve it using neural networks. The approximated solution of a differential equation + can be implemented using PINA in a few lines of code thanks to the intuitive and user-friendly interface. + + + + For further information or questions about **PINA**, contact us by email. + -.. toctree:: - :maxdepth: 1 - :caption: Package Documentation: - API <_rst/_code> -.. the following is demo content intended to showcase some of the features you can invoke in reStructuredText -.. this can be safely deleted or commented out -.. ........................................................................................ + .. grid-item-card:: Contents + :class-title: sd-fs-5 + :class-body: sd-pl-4 + + .. toctree:: + :maxdepth: 1 + + Installing <_rst/_installation> + Tutorial <_rst/_tutorial> + API <_rst/_code> + Team & Fundings <_team.rst> + Contributing <_rst/_contributing> + License <_LICENSE.rst> + Cite PINA <_cite.rst> -.. toctree:: - :maxdepth: 1 - :caption: Getting Started: - Installation <_rst/_installation> - Tutorials <_rst/_tutorial> .. toctree:: - :maxdepth: 1 - :caption: Community: - Team & Fundings <_team.rst> - Contributing <_rst/_contributing> - License <_LICENSE.rst> - Cite PINA <_cite.rst> diff --git a/_sphinx_design_static/design-tabs.js b/_sphinx_design_static/design-tabs.js new file mode 100644 index 00000000..b25bd6a4 --- /dev/null +++ b/_sphinx_design_static/design-tabs.js @@ -0,0 +1,101 @@ +// @ts-check + +// Extra JS capability for selected tabs to be synced +// The selection is stored in local storage so that it persists across page loads. + +/** + * @type {Record} + */ +let sd_id_to_elements = {}; +const storageKeyPrefix = "sphinx-design-tab-id-"; + +/** + * Create a key for a tab element. + * @param {HTMLElement} el - The tab element. + * @returns {[string, string, string] | null} - The key. + * + */ +function create_key(el) { + let syncId = el.getAttribute("data-sync-id"); + let syncGroup = el.getAttribute("data-sync-group"); + if (!syncId || !syncGroup) return null; + return [syncGroup, syncId, syncGroup + "--" + syncId]; +} + +/** + * Initialize the tab selection.
+ * + */ +function ready() { + // Find all tabs with sync data + + /** @type {string[]} */ + let groups = []; + + document.querySelectorAll(".sd-tab-label").forEach((label) => { + if (label instanceof HTMLElement) { + let data = create_key(label); + if (data) { + let [group, id, key] = data; + + // add click event listener + // @ts-ignore + label.onclick = onSDLabelClick; + + // store map of key to elements + if (!sd_id_to_elements[key]) { + sd_id_to_elements[key] = []; + } + sd_id_to_elements[key].push(label); + + if (groups.indexOf(group) === -1) { + groups.push(group); + // Check if a specific tab has been selected via URL parameter + const tabParam = new URLSearchParams(window.location.search).get( + group + ); + if (tabParam) { + console.log( + "sphinx-design: Selecting tab id for group '" + + group + + "' from URL parameter: " + + tabParam + ); + window.sessionStorage.setItem(storageKeyPrefix + group, tabParam); + } + } + + // Check if a specific tab has been selected previously + let previousId = window.sessionStorage.getItem( + storageKeyPrefix + group + ); + if (previousId === id) { + // console.log( + // "sphinx-design: Selecting tab from session storage: " + id + // ); + // @ts-ignore + label.previousElementSibling.checked = true; + } + } + } + }); +} + +/** + * Activate other tabs with the same sync id. + * + * @this {HTMLElement} - The element that was clicked. + */ +function onSDLabelClick() { + let data = create_key(this); + if (!data) return; + let [group, id, key] = data; + for (const label of sd_id_to_elements[key]) { + if (label === this) continue; + // @ts-ignore + label.previousElementSibling.checked = true; + } + window.sessionStorage.setItem(storageKeyPrefix + group, id); +} + +document.addEventListener("DOMContentLoaded", ready, false); diff --git a/_sphinx_design_static/sphinx-design.min.css b/_sphinx_design_static/sphinx-design.min.css new file mode 100644 index 00000000..860c36da --- /dev/null +++ b/_sphinx_design_static/sphinx-design.min.css @@ -0,0 +1 @@ +.sd-bg-primary{background-color:var(--sd-color-primary) !important}.sd-bg-text-primary{color:var(--sd-color-primary-text) !important}button.sd-bg-primary:focus,button.sd-bg-primary:hover{background-color:var(--sd-color-primary-highlight) !important}a.sd-bg-primary:focus,a.sd-bg-primary:hover{background-color:var(--sd-color-primary-highlight) !important}.sd-bg-secondary{background-color:var(--sd-color-secondary) !important}.sd-bg-text-secondary{color:var(--sd-color-secondary-text) !important}button.sd-bg-secondary:focus,button.sd-bg-secondary:hover{background-color:var(--sd-color-secondary-highlight) !important}a.sd-bg-secondary:focus,a.sd-bg-secondary:hover{background-color:var(--sd-color-secondary-highlight) !important}.sd-bg-success{background-color:var(--sd-color-success) !important}.sd-bg-text-success{color:var(--sd-color-success-text) !important}button.sd-bg-success:focus,button.sd-bg-success:hover{background-color:var(--sd-color-success-highlight) !important}a.sd-bg-success:focus,a.sd-bg-success:hover{background-color:var(--sd-color-success-highlight) !important}.sd-bg-info{background-color:var(--sd-color-info) !important}.sd-bg-text-info{color:var(--sd-color-info-text) !important}button.sd-bg-info:focus,button.sd-bg-info:hover{background-color:var(--sd-color-info-highlight) !important}a.sd-bg-info:focus,a.sd-bg-info:hover{background-color:var(--sd-color-info-highlight) !important}.sd-bg-warning{background-color:var(--sd-color-warning) !important}.sd-bg-text-warning{color:var(--sd-color-warning-text)
!important}button.sd-bg-warning:focus,button.sd-bg-warning:hover{background-color:var(--sd-color-warning-highlight) !important}a.sd-bg-warning:focus,a.sd-bg-warning:hover{background-color:var(--sd-color-warning-highlight) !important}.sd-bg-danger{background-color:var(--sd-color-danger) !important}.sd-bg-text-danger{color:var(--sd-color-danger-text) !important}button.sd-bg-danger:focus,button.sd-bg-danger:hover{background-color:var(--sd-color-danger-highlight) !important}a.sd-bg-danger:focus,a.sd-bg-danger:hover{background-color:var(--sd-color-danger-highlight) !important}.sd-bg-light{background-color:var(--sd-color-light) !important}.sd-bg-text-light{color:var(--sd-color-light-text) !important}button.sd-bg-light:focus,button.sd-bg-light:hover{background-color:var(--sd-color-light-highlight) !important}a.sd-bg-light:focus,a.sd-bg-light:hover{background-color:var(--sd-color-light-highlight) !important}.sd-bg-muted{background-color:var(--sd-color-muted) !important}.sd-bg-text-muted{color:var(--sd-color-muted-text) !important}button.sd-bg-muted:focus,button.sd-bg-muted:hover{background-color:var(--sd-color-muted-highlight) !important}a.sd-bg-muted:focus,a.sd-bg-muted:hover{background-color:var(--sd-color-muted-highlight) !important}.sd-bg-dark{background-color:var(--sd-color-dark) !important}.sd-bg-text-dark{color:var(--sd-color-dark-text) !important}button.sd-bg-dark:focus,button.sd-bg-dark:hover{background-color:var(--sd-color-dark-highlight) !important}a.sd-bg-dark:focus,a.sd-bg-dark:hover{background-color:var(--sd-color-dark-highlight) !important}.sd-bg-black{background-color:var(--sd-color-black) !important}.sd-bg-text-black{color:var(--sd-color-black-text) !important}button.sd-bg-black:focus,button.sd-bg-black:hover{background-color:var(--sd-color-black-highlight) !important}a.sd-bg-black:focus,a.sd-bg-black:hover{background-color:var(--sd-color-black-highlight) !important}.sd-bg-white{background-color:var(--sd-color-white) !important}.sd-bg-text-white{color:var(--sd-color-white-text) !important}button.sd-bg-white:focus,button.sd-bg-white:hover{background-color:var(--sd-color-white-highlight) !important}a.sd-bg-white:focus,a.sd-bg-white:hover{background-color:var(--sd-color-white-highlight) !important}.sd-text-primary,.sd-text-primary>p{color:var(--sd-color-primary) !important}a.sd-text-primary:focus,a.sd-text-primary:hover{color:var(--sd-color-primary-highlight) !important}.sd-text-secondary,.sd-text-secondary>p{color:var(--sd-color-secondary) !important}a.sd-text-secondary:focus,a.sd-text-secondary:hover{color:var(--sd-color-secondary-highlight) !important}.sd-text-success,.sd-text-success>p{color:var(--sd-color-success) !important}a.sd-text-success:focus,a.sd-text-success:hover{color:var(--sd-color-success-highlight) !important}.sd-text-info,.sd-text-info>p{color:var(--sd-color-info) !important}a.sd-text-info:focus,a.sd-text-info:hover{color:var(--sd-color-info-highlight) !important}.sd-text-warning,.sd-text-warning>p{color:var(--sd-color-warning) !important}a.sd-text-warning:focus,a.sd-text-warning:hover{color:var(--sd-color-warning-highlight) !important}.sd-text-danger,.sd-text-danger>p{color:var(--sd-color-danger) !important}a.sd-text-danger:focus,a.sd-text-danger:hover{color:var(--sd-color-danger-highlight) !important}.sd-text-light,.sd-text-light>p{color:var(--sd-color-light) !important}a.sd-text-light:focus,a.sd-text-light:hover{color:var(--sd-color-light-highlight) !important}.sd-text-muted,.sd-text-muted>p{color:var(--sd-color-muted) 
!important}a.sd-text-muted:focus,a.sd-text-muted:hover{color:var(--sd-color-muted-highlight) !important}.sd-text-dark,.sd-text-dark>p{color:var(--sd-color-dark) !important}a.sd-text-dark:focus,a.sd-text-dark:hover{color:var(--sd-color-dark-highlight) !important}.sd-text-black,.sd-text-black>p{color:var(--sd-color-black) !important}a.sd-text-black:focus,a.sd-text-black:hover{color:var(--sd-color-black-highlight) !important}.sd-text-white,.sd-text-white>p{color:var(--sd-color-white) !important}a.sd-text-white:focus,a.sd-text-white:hover{color:var(--sd-color-white-highlight) !important}.sd-outline-primary{border-color:var(--sd-color-primary) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-primary:focus,a.sd-outline-primary:hover{border-color:var(--sd-color-primary-highlight) !important}.sd-outline-secondary{border-color:var(--sd-color-secondary) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-secondary:focus,a.sd-outline-secondary:hover{border-color:var(--sd-color-secondary-highlight) !important}.sd-outline-success{border-color:var(--sd-color-success) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-success:focus,a.sd-outline-success:hover{border-color:var(--sd-color-success-highlight) !important}.sd-outline-info{border-color:var(--sd-color-info) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-info:focus,a.sd-outline-info:hover{border-color:var(--sd-color-info-highlight) !important}.sd-outline-warning{border-color:var(--sd-color-warning) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-warning:focus,a.sd-outline-warning:hover{border-color:var(--sd-color-warning-highlight) !important}.sd-outline-danger{border-color:var(--sd-color-danger) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-danger:focus,a.sd-outline-danger:hover{border-color:var(--sd-color-danger-highlight) !important}.sd-outline-light{border-color:var(--sd-color-light) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-light:focus,a.sd-outline-light:hover{border-color:var(--sd-color-light-highlight) !important}.sd-outline-muted{border-color:var(--sd-color-muted) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-muted:focus,a.sd-outline-muted:hover{border-color:var(--sd-color-muted-highlight) !important}.sd-outline-dark{border-color:var(--sd-color-dark) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-dark:focus,a.sd-outline-dark:hover{border-color:var(--sd-color-dark-highlight) !important}.sd-outline-black{border-color:var(--sd-color-black) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-black:focus,a.sd-outline-black:hover{border-color:var(--sd-color-black-highlight) !important}.sd-outline-white{border-color:var(--sd-color-white) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-white:focus,a.sd-outline-white:hover{border-color:var(--sd-color-white-highlight) !important}.sd-bg-transparent{background-color:transparent !important}.sd-outline-transparent{border-color:transparent !important}.sd-text-transparent{color:transparent !important}.sd-p-0{padding:0 !important}.sd-pt-0,.sd-py-0{padding-top:0 !important}.sd-pr-0,.sd-px-0{padding-right:0 !important}.sd-pb-0,.sd-py-0{padding-bottom:0 !important}.sd-pl-0,.sd-px-0{padding-left:0 !important}.sd-p-1{padding:.25rem 
!important}.sd-pt-1,.sd-py-1{padding-top:.25rem !important}.sd-pr-1,.sd-px-1{padding-right:.25rem !important}.sd-pb-1,.sd-py-1{padding-bottom:.25rem !important}.sd-pl-1,.sd-px-1{padding-left:.25rem !important}.sd-p-2{padding:.5rem !important}.sd-pt-2,.sd-py-2{padding-top:.5rem !important}.sd-pr-2,.sd-px-2{padding-right:.5rem !important}.sd-pb-2,.sd-py-2{padding-bottom:.5rem !important}.sd-pl-2,.sd-px-2{padding-left:.5rem !important}.sd-p-3{padding:1rem !important}.sd-pt-3,.sd-py-3{padding-top:1rem !important}.sd-pr-3,.sd-px-3{padding-right:1rem !important}.sd-pb-3,.sd-py-3{padding-bottom:1rem !important}.sd-pl-3,.sd-px-3{padding-left:1rem !important}.sd-p-4{padding:1.5rem !important}.sd-pt-4,.sd-py-4{padding-top:1.5rem !important}.sd-pr-4,.sd-px-4{padding-right:1.5rem !important}.sd-pb-4,.sd-py-4{padding-bottom:1.5rem !important}.sd-pl-4,.sd-px-4{padding-left:1.5rem !important}.sd-p-5{padding:3rem !important}.sd-pt-5,.sd-py-5{padding-top:3rem !important}.sd-pr-5,.sd-px-5{padding-right:3rem !important}.sd-pb-5,.sd-py-5{padding-bottom:3rem !important}.sd-pl-5,.sd-px-5{padding-left:3rem !important}.sd-m-auto{margin:auto !important}.sd-mt-auto,.sd-my-auto{margin-top:auto !important}.sd-mr-auto,.sd-mx-auto{margin-right:auto !important}.sd-mb-auto,.sd-my-auto{margin-bottom:auto !important}.sd-ml-auto,.sd-mx-auto{margin-left:auto !important}.sd-m-0{margin:0 !important}.sd-mt-0,.sd-my-0{margin-top:0 !important}.sd-mr-0,.sd-mx-0{margin-right:0 !important}.sd-mb-0,.sd-my-0{margin-bottom:0 !important}.sd-ml-0,.sd-mx-0{margin-left:0 !important}.sd-m-1{margin:.25rem !important}.sd-mt-1,.sd-my-1{margin-top:.25rem !important}.sd-mr-1,.sd-mx-1{margin-right:.25rem !important}.sd-mb-1,.sd-my-1{margin-bottom:.25rem !important}.sd-ml-1,.sd-mx-1{margin-left:.25rem !important}.sd-m-2{margin:.5rem !important}.sd-mt-2,.sd-my-2{margin-top:.5rem !important}.sd-mr-2,.sd-mx-2{margin-right:.5rem !important}.sd-mb-2,.sd-my-2{margin-bottom:.5rem !important}.sd-ml-2,.sd-mx-2{margin-left:.5rem !important}.sd-m-3{margin:1rem !important}.sd-mt-3,.sd-my-3{margin-top:1rem !important}.sd-mr-3,.sd-mx-3{margin-right:1rem !important}.sd-mb-3,.sd-my-3{margin-bottom:1rem !important}.sd-ml-3,.sd-mx-3{margin-left:1rem !important}.sd-m-4{margin:1.5rem !important}.sd-mt-4,.sd-my-4{margin-top:1.5rem !important}.sd-mr-4,.sd-mx-4{margin-right:1.5rem !important}.sd-mb-4,.sd-my-4{margin-bottom:1.5rem !important}.sd-ml-4,.sd-mx-4{margin-left:1.5rem !important}.sd-m-5{margin:3rem !important}.sd-mt-5,.sd-my-5{margin-top:3rem !important}.sd-mr-5,.sd-mx-5{margin-right:3rem !important}.sd-mb-5,.sd-my-5{margin-bottom:3rem !important}.sd-ml-5,.sd-mx-5{margin-left:3rem !important}.sd-w-25{width:25% !important}.sd-w-50{width:50% !important}.sd-w-75{width:75% !important}.sd-w-100{width:100% !important}.sd-w-auto{width:auto !important}.sd-h-25{height:25% !important}.sd-h-50{height:50% !important}.sd-h-75{height:75% !important}.sd-h-100{height:100% !important}.sd-h-auto{height:auto !important}.sd-d-none{display:none !important}.sd-d-inline{display:inline !important}.sd-d-inline-block{display:inline-block !important}.sd-d-block{display:block !important}.sd-d-grid{display:grid !important}.sd-d-flex-row{display:-ms-flexbox !important;display:flex !important;flex-direction:row !important}.sd-d-flex-column{display:-ms-flexbox !important;display:flex !important;flex-direction:column !important}.sd-d-inline-flex{display:-ms-inline-flexbox !important;display:inline-flex !important}@media(min-width: 576px){.sd-d-sm-none{display:none 
!important}.sd-d-sm-inline{display:inline !important}.sd-d-sm-inline-block{display:inline-block !important}.sd-d-sm-block{display:block !important}.sd-d-sm-grid{display:grid !important}.sd-d-sm-flex{display:-ms-flexbox !important;display:flex !important}.sd-d-sm-inline-flex{display:-ms-inline-flexbox !important;display:inline-flex !important}}@media(min-width: 768px){.sd-d-md-none{display:none !important}.sd-d-md-inline{display:inline !important}.sd-d-md-inline-block{display:inline-block !important}.sd-d-md-block{display:block !important}.sd-d-md-grid{display:grid !important}.sd-d-md-flex{display:-ms-flexbox !important;display:flex !important}.sd-d-md-inline-flex{display:-ms-inline-flexbox !important;display:inline-flex !important}}@media(min-width: 992px){.sd-d-lg-none{display:none !important}.sd-d-lg-inline{display:inline !important}.sd-d-lg-inline-block{display:inline-block !important}.sd-d-lg-block{display:block !important}.sd-d-lg-grid{display:grid !important}.sd-d-lg-flex{display:-ms-flexbox !important;display:flex !important}.sd-d-lg-inline-flex{display:-ms-inline-flexbox !important;display:inline-flex !important}}@media(min-width: 1200px){.sd-d-xl-none{display:none !important}.sd-d-xl-inline{display:inline !important}.sd-d-xl-inline-block{display:inline-block !important}.sd-d-xl-block{display:block !important}.sd-d-xl-grid{display:grid !important}.sd-d-xl-flex{display:-ms-flexbox !important;display:flex !important}.sd-d-xl-inline-flex{display:-ms-inline-flexbox !important;display:inline-flex !important}}.sd-align-major-start{justify-content:flex-start !important}.sd-align-major-end{justify-content:flex-end !important}.sd-align-major-center{justify-content:center !important}.sd-align-major-justify{justify-content:space-between !important}.sd-align-major-spaced{justify-content:space-evenly !important}.sd-align-minor-start{align-items:flex-start !important}.sd-align-minor-end{align-items:flex-end !important}.sd-align-minor-center{align-items:center !important}.sd-align-minor-stretch{align-items:stretch !important}.sd-text-justify{text-align:justify !important}.sd-text-left{text-align:left !important}.sd-text-right{text-align:right !important}.sd-text-center{text-align:center !important}.sd-font-weight-light{font-weight:300 !important}.sd-font-weight-lighter{font-weight:lighter !important}.sd-font-weight-normal{font-weight:400 !important}.sd-font-weight-bold{font-weight:700 !important}.sd-font-weight-bolder{font-weight:bolder !important}.sd-font-italic{font-style:italic !important}.sd-text-decoration-none{text-decoration:none !important}.sd-text-lowercase{text-transform:lowercase !important}.sd-text-uppercase{text-transform:uppercase !important}.sd-text-capitalize{text-transform:capitalize !important}.sd-text-wrap{white-space:normal !important}.sd-text-nowrap{white-space:nowrap !important}.sd-text-truncate{overflow:hidden;text-overflow:ellipsis;white-space:nowrap}.sd-fs-1,.sd-fs-1>p{font-size:calc(1.375rem + 1.5vw) !important;line-height:unset !important}.sd-fs-2,.sd-fs-2>p{font-size:calc(1.325rem + 0.9vw) !important;line-height:unset !important}.sd-fs-3,.sd-fs-3>p{font-size:calc(1.3rem + 0.6vw) !important;line-height:unset !important}.sd-fs-4,.sd-fs-4>p{font-size:calc(1.275rem + 0.3vw) !important;line-height:unset !important}.sd-fs-5,.sd-fs-5>p{font-size:1.25rem !important;line-height:unset !important}.sd-fs-6,.sd-fs-6>p{font-size:1rem !important;line-height:unset !important}.sd-border-0{border:0 solid !important}.sd-border-top-0{border-top:0 solid 
!important}.sd-border-bottom-0{border-bottom:0 solid !important}.sd-border-right-0{border-right:0 solid !important}.sd-border-left-0{border-left:0 solid !important}.sd-border-1{border:1px solid !important}.sd-border-top-1{border-top:1px solid !important}.sd-border-bottom-1{border-bottom:1px solid !important}.sd-border-right-1{border-right:1px solid !important}.sd-border-left-1{border-left:1px solid !important}.sd-border-2{border:2px solid !important}.sd-border-top-2{border-top:2px solid !important}.sd-border-bottom-2{border-bottom:2px solid !important}.sd-border-right-2{border-right:2px solid !important}.sd-border-left-2{border-left:2px solid !important}.sd-border-3{border:3px solid !important}.sd-border-top-3{border-top:3px solid !important}.sd-border-bottom-3{border-bottom:3px solid !important}.sd-border-right-3{border-right:3px solid !important}.sd-border-left-3{border-left:3px solid !important}.sd-border-4{border:4px solid !important}.sd-border-top-4{border-top:4px solid !important}.sd-border-bottom-4{border-bottom:4px solid !important}.sd-border-right-4{border-right:4px solid !important}.sd-border-left-4{border-left:4px solid !important}.sd-border-5{border:5px solid !important}.sd-border-top-5{border-top:5px solid !important}.sd-border-bottom-5{border-bottom:5px solid !important}.sd-border-right-5{border-right:5px solid !important}.sd-border-left-5{border-left:5px solid !important}.sd-rounded-0{border-radius:0 !important}.sd-rounded-1{border-radius:.2rem !important}.sd-rounded-2{border-radius:.3rem !important}.sd-rounded-3{border-radius:.5rem !important}.sd-rounded-pill{border-radius:50rem !important}.sd-rounded-circle{border-radius:50% !important}.shadow-none{box-shadow:none !important}.sd-shadow-sm{box-shadow:0 .125rem .25rem var(--sd-color-shadow) !important}.sd-shadow-md{box-shadow:0 .5rem 1rem var(--sd-color-shadow) !important}.sd-shadow-lg{box-shadow:0 1rem 3rem var(--sd-color-shadow) !important}@keyframes sd-slide-from-left{0%{transform:translateX(-100%)}100%{transform:translateX(0)}}@keyframes sd-slide-from-right{0%{transform:translateX(200%)}100%{transform:translateX(0)}}@keyframes sd-grow100{0%{transform:scale(0);opacity:.5}100%{transform:scale(1);opacity:1}}@keyframes sd-grow50{0%{transform:scale(0.5);opacity:.5}100%{transform:scale(1);opacity:1}}@keyframes sd-grow50-rot20{0%{transform:scale(0.5) rotateZ(-20deg);opacity:.5}75%{transform:scale(1) rotateZ(5deg);opacity:1}95%{transform:scale(1) rotateZ(-1deg);opacity:1}100%{transform:scale(1) rotateZ(0);opacity:1}}.sd-animate-slide-from-left{animation:1s ease-out 0s 1 normal none running sd-slide-from-left}.sd-animate-slide-from-right{animation:1s ease-out 0s 1 normal none running sd-slide-from-right}.sd-animate-grow100{animation:1s ease-out 0s 1 normal none running sd-grow100}.sd-animate-grow50{animation:1s ease-out 0s 1 normal none running sd-grow50}.sd-animate-grow50-rot20{animation:1s ease-out 0s 1 normal none running sd-grow50-rot20}.sd-badge{display:inline-block;padding:.35em .65em;font-size:.75em;font-weight:700;line-height:1;text-align:center;white-space:nowrap;vertical-align:baseline;border-radius:.25rem}.sd-badge:empty{display:none}a.sd-badge{text-decoration:none}.sd-btn .sd-badge{position:relative;top:-1px}.sd-btn{background-color:transparent;border:1px solid transparent;border-radius:.25rem;cursor:pointer;display:inline-block;font-weight:400;font-size:1rem;line-height:1.5;padding:.375rem .75rem;text-align:center;text-decoration:none;transition:color .15s ease-in-out,background-color .15s ease-in-out,border-color 
.15s ease-in-out,box-shadow .15s ease-in-out;vertical-align:middle;user-select:none;-moz-user-select:none;-ms-user-select:none;-webkit-user-select:none}.sd-btn:hover{text-decoration:none}@media(prefers-reduced-motion: reduce){.sd-btn{transition:none}}.sd-btn-primary,.sd-btn-outline-primary:hover,.sd-btn-outline-primary:focus{color:var(--sd-color-primary-text) !important;background-color:var(--sd-color-primary) !important;border-color:var(--sd-color-primary) !important;border-width:1px !important;border-style:solid !important}.sd-btn-primary:hover,.sd-btn-primary:focus{color:var(--sd-color-primary-text) !important;background-color:var(--sd-color-primary-highlight) !important;border-color:var(--sd-color-primary-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-primary{color:var(--sd-color-primary) !important;border-color:var(--sd-color-primary) !important;border-width:1px !important;border-style:solid !important}.sd-btn-secondary,.sd-btn-outline-secondary:hover,.sd-btn-outline-secondary:focus{color:var(--sd-color-secondary-text) !important;background-color:var(--sd-color-secondary) !important;border-color:var(--sd-color-secondary) !important;border-width:1px !important;border-style:solid !important}.sd-btn-secondary:hover,.sd-btn-secondary:focus{color:var(--sd-color-secondary-text) !important;background-color:var(--sd-color-secondary-highlight) !important;border-color:var(--sd-color-secondary-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-secondary{color:var(--sd-color-secondary) !important;border-color:var(--sd-color-secondary) !important;border-width:1px !important;border-style:solid !important}.sd-btn-success,.sd-btn-outline-success:hover,.sd-btn-outline-success:focus{color:var(--sd-color-success-text) !important;background-color:var(--sd-color-success) !important;border-color:var(--sd-color-success) !important;border-width:1px !important;border-style:solid !important}.sd-btn-success:hover,.sd-btn-success:focus{color:var(--sd-color-success-text) !important;background-color:var(--sd-color-success-highlight) !important;border-color:var(--sd-color-success-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-success{color:var(--sd-color-success) !important;border-color:var(--sd-color-success) !important;border-width:1px !important;border-style:solid !important}.sd-btn-info,.sd-btn-outline-info:hover,.sd-btn-outline-info:focus{color:var(--sd-color-info-text) !important;background-color:var(--sd-color-info) !important;border-color:var(--sd-color-info) !important;border-width:1px !important;border-style:solid !important}.sd-btn-info:hover,.sd-btn-info:focus{color:var(--sd-color-info-text) !important;background-color:var(--sd-color-info-highlight) !important;border-color:var(--sd-color-info-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-info{color:var(--sd-color-info) !important;border-color:var(--sd-color-info) !important;border-width:1px !important;border-style:solid !important}.sd-btn-warning,.sd-btn-outline-warning:hover,.sd-btn-outline-warning:focus{color:var(--sd-color-warning-text) !important;background-color:var(--sd-color-warning) !important;border-color:var(--sd-color-warning) !important;border-width:1px !important;border-style:solid !important}.sd-btn-warning:hover,.sd-btn-warning:focus{color:var(--sd-color-warning-text) !important;background-color:var(--sd-color-warning-highlight) 
!important;border-color:var(--sd-color-warning-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-warning{color:var(--sd-color-warning) !important;border-color:var(--sd-color-warning) !important;border-width:1px !important;border-style:solid !important}.sd-btn-danger,.sd-btn-outline-danger:hover,.sd-btn-outline-danger:focus{color:var(--sd-color-danger-text) !important;background-color:var(--sd-color-danger) !important;border-color:var(--sd-color-danger) !important;border-width:1px !important;border-style:solid !important}.sd-btn-danger:hover,.sd-btn-danger:focus{color:var(--sd-color-danger-text) !important;background-color:var(--sd-color-danger-highlight) !important;border-color:var(--sd-color-danger-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-danger{color:var(--sd-color-danger) !important;border-color:var(--sd-color-danger) !important;border-width:1px !important;border-style:solid !important}.sd-btn-light,.sd-btn-outline-light:hover,.sd-btn-outline-light:focus{color:var(--sd-color-light-text) !important;background-color:var(--sd-color-light) !important;border-color:var(--sd-color-light) !important;border-width:1px !important;border-style:solid !important}.sd-btn-light:hover,.sd-btn-light:focus{color:var(--sd-color-light-text) !important;background-color:var(--sd-color-light-highlight) !important;border-color:var(--sd-color-light-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-light{color:var(--sd-color-light) !important;border-color:var(--sd-color-light) !important;border-width:1px !important;border-style:solid !important}.sd-btn-muted,.sd-btn-outline-muted:hover,.sd-btn-outline-muted:focus{color:var(--sd-color-muted-text) !important;background-color:var(--sd-color-muted) !important;border-color:var(--sd-color-muted) !important;border-width:1px !important;border-style:solid !important}.sd-btn-muted:hover,.sd-btn-muted:focus{color:var(--sd-color-muted-text) !important;background-color:var(--sd-color-muted-highlight) !important;border-color:var(--sd-color-muted-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-muted{color:var(--sd-color-muted) !important;border-color:var(--sd-color-muted) !important;border-width:1px !important;border-style:solid !important}.sd-btn-dark,.sd-btn-outline-dark:hover,.sd-btn-outline-dark:focus{color:var(--sd-color-dark-text) !important;background-color:var(--sd-color-dark) !important;border-color:var(--sd-color-dark) !important;border-width:1px !important;border-style:solid !important}.sd-btn-dark:hover,.sd-btn-dark:focus{color:var(--sd-color-dark-text) !important;background-color:var(--sd-color-dark-highlight) !important;border-color:var(--sd-color-dark-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-dark{color:var(--sd-color-dark) !important;border-color:var(--sd-color-dark) !important;border-width:1px !important;border-style:solid !important}.sd-btn-black,.sd-btn-outline-black:hover,.sd-btn-outline-black:focus{color:var(--sd-color-black-text) !important;background-color:var(--sd-color-black) !important;border-color:var(--sd-color-black) !important;border-width:1px !important;border-style:solid !important}.sd-btn-black:hover,.sd-btn-black:focus{color:var(--sd-color-black-text) !important;background-color:var(--sd-color-black-highlight) !important;border-color:var(--sd-color-black-highlight) !important;border-width:1px 
!important;border-style:solid !important}.sd-btn-outline-black{color:var(--sd-color-black) !important;border-color:var(--sd-color-black) !important;border-width:1px !important;border-style:solid !important}.sd-btn-white,.sd-btn-outline-white:hover,.sd-btn-outline-white:focus{color:var(--sd-color-white-text) !important;background-color:var(--sd-color-white) !important;border-color:var(--sd-color-white) !important;border-width:1px !important;border-style:solid !important}.sd-btn-white:hover,.sd-btn-white:focus{color:var(--sd-color-white-text) !important;background-color:var(--sd-color-white-highlight) !important;border-color:var(--sd-color-white-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-white{color:var(--sd-color-white) !important;border-color:var(--sd-color-white) !important;border-width:1px !important;border-style:solid !important}.sd-stretched-link::after{position:absolute;top:0;right:0;bottom:0;left:0;z-index:1;content:""}.sd-hide-link-text{font-size:0}.sd-octicon,.sd-material-icon{display:inline-block;fill:currentColor;vertical-align:middle}.sd-avatar-xs{border-radius:50%;object-fit:cover;object-position:center;width:1rem;height:1rem}.sd-avatar-sm{border-radius:50%;object-fit:cover;object-position:center;width:3rem;height:3rem}.sd-avatar-md{border-radius:50%;object-fit:cover;object-position:center;width:5rem;height:5rem}.sd-avatar-lg{border-radius:50%;object-fit:cover;object-position:center;width:7rem;height:7rem}.sd-avatar-xl{border-radius:50%;object-fit:cover;object-position:center;width:10rem;height:10rem}.sd-avatar-inherit{border-radius:50%;object-fit:cover;object-position:center;width:inherit;height:inherit}.sd-avatar-initial{border-radius:50%;object-fit:cover;object-position:center;width:initial;height:initial}.sd-card{background-clip:border-box;background-color:var(--sd-color-card-background);border:1px solid var(--sd-color-card-border);border-radius:.25rem;color:var(--sd-color-card-text);display:-ms-flexbox;display:flex;-ms-flex-direction:column;flex-direction:column;min-width:0;position:relative;word-wrap:break-word}.sd-card>hr{margin-left:0;margin-right:0}.sd-card-hover:hover{border-color:var(--sd-color-card-border-hover);transform:scale(1.01)}.sd-card-body{-ms-flex:1 1 auto;flex:1 1 auto;padding:1rem 1rem}.sd-card-title{margin-bottom:.5rem}.sd-card-subtitle{margin-top:-0.25rem;margin-bottom:0}.sd-card-text:last-child{margin-bottom:0}.sd-card-link:hover{text-decoration:none}.sd-card-link+.card-link{margin-left:1rem}.sd-card-header{padding:.5rem 1rem;margin-bottom:0;background-color:var(--sd-color-card-header);border-bottom:1px solid var(--sd-color-card-border)}.sd-card-header:first-child{border-radius:calc(0.25rem - 1px) calc(0.25rem - 1px) 0 0}.sd-card-footer{padding:.5rem 1rem;background-color:var(--sd-color-card-footer);border-top:1px solid var(--sd-color-card-border)}.sd-card-footer:last-child{border-radius:0 0 calc(0.25rem - 1px) calc(0.25rem - 1px)}.sd-card-header-tabs{margin-right:-0.5rem;margin-bottom:-0.5rem;margin-left:-0.5rem;border-bottom:0}.sd-card-header-pills{margin-right:-0.5rem;margin-left:-0.5rem}.sd-card-img-overlay{position:absolute;top:0;right:0;bottom:0;left:0;padding:1rem;border-radius:calc(0.25rem - 1px)}.sd-card-img,.sd-card-img-bottom,.sd-card-img-top{width:100%}.sd-card-img,.sd-card-img-top{border-top-left-radius:calc(0.25rem - 1px);border-top-right-radius:calc(0.25rem - 1px)}.sd-card-img,.sd-card-img-bottom{border-bottom-left-radius:calc(0.25rem - 1px);border-bottom-right-radius:calc(0.25rem - 
1px)}.sd-cards-carousel{width:100%;display:flex;flex-wrap:nowrap;-ms-flex-direction:row;flex-direction:row;overflow-x:hidden;scroll-snap-type:x mandatory}.sd-cards-carousel.sd-show-scrollbar{overflow-x:auto}.sd-cards-carousel:hover,.sd-cards-carousel:focus{overflow-x:auto}.sd-cards-carousel>.sd-card{flex-shrink:0;scroll-snap-align:start}.sd-cards-carousel>.sd-card:not(:last-child){margin-right:3px}.sd-card-cols-1>.sd-card{width:90%}.sd-card-cols-2>.sd-card{width:45%}.sd-card-cols-3>.sd-card{width:30%}.sd-card-cols-4>.sd-card{width:22.5%}.sd-card-cols-5>.sd-card{width:18%}.sd-card-cols-6>.sd-card{width:15%}.sd-card-cols-7>.sd-card{width:12.8571428571%}.sd-card-cols-8>.sd-card{width:11.25%}.sd-card-cols-9>.sd-card{width:10%}.sd-card-cols-10>.sd-card{width:9%}.sd-card-cols-11>.sd-card{width:8.1818181818%}.sd-card-cols-12>.sd-card{width:7.5%}.sd-container,.sd-container-fluid,.sd-container-lg,.sd-container-md,.sd-container-sm,.sd-container-xl{margin-left:auto;margin-right:auto;padding-left:var(--sd-gutter-x, 0.75rem);padding-right:var(--sd-gutter-x, 0.75rem);width:100%}@media(min-width: 576px){.sd-container-sm,.sd-container{max-width:540px}}@media(min-width: 768px){.sd-container-md,.sd-container-sm,.sd-container{max-width:720px}}@media(min-width: 992px){.sd-container-lg,.sd-container-md,.sd-container-sm,.sd-container{max-width:960px}}@media(min-width: 1200px){.sd-container-xl,.sd-container-lg,.sd-container-md,.sd-container-sm,.sd-container{max-width:1140px}}.sd-row{--sd-gutter-x: 1.5rem;--sd-gutter-y: 0;display:-ms-flexbox;display:flex;-ms-flex-wrap:wrap;flex-wrap:wrap;margin-top:calc(var(--sd-gutter-y) * -1);margin-right:calc(var(--sd-gutter-x) * -0.5);margin-left:calc(var(--sd-gutter-x) * -0.5)}.sd-row>*{box-sizing:border-box;flex-shrink:0;width:100%;max-width:100%;padding-right:calc(var(--sd-gutter-x) * 0.5);padding-left:calc(var(--sd-gutter-x) * 0.5);margin-top:var(--sd-gutter-y)}.sd-col{flex:1 0 0%;-ms-flex:1 0 0%}.sd-row-cols-auto>*{flex:0 0 auto;width:auto}.sd-row-cols-1>*{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-row-cols-2>*{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-row-cols-3>*{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-row-cols-4>*{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-row-cols-5>*{flex:0 0 auto;-ms-flex:0 0 auto;width:20%}.sd-row-cols-6>*{flex:0 0 auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-row-cols-7>*{flex:0 0 auto;-ms-flex:0 0 auto;width:14.2857142857%}.sd-row-cols-8>*{flex:0 0 auto;-ms-flex:0 0 auto;width:12.5%}.sd-row-cols-9>*{flex:0 0 auto;-ms-flex:0 0 auto;width:11.1111111111%}.sd-row-cols-10>*{flex:0 0 auto;-ms-flex:0 0 auto;width:10%}.sd-row-cols-11>*{flex:0 0 auto;-ms-flex:0 0 auto;width:9.0909090909%}.sd-row-cols-12>*{flex:0 0 auto;-ms-flex:0 0 auto;width:8.3333333333%}@media(min-width: 576px){.sd-col-sm{flex:1 0 0%;-ms-flex:1 0 0%}.sd-row-cols-sm-auto{flex:1 0 auto;-ms-flex:1 0 auto;width:100%}.sd-row-cols-sm-1>*{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-row-cols-sm-2>*{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-row-cols-sm-3>*{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-row-cols-sm-4>*{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-row-cols-sm-5>*{flex:0 0 auto;-ms-flex:0 0 auto;width:20%}.sd-row-cols-sm-6>*{flex:0 0 auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-row-cols-sm-7>*{flex:0 0 auto;-ms-flex:0 0 auto;width:14.2857142857%}.sd-row-cols-sm-8>*{flex:0 0 auto;-ms-flex:0 0 auto;width:12.5%}.sd-row-cols-sm-9>*{flex:0 0 auto;-ms-flex:0 0 auto;width:11.1111111111%}.sd-row-cols-sm-10>*{flex:0 0 auto;-ms-flex:0 0 
auto;width:10%}.sd-row-cols-sm-11>*{flex:0 0 auto;-ms-flex:0 0 auto;width:9.0909090909%}.sd-row-cols-sm-12>*{flex:0 0 auto;-ms-flex:0 0 auto;width:8.3333333333%}}@media(min-width: 768px){.sd-col-md{flex:1 0 0%;-ms-flex:1 0 0%}.sd-row-cols-md-auto{flex:1 0 auto;-ms-flex:1 0 auto;width:100%}.sd-row-cols-md-1>*{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-row-cols-md-2>*{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-row-cols-md-3>*{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-row-cols-md-4>*{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-row-cols-md-5>*{flex:0 0 auto;-ms-flex:0 0 auto;width:20%}.sd-row-cols-md-6>*{flex:0 0 auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-row-cols-md-7>*{flex:0 0 auto;-ms-flex:0 0 auto;width:14.2857142857%}.sd-row-cols-md-8>*{flex:0 0 auto;-ms-flex:0 0 auto;width:12.5%}.sd-row-cols-md-9>*{flex:0 0 auto;-ms-flex:0 0 auto;width:11.1111111111%}.sd-row-cols-md-10>*{flex:0 0 auto;-ms-flex:0 0 auto;width:10%}.sd-row-cols-md-11>*{flex:0 0 auto;-ms-flex:0 0 auto;width:9.0909090909%}.sd-row-cols-md-12>*{flex:0 0 auto;-ms-flex:0 0 auto;width:8.3333333333%}}@media(min-width: 992px){.sd-col-lg{flex:1 0 0%;-ms-flex:1 0 0%}.sd-row-cols-lg-auto{flex:1 0 auto;-ms-flex:1 0 auto;width:100%}.sd-row-cols-lg-1>*{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-row-cols-lg-2>*{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-row-cols-lg-3>*{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-row-cols-lg-4>*{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-row-cols-lg-5>*{flex:0 0 auto;-ms-flex:0 0 auto;width:20%}.sd-row-cols-lg-6>*{flex:0 0 auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-row-cols-lg-7>*{flex:0 0 auto;-ms-flex:0 0 auto;width:14.2857142857%}.sd-row-cols-lg-8>*{flex:0 0 auto;-ms-flex:0 0 auto;width:12.5%}.sd-row-cols-lg-9>*{flex:0 0 auto;-ms-flex:0 0 auto;width:11.1111111111%}.sd-row-cols-lg-10>*{flex:0 0 auto;-ms-flex:0 0 auto;width:10%}.sd-row-cols-lg-11>*{flex:0 0 auto;-ms-flex:0 0 auto;width:9.0909090909%}.sd-row-cols-lg-12>*{flex:0 0 auto;-ms-flex:0 0 auto;width:8.3333333333%}}@media(min-width: 1200px){.sd-col-xl{flex:1 0 0%;-ms-flex:1 0 0%}.sd-row-cols-xl-auto{flex:1 0 auto;-ms-flex:1 0 auto;width:100%}.sd-row-cols-xl-1>*{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-row-cols-xl-2>*{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-row-cols-xl-3>*{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-row-cols-xl-4>*{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-row-cols-xl-5>*{flex:0 0 auto;-ms-flex:0 0 auto;width:20%}.sd-row-cols-xl-6>*{flex:0 0 auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-row-cols-xl-7>*{flex:0 0 auto;-ms-flex:0 0 auto;width:14.2857142857%}.sd-row-cols-xl-8>*{flex:0 0 auto;-ms-flex:0 0 auto;width:12.5%}.sd-row-cols-xl-9>*{flex:0 0 auto;-ms-flex:0 0 auto;width:11.1111111111%}.sd-row-cols-xl-10>*{flex:0 0 auto;-ms-flex:0 0 auto;width:10%}.sd-row-cols-xl-11>*{flex:0 0 auto;-ms-flex:0 0 auto;width:9.0909090909%}.sd-row-cols-xl-12>*{flex:0 0 auto;-ms-flex:0 0 auto;width:8.3333333333%}}.sd-col-auto{flex:0 0 auto;-ms-flex:0 0 auto;width:auto}.sd-col-1{flex:0 0 auto;-ms-flex:0 0 auto;width:8.3333333333%}.sd-col-2{flex:0 0 auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-col-3{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-col-4{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-col-5{flex:0 0 auto;-ms-flex:0 0 auto;width:41.6666666667%}.sd-col-6{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-col-7{flex:0 0 auto;-ms-flex:0 0 auto;width:58.3333333333%}.sd-col-8{flex:0 0 auto;-ms-flex:0 0 auto;width:66.6666666667%}.sd-col-9{flex:0 0 
auto;-ms-flex:0 0 auto;width:75%}.sd-col-10{flex:0 0 auto;-ms-flex:0 0 auto;width:83.3333333333%}.sd-col-11{flex:0 0 auto;-ms-flex:0 0 auto;width:91.6666666667%}.sd-col-12{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-g-0,.sd-gy-0{--sd-gutter-y: 0}.sd-g-0,.sd-gx-0{--sd-gutter-x: 0}.sd-g-1,.sd-gy-1{--sd-gutter-y: 0.25rem}.sd-g-1,.sd-gx-1{--sd-gutter-x: 0.25rem}.sd-g-2,.sd-gy-2{--sd-gutter-y: 0.5rem}.sd-g-2,.sd-gx-2{--sd-gutter-x: 0.5rem}.sd-g-3,.sd-gy-3{--sd-gutter-y: 1rem}.sd-g-3,.sd-gx-3{--sd-gutter-x: 1rem}.sd-g-4,.sd-gy-4{--sd-gutter-y: 1.5rem}.sd-g-4,.sd-gx-4{--sd-gutter-x: 1.5rem}.sd-g-5,.sd-gy-5{--sd-gutter-y: 3rem}.sd-g-5,.sd-gx-5{--sd-gutter-x: 3rem}@media(min-width: 576px){.sd-col-sm-auto{-ms-flex:0 0 auto;flex:0 0 auto;width:auto}.sd-col-sm-1{-ms-flex:0 0 auto;flex:0 0 auto;width:8.3333333333%}.sd-col-sm-2{-ms-flex:0 0 auto;flex:0 0 auto;width:16.6666666667%}.sd-col-sm-3{-ms-flex:0 0 auto;flex:0 0 auto;width:25%}.sd-col-sm-4{-ms-flex:0 0 auto;flex:0 0 auto;width:33.3333333333%}.sd-col-sm-5{-ms-flex:0 0 auto;flex:0 0 auto;width:41.6666666667%}.sd-col-sm-6{-ms-flex:0 0 auto;flex:0 0 auto;width:50%}.sd-col-sm-7{-ms-flex:0 0 auto;flex:0 0 auto;width:58.3333333333%}.sd-col-sm-8{-ms-flex:0 0 auto;flex:0 0 auto;width:66.6666666667%}.sd-col-sm-9{-ms-flex:0 0 auto;flex:0 0 auto;width:75%}.sd-col-sm-10{-ms-flex:0 0 auto;flex:0 0 auto;width:83.3333333333%}.sd-col-sm-11{-ms-flex:0 0 auto;flex:0 0 auto;width:91.6666666667%}.sd-col-sm-12{-ms-flex:0 0 auto;flex:0 0 auto;width:100%}.sd-g-sm-0,.sd-gy-sm-0{--sd-gutter-y: 0}.sd-g-sm-0,.sd-gx-sm-0{--sd-gutter-x: 0}.sd-g-sm-1,.sd-gy-sm-1{--sd-gutter-y: 0.25rem}.sd-g-sm-1,.sd-gx-sm-1{--sd-gutter-x: 0.25rem}.sd-g-sm-2,.sd-gy-sm-2{--sd-gutter-y: 0.5rem}.sd-g-sm-2,.sd-gx-sm-2{--sd-gutter-x: 0.5rem}.sd-g-sm-3,.sd-gy-sm-3{--sd-gutter-y: 1rem}.sd-g-sm-3,.sd-gx-sm-3{--sd-gutter-x: 1rem}.sd-g-sm-4,.sd-gy-sm-4{--sd-gutter-y: 1.5rem}.sd-g-sm-4,.sd-gx-sm-4{--sd-gutter-x: 1.5rem}.sd-g-sm-5,.sd-gy-sm-5{--sd-gutter-y: 3rem}.sd-g-sm-5,.sd-gx-sm-5{--sd-gutter-x: 3rem}}@media(min-width: 768px){.sd-col-md-auto{-ms-flex:0 0 auto;flex:0 0 auto;width:auto}.sd-col-md-1{-ms-flex:0 0 auto;flex:0 0 auto;width:8.3333333333%}.sd-col-md-2{-ms-flex:0 0 auto;flex:0 0 auto;width:16.6666666667%}.sd-col-md-3{-ms-flex:0 0 auto;flex:0 0 auto;width:25%}.sd-col-md-4{-ms-flex:0 0 auto;flex:0 0 auto;width:33.3333333333%}.sd-col-md-5{-ms-flex:0 0 auto;flex:0 0 auto;width:41.6666666667%}.sd-col-md-6{-ms-flex:0 0 auto;flex:0 0 auto;width:50%}.sd-col-md-7{-ms-flex:0 0 auto;flex:0 0 auto;width:58.3333333333%}.sd-col-md-8{-ms-flex:0 0 auto;flex:0 0 auto;width:66.6666666667%}.sd-col-md-9{-ms-flex:0 0 auto;flex:0 0 auto;width:75%}.sd-col-md-10{-ms-flex:0 0 auto;flex:0 0 auto;width:83.3333333333%}.sd-col-md-11{-ms-flex:0 0 auto;flex:0 0 auto;width:91.6666666667%}.sd-col-md-12{-ms-flex:0 0 auto;flex:0 0 auto;width:100%}.sd-g-md-0,.sd-gy-md-0{--sd-gutter-y: 0}.sd-g-md-0,.sd-gx-md-0{--sd-gutter-x: 0}.sd-g-md-1,.sd-gy-md-1{--sd-gutter-y: 0.25rem}.sd-g-md-1,.sd-gx-md-1{--sd-gutter-x: 0.25rem}.sd-g-md-2,.sd-gy-md-2{--sd-gutter-y: 0.5rem}.sd-g-md-2,.sd-gx-md-2{--sd-gutter-x: 0.5rem}.sd-g-md-3,.sd-gy-md-3{--sd-gutter-y: 1rem}.sd-g-md-3,.sd-gx-md-3{--sd-gutter-x: 1rem}.sd-g-md-4,.sd-gy-md-4{--sd-gutter-y: 1.5rem}.sd-g-md-4,.sd-gx-md-4{--sd-gutter-x: 1.5rem}.sd-g-md-5,.sd-gy-md-5{--sd-gutter-y: 3rem}.sd-g-md-5,.sd-gx-md-5{--sd-gutter-x: 3rem}}@media(min-width: 992px){.sd-col-lg-auto{-ms-flex:0 0 auto;flex:0 0 auto;width:auto}.sd-col-lg-1{-ms-flex:0 0 auto;flex:0 0 
auto;width:8.3333333333%}.sd-col-lg-2{-ms-flex:0 0 auto;flex:0 0 auto;width:16.6666666667%}.sd-col-lg-3{-ms-flex:0 0 auto;flex:0 0 auto;width:25%}.sd-col-lg-4{-ms-flex:0 0 auto;flex:0 0 auto;width:33.3333333333%}.sd-col-lg-5{-ms-flex:0 0 auto;flex:0 0 auto;width:41.6666666667%}.sd-col-lg-6{-ms-flex:0 0 auto;flex:0 0 auto;width:50%}.sd-col-lg-7{-ms-flex:0 0 auto;flex:0 0 auto;width:58.3333333333%}.sd-col-lg-8{-ms-flex:0 0 auto;flex:0 0 auto;width:66.6666666667%}.sd-col-lg-9{-ms-flex:0 0 auto;flex:0 0 auto;width:75%}.sd-col-lg-10{-ms-flex:0 0 auto;flex:0 0 auto;width:83.3333333333%}.sd-col-lg-11{-ms-flex:0 0 auto;flex:0 0 auto;width:91.6666666667%}.sd-col-lg-12{-ms-flex:0 0 auto;flex:0 0 auto;width:100%}.sd-g-lg-0,.sd-gy-lg-0{--sd-gutter-y: 0}.sd-g-lg-0,.sd-gx-lg-0{--sd-gutter-x: 0}.sd-g-lg-1,.sd-gy-lg-1{--sd-gutter-y: 0.25rem}.sd-g-lg-1,.sd-gx-lg-1{--sd-gutter-x: 0.25rem}.sd-g-lg-2,.sd-gy-lg-2{--sd-gutter-y: 0.5rem}.sd-g-lg-2,.sd-gx-lg-2{--sd-gutter-x: 0.5rem}.sd-g-lg-3,.sd-gy-lg-3{--sd-gutter-y: 1rem}.sd-g-lg-3,.sd-gx-lg-3{--sd-gutter-x: 1rem}.sd-g-lg-4,.sd-gy-lg-4{--sd-gutter-y: 1.5rem}.sd-g-lg-4,.sd-gx-lg-4{--sd-gutter-x: 1.5rem}.sd-g-lg-5,.sd-gy-lg-5{--sd-gutter-y: 3rem}.sd-g-lg-5,.sd-gx-lg-5{--sd-gutter-x: 3rem}}@media(min-width: 1200px){.sd-col-xl-auto{-ms-flex:0 0 auto;flex:0 0 auto;width:auto}.sd-col-xl-1{-ms-flex:0 0 auto;flex:0 0 auto;width:8.3333333333%}.sd-col-xl-2{-ms-flex:0 0 auto;flex:0 0 auto;width:16.6666666667%}.sd-col-xl-3{-ms-flex:0 0 auto;flex:0 0 auto;width:25%}.sd-col-xl-4{-ms-flex:0 0 auto;flex:0 0 auto;width:33.3333333333%}.sd-col-xl-5{-ms-flex:0 0 auto;flex:0 0 auto;width:41.6666666667%}.sd-col-xl-6{-ms-flex:0 0 auto;flex:0 0 auto;width:50%}.sd-col-xl-7{-ms-flex:0 0 auto;flex:0 0 auto;width:58.3333333333%}.sd-col-xl-8{-ms-flex:0 0 auto;flex:0 0 auto;width:66.6666666667%}.sd-col-xl-9{-ms-flex:0 0 auto;flex:0 0 auto;width:75%}.sd-col-xl-10{-ms-flex:0 0 auto;flex:0 0 auto;width:83.3333333333%}.sd-col-xl-11{-ms-flex:0 0 auto;flex:0 0 auto;width:91.6666666667%}.sd-col-xl-12{-ms-flex:0 0 auto;flex:0 0 auto;width:100%}.sd-g-xl-0,.sd-gy-xl-0{--sd-gutter-y: 0}.sd-g-xl-0,.sd-gx-xl-0{--sd-gutter-x: 0}.sd-g-xl-1,.sd-gy-xl-1{--sd-gutter-y: 0.25rem}.sd-g-xl-1,.sd-gx-xl-1{--sd-gutter-x: 0.25rem}.sd-g-xl-2,.sd-gy-xl-2{--sd-gutter-y: 0.5rem}.sd-g-xl-2,.sd-gx-xl-2{--sd-gutter-x: 0.5rem}.sd-g-xl-3,.sd-gy-xl-3{--sd-gutter-y: 1rem}.sd-g-xl-3,.sd-gx-xl-3{--sd-gutter-x: 1rem}.sd-g-xl-4,.sd-gy-xl-4{--sd-gutter-y: 1.5rem}.sd-g-xl-4,.sd-gx-xl-4{--sd-gutter-x: 1.5rem}.sd-g-xl-5,.sd-gy-xl-5{--sd-gutter-y: 3rem}.sd-g-xl-5,.sd-gx-xl-5{--sd-gutter-x: 3rem}}.sd-flex-row-reverse{flex-direction:row-reverse !important}details.sd-dropdown{position:relative;font-size:var(--sd-fontsize-dropdown)}details.sd-dropdown:hover{cursor:pointer}details.sd-dropdown .sd-summary-content{cursor:default}details.sd-dropdown summary.sd-summary-title{padding:.5em .6em .5em 1em;font-size:var(--sd-fontsize-dropdown-title);font-weight:var(--sd-fontweight-dropdown-title);user-select:none;-moz-user-select:none;-ms-user-select:none;-webkit-user-select:none;list-style:none;display:inline-flex;justify-content:space-between}details.sd-dropdown summary.sd-summary-title::-webkit-details-marker{display:none}details.sd-dropdown summary.sd-summary-title:focus{outline:none}details.sd-dropdown summary.sd-summary-title .sd-summary-icon{margin-right:.6em;display:inline-flex;align-items:center}details.sd-dropdown summary.sd-summary-title .sd-summary-icon svg{opacity:.8}details.sd-dropdown summary.sd-summary-title 
.sd-summary-text{flex-grow:1;line-height:1.5;padding-right:.5rem}details.sd-dropdown summary.sd-summary-title .sd-summary-state-marker{pointer-events:none;display:inline-flex;align-items:center}details.sd-dropdown summary.sd-summary-title .sd-summary-state-marker svg{opacity:.6}details.sd-dropdown summary.sd-summary-title:hover .sd-summary-state-marker svg{opacity:1;transform:scale(1.1)}details.sd-dropdown[open] summary .sd-octicon.no-title{visibility:hidden}details.sd-dropdown .sd-summary-chevron-right{transition:.25s}details.sd-dropdown[open]>.sd-summary-title .sd-summary-chevron-right{transform:rotate(90deg)}details.sd-dropdown[open]>.sd-summary-title .sd-summary-chevron-down{transform:rotate(180deg)}details.sd-dropdown:not([open]).sd-card{border:none}details.sd-dropdown:not([open])>.sd-card-header{border:1px solid var(--sd-color-card-border);border-radius:.25rem}details.sd-dropdown.sd-fade-in[open] summary~*{-moz-animation:sd-fade-in .5s ease-in-out;-webkit-animation:sd-fade-in .5s ease-in-out;animation:sd-fade-in .5s ease-in-out}details.sd-dropdown.sd-fade-in-slide-down[open] summary~*{-moz-animation:sd-fade-in .5s ease-in-out,sd-slide-down .5s ease-in-out;-webkit-animation:sd-fade-in .5s ease-in-out,sd-slide-down .5s ease-in-out;animation:sd-fade-in .5s ease-in-out,sd-slide-down .5s ease-in-out}.sd-col>.sd-dropdown{width:100%}.sd-summary-content>.sd-tab-set:first-child{margin-top:0}@keyframes sd-fade-in{0%{opacity:0}100%{opacity:1}}@keyframes sd-slide-down{0%{transform:translate(0, -10px)}100%{transform:translate(0, 0)}}.sd-tab-set{border-radius:.125rem;display:flex;flex-wrap:wrap;margin:1em 0;position:relative}.sd-tab-set>input{opacity:0;position:absolute}.sd-tab-set>input:checked+label{border-color:var(--sd-color-tabs-underline-active);color:var(--sd-color-tabs-label-active)}.sd-tab-set>input:checked+label+.sd-tab-content{display:block}.sd-tab-set>input:not(:checked)+label:hover{color:var(--sd-color-tabs-label-hover);border-color:var(--sd-color-tabs-underline-hover)}.sd-tab-set>input:focus+label{outline-style:auto}.sd-tab-set>input:not(.focus-visible)+label{outline:none;-webkit-tap-highlight-color:transparent}.sd-tab-set>label{border-bottom:.125rem solid transparent;margin-bottom:0;color:var(--sd-color-tabs-label-inactive);border-color:var(--sd-color-tabs-underline-inactive);cursor:pointer;font-size:var(--sd-fontsize-tabs-label);font-weight:700;padding:1em 1.25em .5em;transition:color 250ms;width:auto;z-index:1}html .sd-tab-set>label:hover{color:var(--sd-color-tabs-label-active)}.sd-col>.sd-tab-set{width:100%}.sd-tab-content{box-shadow:0 -0.0625rem var(--sd-color-tabs-overline),0 .0625rem var(--sd-color-tabs-underline);display:none;order:99;padding-bottom:.75rem;padding-top:.75rem;width:100%}.sd-tab-content>:first-child{margin-top:0 !important}.sd-tab-content>:last-child{margin-bottom:0 !important}.sd-tab-content>.sd-tab-set{margin:0}.sd-sphinx-override,.sd-sphinx-override *{-moz-box-sizing:border-box;-webkit-box-sizing:border-box;box-sizing:border-box}.sd-sphinx-override p{margin-top:0}:root{--sd-color-primary: #0071bc;--sd-color-secondary: #6c757d;--sd-color-success: #28a745;--sd-color-info: #17a2b8;--sd-color-warning: #f0b37e;--sd-color-danger: #dc3545;--sd-color-light: #f8f9fa;--sd-color-muted: #6c757d;--sd-color-dark: #212529;--sd-color-black: black;--sd-color-white: white;--sd-color-primary-highlight: #0060a0;--sd-color-secondary-highlight: #5c636a;--sd-color-success-highlight: #228e3b;--sd-color-info-highlight: #148a9c;--sd-color-warning-highlight: 
#cc986b;--sd-color-danger-highlight: #bb2d3b;--sd-color-light-highlight: #d3d4d5;--sd-color-muted-highlight: #5c636a;--sd-color-dark-highlight: #1c1f23;--sd-color-black-highlight: black;--sd-color-white-highlight: #d9d9d9;--sd-color-primary-bg: rgba(0, 113, 188, 0.2);--sd-color-secondary-bg: rgba(108, 117, 125, 0.2);--sd-color-success-bg: rgba(40, 167, 69, 0.2);--sd-color-info-bg: rgba(23, 162, 184, 0.2);--sd-color-warning-bg: rgba(240, 179, 126, 0.2);--sd-color-danger-bg: rgba(220, 53, 69, 0.2);--sd-color-light-bg: rgba(248, 249, 250, 0.2);--sd-color-muted-bg: rgba(108, 117, 125, 0.2);--sd-color-dark-bg: rgba(33, 37, 41, 0.2);--sd-color-black-bg: rgba(0, 0, 0, 0.2);--sd-color-white-bg: rgba(255, 255, 255, 0.2);--sd-color-primary-text: #fff;--sd-color-secondary-text: #fff;--sd-color-success-text: #fff;--sd-color-info-text: #fff;--sd-color-warning-text: #212529;--sd-color-danger-text: #fff;--sd-color-light-text: #212529;--sd-color-muted-text: #fff;--sd-color-dark-text: #fff;--sd-color-black-text: #fff;--sd-color-white-text: #212529;--sd-color-shadow: rgba(0, 0, 0, 0.15);--sd-color-card-border: rgba(0, 0, 0, 0.125);--sd-color-card-border-hover: hsla(231, 99%, 66%, 1);--sd-color-card-background: transparent;--sd-color-card-text: inherit;--sd-color-card-header: transparent;--sd-color-card-footer: transparent;--sd-color-tabs-label-active: hsla(231, 99%, 66%, 1);--sd-color-tabs-label-hover: hsla(231, 99%, 66%, 1);--sd-color-tabs-label-inactive: hsl(0, 0%, 66%);--sd-color-tabs-underline-active: hsla(231, 99%, 66%, 1);--sd-color-tabs-underline-hover: rgba(178, 206, 245, 0.62);--sd-color-tabs-underline-inactive: transparent;--sd-color-tabs-overline: rgb(222, 222, 222);--sd-color-tabs-underline: rgb(222, 222, 222);--sd-fontsize-tabs-label: 1rem;--sd-fontsize-dropdown: inherit;--sd-fontsize-dropdown-title: 1rem;--sd-fontweight-dropdown-title: 700} diff --git a/_static/PINA_logo.png b/_static/PINA_logo.png new file mode 100644 index 00000000..bac7ee37 Binary files /dev/null and b/_static/PINA_logo.png differ diff --git a/_static/basic.css b/_static/basic.css index 01192852..2af6139e 100644 --- a/_static/basic.css +++ b/_static/basic.css @@ -4,7 +4,7 @@ * * Sphinx stylesheet -- basic theme. * - * :copyright: Copyright 2007-2020 by the Sphinx team, see AUTHORS. + * :copyright: Copyright 2007-2024 by the Sphinx team, see AUTHORS. * :license: BSD, see LICENSE for details. 
* */ @@ -15,6 +15,12 @@ div.clearer { clear: both; } +div.section::after { + display: block; + content: ''; + clear: left; +} + /* -- relbar ---------------------------------------------------------------- */ div.related { @@ -49,7 +55,7 @@ div.sphinxsidebarwrapper { div.sphinxsidebar { float: left; - width: 230px; + width: 270px; margin-left: -100%; font-size: 90%; word-wrap: break-word; @@ -124,7 +130,7 @@ ul.search li a { font-weight: bold; } -ul.search li div.context { +ul.search li p.context { color: #888; margin: 2px 0 0 30px; text-align: left; @@ -216,7 +222,7 @@ table.modindextable td { /* -- general body styles --------------------------------------------------- */ div.body { - min-width: 450px; + min-width: 360px; max-width: 800px; } @@ -231,14 +237,8 @@ a.headerlink { visibility: hidden; } -a.brackets:before, -span.brackets > a:before{ - content: "["; -} - -a.brackets:after, -span.brackets > a:after { - content: "]"; +a:visited { + color: #551A8B; } h1:hover > a.headerlink, @@ -271,25 +271,25 @@ p.rubric { font-weight: bold; } -img.align-left, .figure.align-left, object.align-left { +img.align-left, figure.align-left, .figure.align-left, object.align-left { clear: left; float: left; margin-right: 1em; } -img.align-right, .figure.align-right, object.align-right { +img.align-right, figure.align-right, .figure.align-right, object.align-right { clear: right; float: right; margin-left: 1em; } -img.align-center, .figure.align-center, object.align-center { +img.align-center, figure.align-center, .figure.align-center, object.align-center { display: block; margin-left: auto; margin-right: auto; } -img.align-default, .figure.align-default { +img.align-default, figure.align-default, .figure.align-default { display: block; margin-left: auto; margin-right: auto; @@ -313,24 +313,35 @@ img.align-default, .figure.align-default { /* -- sidebars -------------------------------------------------------------- */ -div.sidebar { +div.sidebar, +aside.sidebar { margin: 0 0 0.5em 1em; border: 1px solid #ddb; - padding: 7px 7px 0 7px; + padding: 7px; background-color: #ffe; width: 40%; float: right; + clear: right; + overflow-x: auto; } p.sidebar-title { font-weight: bold; } +nav.contents, +aside.topic, +div.admonition, div.topic, blockquote { + clear: left; +} + /* -- topics ---------------------------------------------------------------- */ +nav.contents, +aside.topic, div.topic { border: 1px solid #ccc; - padding: 7px 7px 0 7px; + padding: 7px; margin: 10px 0 10px 0; } @@ -352,10 +363,6 @@ div.admonition dt { font-weight: bold; } -div.admonition dl { - margin-bottom: 0; -} - p.admonition-title { margin: 0px 10px 5px 0px; font-weight: bold; @@ -366,9 +373,34 @@ div.body p.centered { margin-top: 25px; } +/* -- content of sidebars/topics/admonitions -------------------------------- */ + +div.sidebar > :last-child, +aside.sidebar > :last-child, +nav.contents > :last-child, +aside.topic > :last-child, +div.topic > :last-child, +div.admonition > :last-child { + margin-bottom: 0; +} + +div.sidebar::after, +aside.sidebar::after, +nav.contents::after, +aside.topic::after, +div.topic::after, +div.admonition::after, +blockquote::after { + display: block; + content: ''; + clear: both; +} + /* -- tables ---------------------------------------------------------------- */ table.docutils { + margin-top: 10px; + margin-bottom: 10px; border: 0; border-collapse: collapse; } @@ -398,10 +430,6 @@ table.docutils td, table.docutils th { border-bottom: 1px solid #aaa; } -table.footnote td, table.footnote th { - border: 0 
!important; -} - th { text-align: left; padding-right: 5px; @@ -416,32 +444,34 @@ table.citation td { border-bottom: none; } -th > p:first-child, -td > p:first-child { +th > :first-child, +td > :first-child { margin-top: 0px; } -th > p:last-child, -td > p:last-child { +th > :last-child, +td > :last-child { margin-bottom: 0px; } /* -- figures --------------------------------------------------------------- */ -div.figure { +div.figure, figure { margin: 0.5em; padding: 0.5em; } -div.figure p.caption { +div.figure p.caption, figcaption { padding: 0.3em; } -div.figure p.caption span.caption-number { +div.figure p.caption span.caption-number, +figcaption span.caption-number { font-style: italic; } -div.figure p.caption span.caption-text { +div.figure p.caption span.caption-text, +figcaption span.caption-text { } /* -- field list styles ----------------------------------------------------- */ @@ -468,10 +498,71 @@ table.field-list td, table.field-list th { /* -- hlist styles ---------------------------------------------------------- */ +table.hlist { + margin: 1em 0; +} + table.hlist td { vertical-align: top; } +/* -- object description styles --------------------------------------------- */ + +.sig { + font-family: 'Consolas', 'Menlo', 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', monospace; +} + +.sig-name, code.descname { + background-color: transparent; + font-weight: bold; +} + +.sig-name { + font-size: 1.1em; +} + +code.descname { + font-size: 1.2em; +} + +.sig-prename, code.descclassname { + background-color: transparent; +} + +.optional { + font-size: 1.3em; +} + +.sig-paren { + font-size: larger; +} + +.sig-param.n { + font-style: italic; +} + +/* C++ specific styling */ + +.sig-inline.c-texpr, +.sig-inline.cpp-texpr { + font-family: unset; +} + +.sig.c .k, .sig.c .kt, +.sig.cpp .k, .sig.cpp .kt { + color: #0033B3; +} + +.sig.c .m, +.sig.cpp .m { + color: #1750EB; +} + +.sig.c .s, .sig.c .sc, +.sig.cpp .s, .sig.cpp .sc { + color: #067D17; +} + /* -- other body styles ----------------------------------------------------- */ @@ -495,26 +586,53 @@ ol.upperroman { list-style: upper-roman; } -li > p:first-child { +:not(li) > ol > li:first-child > :first-child, +:not(li) > ul > li:first-child > :first-child { margin-top: 0px; } -li > p:last-child { +:not(li) > ol > li:last-child > :last-child, +:not(li) > ul > li:last-child > :last-child { margin-bottom: 0px; } -dl.footnote > dt, -dl.citation > dt { - float: left; +ol.simple ol p, +ol.simple ul p, +ul.simple ol p, +ul.simple ul p { + margin-top: 0; } -dl.footnote > dd, -dl.citation > dd { - margin-bottom: 0em; +ol.simple > li:not(:first-child) > p, +ul.simple > li:not(:first-child) > p { + margin-top: 0; } -dl.footnote > dd:after, -dl.citation > dd:after { +ol.simple p, +ul.simple p { + margin-bottom: 0; +} + +aside.footnote > span, +div.citation > span { + float: left; +} +aside.footnote > span:last-of-type, +div.citation > span:last-of-type { + padding-right: 0.5em; +} +aside.footnote > p { + margin-left: 2em; +} +div.citation > p { + margin-left: 4em; +} +aside.footnote > p:last-of-type, +div.citation > p:last-of-type { + margin-bottom: 0em; +} +aside.footnote > p:last-of-type:after, +div.citation > p:last-of-type:after { content: ""; clear: both; } @@ -531,10 +649,6 @@ dl.field-list > dt { padding-right: 5px; } -dl.field-list > dt:after { - content: ":"; -} - dl.field-list > dd { padding-left: 0.5em; margin-top: 0em; @@ -546,7 +660,7 @@ dl { margin-bottom: 15px; } -dd > p:first-child { +dd > :first-child { margin-top: 0px; } @@ -560,6 
+674,21 @@ dd { margin-left: 30px; } +.sig dd { + margin-top: 0px; + margin-bottom: 0px; +} + +.sig dl { + margin-top: 0px; + margin-bottom: 0px; +} + +dl > dd:last-child, +dl > dd:last-child > :last-child { + margin-bottom: 0; +} + dt:target, span.highlighted { background-color: #fbe54e; } @@ -573,14 +702,6 @@ dl.glossary dt { font-size: 1.1em; } -.optional { - font-size: 1.3em; -} - -.sig-paren { - font-size: larger; -} - .versionmodified { font-style: italic; } @@ -621,8 +742,9 @@ dl.glossary dt { .classifier:before { font-style: normal; - margin: 0.5em; + margin: 0 0.5em; content: ":"; + display: inline-block; } abbr, acronym { @@ -630,6 +752,14 @@ abbr, acronym { cursor: help; } +.translated { + background-color: rgba(207, 255, 207, 0.2) +} + +.untranslated { + background-color: rgba(255, 207, 207, 0.2) +} + /* -- code displays --------------------------------------------------------- */ pre { @@ -637,29 +767,69 @@ pre { overflow-y: hidden; /* fixes display issues on Chrome browsers */ } +pre, div[class*="highlight-"] { + clear: both; +} + span.pre { -moz-hyphens: none; -ms-hyphens: none; -webkit-hyphens: none; hyphens: none; + white-space: nowrap; +} + +div[class*="highlight-"] { + margin: 1em 0; } td.linenos pre { - padding: 5px 0px; border: 0; background-color: transparent; color: #aaa; } table.highlighttable { - margin-left: 0.5em; + display: block; +} + +table.highlighttable tbody { + display: block; +} + +table.highlighttable tr { + display: flex; } table.highlighttable td { - padding: 0 0.5em 0 0.5em; + margin: 0; + padding: 0; +} + +table.highlighttable td.linenos { + padding-right: 0.5em; +} + +table.highlighttable td.code { + flex: 1; + overflow: hidden; +} + +.highlight .hll { + display: block; +} + +div.highlight pre, +table.highlighttable pre { + margin: 0; +} + +div.code-block-caption + div { + margin-top: 0; } div.code-block-caption { + margin-top: 1em; padding: 2px 5px; font-size: small; } @@ -668,12 +838,14 @@ div.code-block-caption code { background-color: transparent; } -div.code-block-caption + div > div.highlight > pre { - margin-top: 0; -} - -div.doctest > div.highlight span.gp { /* gp: Generic.Prompt */ - user-select: none; +table.highlighttable td.linenos, +span.linenos, +div.highlight span.gp { /* gp: Generic.Prompt */ + user-select: none; + -webkit-user-select: text; /* Safari fallback only */ + -webkit-user-select: none; /* Chrome/Safari */ + -moz-user-select: none; /* Firefox */ + -ms-user-select: none; /* IE10+ */ } div.code-block-caption span.caption-number { @@ -685,21 +857,7 @@ div.code-block-caption span.caption-text { } div.literal-block-wrapper { - padding: 1em 1em 0; -} - -div.literal-block-wrapper div.highlight { - margin: 0; -} - -code.descname { - background-color: transparent; - font-weight: bold; - font-size: 1.2em; -} - -code.descclassname { - background-color: transparent; + margin: 1em 0; } code.xref, a code { @@ -740,8 +898,7 @@ span.eqno { } span.eqno a.headerlink { - position: relative; - left: 0px; + position: absolute; z-index: 1; } diff --git a/_static/css/badge_only.css b/_static/css/badge_only.css deleted file mode 100644 index c718cee4..00000000 --- a/_static/css/badge_only.css +++ /dev/null @@ -1 +0,0 @@ -.clearfix{*zoom:1}.clearfix:after,.clearfix:before{display:table;content:""}.clearfix:after{clear:both}@font-face{font-family:FontAwesome;font-style:normal;font-weight:400;src:url(fonts/fontawesome-webfont.eot?674f50d287a8c48dc19ba404d20fe713?#iefix) 
format("embedded-opentype"),url(fonts/fontawesome-webfont.woff2?af7ae505a9eed503f8b8e6982036873e) format("woff2"),url(fonts/fontawesome-webfont.woff?fee66e712a8a08eef5805a46892932ad) format("woff"),url(fonts/fontawesome-webfont.ttf?b06871f281fee6b241d60582ae9369b9) format("truetype"),url(fonts/fontawesome-webfont.svg?912ec66d7572ff821749319396470bde#FontAwesome) format("svg")}.fa:before{font-family:FontAwesome;font-style:normal;font-weight:400;line-height:1}.fa:before,a .fa{text-decoration:inherit}.fa:before,a .fa,li .fa{display:inline-block}li .fa-large:before{width:1.875em}ul.fas{list-style-type:none;margin-left:2em;text-indent:-.8em}ul.fas li .fa{width:.8em}ul.fas li .fa-large:before{vertical-align:baseline}.fa-book:before,.icon-book:before{content:"\f02d"}.fa-caret-down:before,.icon-caret-down:before{content:"\f0d7"}.fa-caret-up:before,.icon-caret-up:before{content:"\f0d8"}.fa-caret-left:before,.icon-caret-left:before{content:"\f0d9"}.fa-caret-right:before,.icon-caret-right:before{content:"\f0da"}.rst-versions{position:fixed;bottom:0;left:0;width:300px;color:#fcfcfc;background:#1f1d1d;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;z-index:400}.rst-versions a{color:#2980b9;text-decoration:none}.rst-versions .rst-badge-small{display:none}.rst-versions .rst-current-version{padding:12px;background-color:#272525;display:block;text-align:right;font-size:90%;cursor:pointer;color:#27ae60}.rst-versions .rst-current-version:after{clear:both;content:"";display:block}.rst-versions .rst-current-version .fa{color:#fcfcfc}.rst-versions .rst-current-version .fa-book,.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version.rst-out-of-date{background-color:#e74c3c;color:#fff}.rst-versions .rst-current-version.rst-active-old-version{background-color:#f1c40f;color:#000}.rst-versions.shift-up{height:auto;max-height:100%;overflow-y:scroll}.rst-versions.shift-up .rst-other-versions{display:block}.rst-versions .rst-other-versions{font-size:90%;padding:12px;color:grey;display:none}.rst-versions .rst-other-versions hr{display:block;height:1px;border:0;margin:20px 0;padding:0;border-top:1px solid #413d3d}.rst-versions .rst-other-versions dd{display:inline-block;margin:0}.rst-versions .rst-other-versions dd a{display:inline-block;padding:6px;color:#fcfcfc}.rst-versions.rst-badge{width:auto;bottom:20px;right:20px;left:auto;border:none;max-width:300px;max-height:90%}.rst-versions.rst-badge .fa-book,.rst-versions.rst-badge .icon-book{float:none;line-height:30px}.rst-versions.rst-badge.shift-up .rst-current-version{text-align:right}.rst-versions.rst-badge.shift-up .rst-current-version .fa-book,.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge>.rst-current-version{width:auto;height:30px;line-height:30px;padding:0 6px;display:block;text-align:center}@media screen and (max-width:768px){.rst-versions{width:85%;display:none}.rst-versions.shift{display:block}} \ No newline at end of file diff --git a/_static/css/fonts/Roboto-Slab-Bold.woff b/_static/css/fonts/Roboto-Slab-Bold.woff deleted file mode 100644 index 6cb60000..00000000 Binary files a/_static/css/fonts/Roboto-Slab-Bold.woff and /dev/null differ diff --git a/_static/css/fonts/Roboto-Slab-Bold.woff2 b/_static/css/fonts/Roboto-Slab-Bold.woff2 deleted file mode 100644 index 7059e231..00000000 Binary files a/_static/css/fonts/Roboto-Slab-Bold.woff2 and /dev/null differ diff --git a/_static/css/fonts/Roboto-Slab-Regular.woff b/_static/css/fonts/Roboto-Slab-Regular.woff 
deleted file mode 100644 index f815f63f..00000000 Binary files a/_static/css/fonts/Roboto-Slab-Regular.woff and /dev/null differ
diff --git a/_static/css/fonts/Roboto-Slab-Regular.woff2 b/_static/css/fonts/Roboto-Slab-Regular.woff2 deleted file mode 100644 index f2c76e5b..00000000 Binary files a/_static/css/fonts/Roboto-Slab-Regular.woff2 and /dev/null differ
diff --git a/_static/css/fonts/fontawesome-webfont.eot b/_static/css/fonts/fontawesome-webfont.eot deleted file mode 100644 index e9f60ca9..00000000 Binary files a/_static/css/fonts/fontawesome-webfont.eot and /dev/null differ
diff --git a/_static/css/fonts/fontawesome-webfont.svg b/_static/css/fonts/fontawesome-webfont.svg deleted file mode 100644 index 855c845e..00000000 --- a/_static/css/fonts/fontawesome-webfont.svg +++ /dev/null @@ -1,2671 +0,0 @@
-Created by FontForge 20120731 at Mon Oct 24 17:37:40 2016
-Copyright Dave Gandy 2016. All rights reserved.
[2,671 deleted lines of SVG glyph markup omitted; only the text nodes above survived extraction]
diff --git a/_static/css/fonts/fontawesome-webfont.ttf b/_static/css/fonts/fontawesome-webfont.ttf deleted file mode 100644 index 35acda2f..00000000 Binary files a/_static/css/fonts/fontawesome-webfont.ttf and /dev/null differ
diff --git a/_static/css/fonts/fontawesome-webfont.woff b/_static/css/fonts/fontawesome-webfont.woff deleted file mode 100644 index 400014a4..00000000 Binary files a/_static/css/fonts/fontawesome-webfont.woff and /dev/null differ
diff --git a/_static/css/fonts/fontawesome-webfont.woff2 b/_static/css/fonts/fontawesome-webfont.woff2 deleted file mode 100644 index 4d13fc60..00000000 Binary files a/_static/css/fonts/fontawesome-webfont.woff2 and /dev/null differ
diff --git a/_static/css/fonts/lato-bold-italic.woff b/_static/css/fonts/lato-bold-italic.woff deleted file mode 100644 index 88ad05b9..00000000 Binary files a/_static/css/fonts/lato-bold-italic.woff and /dev/null differ
diff --git a/_static/css/fonts/lato-bold-italic.woff2 b/_static/css/fonts/lato-bold-italic.woff2 deleted file mode 100644 index c4e3d804..00000000 Binary files a/_static/css/fonts/lato-bold-italic.woff2 and /dev/null differ
diff --git a/_static/css/fonts/lato-bold.woff
b/_static/css/fonts/lato-bold.woff deleted file mode 100644 index c6dff51f..00000000 Binary files a/_static/css/fonts/lato-bold.woff and /dev/null differ diff --git a/_static/css/fonts/lato-bold.woff2 b/_static/css/fonts/lato-bold.woff2 deleted file mode 100644 index bb195043..00000000 Binary files a/_static/css/fonts/lato-bold.woff2 and /dev/null differ diff --git a/_static/css/fonts/lato-normal-italic.woff b/_static/css/fonts/lato-normal-italic.woff deleted file mode 100644 index 76114bc0..00000000 Binary files a/_static/css/fonts/lato-normal-italic.woff and /dev/null differ diff --git a/_static/css/fonts/lato-normal-italic.woff2 b/_static/css/fonts/lato-normal-italic.woff2 deleted file mode 100644 index 3404f37e..00000000 Binary files a/_static/css/fonts/lato-normal-italic.woff2 and /dev/null differ diff --git a/_static/css/fonts/lato-normal.woff b/_static/css/fonts/lato-normal.woff deleted file mode 100644 index ae1307ff..00000000 Binary files a/_static/css/fonts/lato-normal.woff and /dev/null differ diff --git a/_static/css/fonts/lato-normal.woff2 b/_static/css/fonts/lato-normal.woff2 deleted file mode 100644 index 3bf98433..00000000 Binary files a/_static/css/fonts/lato-normal.woff2 and /dev/null differ diff --git a/_static/css/theme.css b/_static/css/theme.css deleted file mode 100644 index 19a446a0..00000000 --- a/_static/css/theme.css +++ /dev/null @@ -1,4 +0,0 @@ -html{box-sizing:border-box}*,:after,:before{box-sizing:inherit}article,aside,details,figcaption,figure,footer,header,hgroup,nav,section{display:block}audio,canvas,video{display:inline-block;*display:inline;*zoom:1}[hidden],audio:not([controls]){display:none}*{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}html{font-size:100%;-webkit-text-size-adjust:100%;-ms-text-size-adjust:100%}body{margin:0}a:active,a:hover{outline:0}abbr[title]{border-bottom:1px dotted}b,strong{font-weight:700}blockquote{margin:0}dfn{font-style:italic}ins{background:#ff9;text-decoration:none}ins,mark{color:#000}mark{background:#ff0;font-style:italic;font-weight:700}.rst-content code,.rst-content tt,code,kbd,pre,samp{font-family:monospace,serif;_font-family:courier new,monospace;font-size:1em}pre{white-space:pre}q{quotes:none}q:after,q:before{content:"";content:none}small{font-size:85%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sup{top:-.5em}sub{bottom:-.25em}dl,ol,ul{margin:0;padding:0;list-style:none;list-style-image:none}li{list-style:none}dd{margin:0}img{border:0;-ms-interpolation-mode:bicubic;vertical-align:middle;max-width:100%}svg:not(:root){overflow:hidden}figure,form{margin:0}label{cursor:pointer}button,input,select,textarea{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle}button,input{line-height:normal}button,input[type=button],input[type=reset],input[type=submit]{cursor:pointer;-webkit-appearance:button;*overflow:visible}button[disabled],input[disabled]{cursor:default}input[type=search]{-webkit-appearance:textfield;-moz-box-sizing:content-box;-webkit-box-sizing:content-box;box-sizing:content-box}textarea{resize:vertical}table{border-collapse:collapse;border-spacing:0}td{vertical-align:top}.chromeframe{margin:.2em 0;background:#ccc;color:#000;padding:.2em 0}.ir{display:block;border:0;text-indent:-999em;overflow:hidden;background-color:transparent;background-repeat:no-repeat;text-align:left;direction:ltr;*line-height:0}.ir br{display:none}.hidden{display:none!important;visibility:hidden}.visuallyhidden{border:0;clip:rect(0 0 0 
0);height:1px;margin:-1px;overflow:hidden;padding:0;position:absolute;width:1px}.visuallyhidden.focusable:active,.visuallyhidden.focusable:focus{clip:auto;height:auto;margin:0;overflow:visible;position:static;width:auto}.invisible{visibility:hidden}.relative{position:relative}big,small{font-size:100%}@media print{body,html,section{background:none!important}*{box-shadow:none!important;text-shadow:none!important;filter:none!important;-ms-filter:none!important}a,a:visited{text-decoration:underline}.ir a:after,a[href^="#"]:after,a[href^="javascript:"]:after{content:""}blockquote,pre{page-break-inside:avoid}thead{display:table-header-group}img,tr{page-break-inside:avoid}img{max-width:100%!important}@page{margin:.5cm}.rst-content .toctree-wrapper>p.caption,h2,h3,p{orphans:3;widows:3}.rst-content .toctree-wrapper>p.caption,h2,h3{page-break-after:avoid}}.btn,.fa:before,.icon:before,.rst-content .admonition,.rst-content .admonition-title:before,.rst-content .admonition-todo,.rst-content .attention,.rst-content .caution,.rst-content .code-block-caption .headerlink:before,.rst-content .danger,.rst-content .eqno .headerlink:before,.rst-content .error,.rst-content .hint,.rst-content .important,.rst-content .note,.rst-content .seealso,.rst-content .tip,.rst-content .warning,.rst-content code.download span:first-child:before,.rst-content dl dt .headerlink:before,.rst-content h1 .headerlink:before,.rst-content h2 .headerlink:before,.rst-content h3 .headerlink:before,.rst-content h4 .headerlink:before,.rst-content h5 .headerlink:before,.rst-content h6 .headerlink:before,.rst-content p.caption .headerlink:before,.rst-content p .headerlink:before,.rst-content table>caption .headerlink:before,.rst-content tt.download span:first-child:before,.wy-alert,.wy-dropdown .caret:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before,.wy-inline-validate.wy-inline-validate-success .wy-input-context:before,.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before,.wy-menu-vertical li.current>a button.toctree-expand:before,.wy-menu-vertical li.on a button.toctree-expand:before,.wy-menu-vertical li button.toctree-expand:before,input[type=color],input[type=date],input[type=datetime-local],input[type=datetime],input[type=email],input[type=month],input[type=number],input[type=password],input[type=search],input[type=tel],input[type=text],input[type=time],input[type=url],input[type=week],select,textarea{-webkit-font-smoothing:antialiased}.clearfix{*zoom:1}.clearfix:after,.clearfix:before{display:table;content:""}.clearfix:after{clear:both}/*! 
- * Font Awesome 4.7.0 by @davegandy - http://fontawesome.io - @fontawesome - * License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License) - */@font-face{font-family:FontAwesome;src:url(fonts/fontawesome-webfont.eot?674f50d287a8c48dc19ba404d20fe713);src:url(fonts/fontawesome-webfont.eot?674f50d287a8c48dc19ba404d20fe713?#iefix&v=4.7.0) format("embedded-opentype"),url(fonts/fontawesome-webfont.woff2?af7ae505a9eed503f8b8e6982036873e) format("woff2"),url(fonts/fontawesome-webfont.woff?fee66e712a8a08eef5805a46892932ad) format("woff"),url(fonts/fontawesome-webfont.ttf?b06871f281fee6b241d60582ae9369b9) format("truetype"),url(fonts/fontawesome-webfont.svg?912ec66d7572ff821749319396470bde#fontawesomeregular) format("svg");font-weight:400;font-style:normal}.fa,.icon,.rst-content .admonition-title,.rst-content .code-block-caption .headerlink,.rst-content .eqno .headerlink,.rst-content code.download span:first-child,.rst-content dl dt .headerlink,.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content p.caption .headerlink,.rst-content p .headerlink,.rst-content table>caption .headerlink,.rst-content tt.download span:first-child,.wy-menu-vertical li.current>a button.toctree-expand,.wy-menu-vertical li.on a button.toctree-expand,.wy-menu-vertical li button.toctree-expand{display:inline-block;font:normal normal normal 14px/1 FontAwesome;font-size:inherit;text-rendering:auto;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.fa-lg{font-size:1.33333em;line-height:.75em;vertical-align:-15%}.fa-2x{font-size:2em}.fa-3x{font-size:3em}.fa-4x{font-size:4em}.fa-5x{font-size:5em}.fa-fw{width:1.28571em;text-align:center}.fa-ul{padding-left:0;margin-left:2.14286em;list-style-type:none}.fa-ul>li{position:relative}.fa-li{position:absolute;left:-2.14286em;width:2.14286em;top:.14286em;text-align:center}.fa-li.fa-lg{left:-1.85714em}.fa-border{padding:.2em .25em .15em;border:.08em solid #eee;border-radius:.1em}.fa-pull-left{float:left}.fa-pull-right{float:right}.fa-pull-left.icon,.fa.fa-pull-left,.rst-content .code-block-caption .fa-pull-left.headerlink,.rst-content .eqno .fa-pull-left.headerlink,.rst-content .fa-pull-left.admonition-title,.rst-content code.download span.fa-pull-left:first-child,.rst-content dl dt .fa-pull-left.headerlink,.rst-content h1 .fa-pull-left.headerlink,.rst-content h2 .fa-pull-left.headerlink,.rst-content h3 .fa-pull-left.headerlink,.rst-content h4 .fa-pull-left.headerlink,.rst-content h5 .fa-pull-left.headerlink,.rst-content h6 .fa-pull-left.headerlink,.rst-content p .fa-pull-left.headerlink,.rst-content table>caption .fa-pull-left.headerlink,.rst-content tt.download span.fa-pull-left:first-child,.wy-menu-vertical li.current>a button.fa-pull-left.toctree-expand,.wy-menu-vertical li.on a button.fa-pull-left.toctree-expand,.wy-menu-vertical li button.fa-pull-left.toctree-expand{margin-right:.3em}.fa-pull-right.icon,.fa.fa-pull-right,.rst-content .code-block-caption .fa-pull-right.headerlink,.rst-content .eqno .fa-pull-right.headerlink,.rst-content .fa-pull-right.admonition-title,.rst-content code.download span.fa-pull-right:first-child,.rst-content dl dt .fa-pull-right.headerlink,.rst-content h1 .fa-pull-right.headerlink,.rst-content h2 .fa-pull-right.headerlink,.rst-content h3 .fa-pull-right.headerlink,.rst-content h4 .fa-pull-right.headerlink,.rst-content h5 .fa-pull-right.headerlink,.rst-content h6 
.fa-pull-right.headerlink,.rst-content p .fa-pull-right.headerlink,.rst-content table>caption .fa-pull-right.headerlink,.rst-content tt.download span.fa-pull-right:first-child,.wy-menu-vertical li.current>a button.fa-pull-right.toctree-expand,.wy-menu-vertical li.on a button.fa-pull-right.toctree-expand,.wy-menu-vertical li button.fa-pull-right.toctree-expand{margin-left:.3em}.pull-right{float:right}.pull-left{float:left}.fa.pull-left,.pull-left.icon,.rst-content .code-block-caption .pull-left.headerlink,.rst-content .eqno .pull-left.headerlink,.rst-content .pull-left.admonition-title,.rst-content code.download span.pull-left:first-child,.rst-content dl dt .pull-left.headerlink,.rst-content h1 .pull-left.headerlink,.rst-content h2 .pull-left.headerlink,.rst-content h3 .pull-left.headerlink,.rst-content h4 .pull-left.headerlink,.rst-content h5 .pull-left.headerlink,.rst-content h6 .pull-left.headerlink,.rst-content p .pull-left.headerlink,.rst-content table>caption .pull-left.headerlink,.rst-content tt.download span.pull-left:first-child,.wy-menu-vertical li.current>a button.pull-left.toctree-expand,.wy-menu-vertical li.on a button.pull-left.toctree-expand,.wy-menu-vertical li button.pull-left.toctree-expand{margin-right:.3em}.fa.pull-right,.pull-right.icon,.rst-content .code-block-caption .pull-right.headerlink,.rst-content .eqno .pull-right.headerlink,.rst-content .pull-right.admonition-title,.rst-content code.download span.pull-right:first-child,.rst-content dl dt .pull-right.headerlink,.rst-content h1 .pull-right.headerlink,.rst-content h2 .pull-right.headerlink,.rst-content h3 .pull-right.headerlink,.rst-content h4 .pull-right.headerlink,.rst-content h5 .pull-right.headerlink,.rst-content h6 .pull-right.headerlink,.rst-content p .pull-right.headerlink,.rst-content table>caption .pull-right.headerlink,.rst-content tt.download span.pull-right:first-child,.wy-menu-vertical li.current>a button.pull-right.toctree-expand,.wy-menu-vertical li.on a button.pull-right.toctree-expand,.wy-menu-vertical li button.pull-right.toctree-expand{margin-left:.3em}.fa-spin{-webkit-animation:fa-spin 2s linear infinite;animation:fa-spin 2s linear infinite}.fa-pulse{-webkit-animation:fa-spin 1s steps(8) infinite;animation:fa-spin 1s steps(8) infinite}@-webkit-keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}to{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}@keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}to{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}.fa-rotate-90{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=1)";-webkit-transform:rotate(90deg);-ms-transform:rotate(90deg);transform:rotate(90deg)}.fa-rotate-180{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2)";-webkit-transform:rotate(180deg);-ms-transform:rotate(180deg);transform:rotate(180deg)}.fa-rotate-270{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=3)";-webkit-transform:rotate(270deg);-ms-transform:rotate(270deg);transform:rotate(270deg)}.fa-flip-horizontal{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1)";-webkit-transform:scaleX(-1);-ms-transform:scaleX(-1);transform:scaleX(-1)}.fa-flip-vertical{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1)";-webkit-transform:scaleY(-1);-ms-transform:scaleY(-1);transform:scaleY(-1)}:root .fa-flip-horizontal,:root .fa-flip-vertical,:root .fa-rotate-90,:root .fa-rotate-180,:root 
.fa-rotate-270{filter:none}.fa-stack{position:relative;display:inline-block;width:2em;height:2em;line-height:2em;vertical-align:middle}.fa-stack-1x,.fa-stack-2x{position:absolute;left:0;width:100%;text-align:center}.fa-stack-1x{line-height:inherit}.fa-stack-2x{font-size:2em}.fa-inverse{color:#fff}.fa-glass:before{content:""}.fa-music:before{content:""}.fa-search:before,.icon-search:before{content:""}.fa-envelope-o:before{content:""}.fa-heart:before{content:""}.fa-star:before{content:""}.fa-star-o:before{content:""}.fa-user:before{content:""}.fa-film:before{content:""}.fa-th-large:before{content:""}.fa-th:before{content:""}.fa-th-list:before{content:""}.fa-check:before{content:""}.fa-close:before,.fa-remove:before,.fa-times:before{content:""}.fa-search-plus:before{content:""}.fa-search-minus:before{content:""}.fa-power-off:before{content:""}.fa-signal:before{content:""}.fa-cog:before,.fa-gear:before{content:""}.fa-trash-o:before{content:""}.fa-home:before,.icon-home:before{content:""}.fa-file-o:before{content:""}.fa-clock-o:before{content:""}.fa-road:before{content:""}.fa-download:before,.rst-content code.download span:first-child:before,.rst-content tt.download span:first-child:before{content:""}.fa-arrow-circle-o-down:before{content:""}.fa-arrow-circle-o-up:before{content:""}.fa-inbox:before{content:""}.fa-play-circle-o:before{content:""}.fa-repeat:before,.fa-rotate-right:before{content:""}.fa-refresh:before{content:""}.fa-list-alt:before{content:""}.fa-lock:before{content:""}.fa-flag:before{content:""}.fa-headphones:before{content:""}.fa-volume-off:before{content:""}.fa-volume-down:before{content:""}.fa-volume-up:before{content:""}.fa-qrcode:before{content:""}.fa-barcode:before{content:""}.fa-tag:before{content:""}.fa-tags:before{content:""}.fa-book:before,.icon-book:before{content:""}.fa-bookmark:before{content:""}.fa-print:before{content:""}.fa-camera:before{content:""}.fa-font:before{content:""}.fa-bold:before{content:""}.fa-italic:before{content:""}.fa-text-height:before{content:""}.fa-text-width:before{content:""}.fa-align-left:before{content:""}.fa-align-center:before{content:""}.fa-align-right:before{content:""}.fa-align-justify:before{content:""}.fa-list:before{content:""}.fa-dedent:before,.fa-outdent:before{content:""}.fa-indent:before{content:""}.fa-video-camera:before{content:""}.fa-image:before,.fa-photo:before,.fa-picture-o:before{content:""}.fa-pencil:before{content:""}.fa-map-marker:before{content:""}.fa-adjust:before{content:""}.fa-tint:before{content:""}.fa-edit:before,.fa-pencil-square-o:before{content:""}.fa-share-square-o:before{content:""}.fa-check-square-o:before{content:""}.fa-arrows:before{content:""}.fa-step-backward:before{content:""}.fa-fast-backward:before{content:""}.fa-backward:before{content:""}.fa-play:before{content:""}.fa-pause:before{content:""}.fa-stop:before{content:""}.fa-forward:before{content:""}.fa-fast-forward:before{content:""}.fa-step-forward:before{content:""}.fa-eject:before{content:""}.fa-chevron-left:before{content:""}.fa-chevron-right:before{content:""}.fa-plus-circle:before{content:""}.fa-minus-circle:before{content:""}.fa-times-circle:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before{content:""}.fa-check-circle:before,.wy-inline-validate.wy-inline-validate-success 
.wy-input-context:before{content:""}.fa-question-circle:before{content:""}.fa-info-circle:before{content:""}.fa-crosshairs:before{content:""}.fa-times-circle-o:before{content:""}.fa-check-circle-o:before{content:""}.fa-ban:before{content:""}.fa-arrow-left:before{content:""}.fa-arrow-right:before{content:""}.fa-arrow-up:before{content:""}.fa-arrow-down:before{content:""}.fa-mail-forward:before,.fa-share:before{content:""}.fa-expand:before{content:""}.fa-compress:before{content:""}.fa-plus:before{content:""}.fa-minus:before{content:""}.fa-asterisk:before{content:""}.fa-exclamation-circle:before,.rst-content .admonition-title:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before,.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before{content:""}.fa-gift:before{content:""}.fa-leaf:before{content:""}.fa-fire:before,.icon-fire:before{content:""}.fa-eye:before{content:""}.fa-eye-slash:before{content:""}.fa-exclamation-triangle:before,.fa-warning:before{content:""}.fa-plane:before{content:""}.fa-calendar:before{content:""}.fa-random:before{content:""}.fa-comment:before{content:""}.fa-magnet:before{content:""}.fa-chevron-up:before{content:""}.fa-chevron-down:before{content:""}.fa-retweet:before{content:""}.fa-shopping-cart:before{content:""}.fa-folder:before{content:""}.fa-folder-open:before{content:""}.fa-arrows-v:before{content:""}.fa-arrows-h:before{content:""}.fa-bar-chart-o:before,.fa-bar-chart:before{content:""}.fa-twitter-square:before{content:""}.fa-facebook-square:before{content:""}.fa-camera-retro:before{content:""}.fa-key:before{content:""}.fa-cogs:before,.fa-gears:before{content:""}.fa-comments:before{content:""}.fa-thumbs-o-up:before{content:""}.fa-thumbs-o-down:before{content:""}.fa-star-half:before{content:""}.fa-heart-o:before{content:""}.fa-sign-out:before{content:""}.fa-linkedin-square:before{content:""}.fa-thumb-tack:before{content:""}.fa-external-link:before{content:""}.fa-sign-in:before{content:""}.fa-trophy:before{content:""}.fa-github-square:before{content:""}.fa-upload:before{content:""}.fa-lemon-o:before{content:""}.fa-phone:before{content:""}.fa-square-o:before{content:""}.fa-bookmark-o:before{content:""}.fa-phone-square:before{content:""}.fa-twitter:before{content:""}.fa-facebook-f:before,.fa-facebook:before{content:""}.fa-github:before,.icon-github:before{content:""}.fa-unlock:before{content:""}.fa-credit-card:before{content:""}.fa-feed:before,.fa-rss:before{content:""}.fa-hdd-o:before{content:""}.fa-bullhorn:before{content:""}.fa-bell:before{content:""}.fa-certificate:before{content:""}.fa-hand-o-right:before{content:""}.fa-hand-o-left:before{content:""}.fa-hand-o-up:before{content:""}.fa-hand-o-down:before{content:""}.fa-arrow-circle-left:before,.icon-circle-arrow-left:before{content:""}.fa-arrow-circle-right:before,.icon-circle-arrow-right:before{content:""}.fa-arrow-circle-up:before{content:""}.fa-arrow-circle-down:before{content:""}.fa-globe:before{content:""}.fa-wrench:before{content:""}.fa-tasks:before{content:""}.fa-filter:before{content:""}.fa-briefcase:before{content:""}.fa-arrows-alt:before{content:""}.fa-group:before,.fa-users:before{content:""}.fa-chain:before,.fa-link:before,.icon-link:before{content:""}.fa-cloud:before{content:""}.fa-flask:before{content:""}.fa-cut:before,.fa-scissors:before{content:""}.fa-copy:before,.fa-files-o:before{content:""}.fa-paperclip:before{content:""}.fa-floppy-o:before,.fa-save:before{content:""}.fa
-square:before{content:""}.fa-bars:before,.fa-navicon:before,.fa-reorder:before{content:""}.fa-list-ul:before{content:""}.fa-list-ol:before{content:""}.fa-strikethrough:before{content:""}.fa-underline:before{content:""}.fa-table:before{content:""}.fa-magic:before{content:""}.fa-truck:before{content:""}.fa-pinterest:before{content:""}.fa-pinterest-square:before{content:""}.fa-google-plus-square:before{content:""}.fa-google-plus:before{content:""}.fa-money:before{content:""}.fa-caret-down:before,.icon-caret-down:before,.wy-dropdown .caret:before{content:""}.fa-caret-up:before{content:""}.fa-caret-left:before{content:""}.fa-caret-right:before{content:""}.fa-columns:before{content:""}.fa-sort:before,.fa-unsorted:before{content:""}.fa-sort-desc:before,.fa-sort-down:before{content:""}.fa-sort-asc:before,.fa-sort-up:before{content:""}.fa-envelope:before{content:""}.fa-linkedin:before{content:""}.fa-rotate-left:before,.fa-undo:before{content:""}.fa-gavel:before,.fa-legal:before{content:""}.fa-dashboard:before,.fa-tachometer:before{content:""}.fa-comment-o:before{content:""}.fa-comments-o:before{content:""}.fa-bolt:before,.fa-flash:before{content:""}.fa-sitemap:before{content:""}.fa-umbrella:before{content:""}.fa-clipboard:before,.fa-paste:before{content:""}.fa-lightbulb-o:before{content:""}.fa-exchange:before{content:""}.fa-cloud-download:before{content:""}.fa-cloud-upload:before{content:""}.fa-user-md:before{content:""}.fa-stethoscope:before{content:""}.fa-suitcase:before{content:""}.fa-bell-o:before{content:""}.fa-coffee:before{content:""}.fa-cutlery:before{content:""}.fa-file-text-o:before{content:""}.fa-building-o:before{content:""}.fa-hospital-o:before{content:""}.fa-ambulance:before{content:""}.fa-medkit:before{content:""}.fa-fighter-jet:before{content:""}.fa-beer:before{content:""}.fa-h-square:before{content:""}.fa-plus-square:before{content:""}.fa-angle-double-left:before{content:""}.fa-angle-double-right:before{content:""}.fa-angle-double-up:before{content:""}.fa-angle-double-down:before{content:""}.fa-angle-left:before{content:""}.fa-angle-right:before{content:""}.fa-angle-up:before{content:""}.fa-angle-down:before{content:""}.fa-desktop:before{content:""}.fa-laptop:before{content:""}.fa-tablet:before{content:""}.fa-mobile-phone:before,.fa-mobile:before{content:""}.fa-circle-o:before{content:""}.fa-quote-left:before{content:""}.fa-quote-right:before{content:""}.fa-spinner:before{content:""}.fa-circle:before{content:""}.fa-mail-reply:before,.fa-reply:before{content:""}.fa-github-alt:before{content:""}.fa-folder-o:before{content:""}.fa-folder-open-o:before{content:""}.fa-smile-o:before{content:""}.fa-frown-o:before{content:""}.fa-meh-o:before{content:""}.fa-gamepad:before{content:""}.fa-keyboard-o:before{content:""}.fa-flag-o:before{content:""}.fa-flag-checkered:before{content:""}.fa-terminal:before{content:""}.fa-code:before{content:""}.fa-mail-reply-all:before,.fa-reply-all:before{content:""}.fa-star-half-empty:before,.fa-star-half-full:before,.fa-star-half-o:before{content:""}.fa-location-arrow:before{content:""}.fa-crop:before{content:""}.fa-code-fork:before{content:""}.fa-chain-broken:before,.fa-unlink:before{content:""}.fa-question:before{content:""}.fa-info:before{content:""}.fa-exclamation:before{content:""}.fa-superscript:before{content:""}.fa-subscript:before{content:""}.fa-eraser:before{content:""}.fa-puzzle-piece:before{content:""}.fa-microphone:before{content:""}.fa-microphone-slash:
before{content:""}.fa-shield:before{content:""}.fa-calendar-o:before{content:""}.fa-fire-extinguisher:before{content:""}.fa-rocket:before{content:""}.fa-maxcdn:before{content:""}.fa-chevron-circle-left:before{content:""}.fa-chevron-circle-right:before{content:""}.fa-chevron-circle-up:before{content:""}.fa-chevron-circle-down:before{content:""}.fa-html5:before{content:""}.fa-css3:before{content:""}.fa-anchor:before{content:""}.fa-unlock-alt:before{content:""}.fa-bullseye:before{content:""}.fa-ellipsis-h:before{content:""}.fa-ellipsis-v:before{content:""}.fa-rss-square:before{content:""}.fa-play-circle:before{content:""}.fa-ticket:before{content:""}.fa-minus-square:before{content:""}.fa-minus-square-o:before,.wy-menu-vertical li.current>a button.toctree-expand:before,.wy-menu-vertical li.on a button.toctree-expand:before{content:""}.fa-level-up:before{content:""}.fa-level-down:before{content:""}.fa-check-square:before{content:""}.fa-pencil-square:before{content:""}.fa-external-link-square:before{content:""}.fa-share-square:before{content:""}.fa-compass:before{content:""}.fa-caret-square-o-down:before,.fa-toggle-down:before{content:""}.fa-caret-square-o-up:before,.fa-toggle-up:before{content:""}.fa-caret-square-o-right:before,.fa-toggle-right:before{content:""}.fa-eur:before,.fa-euro:before{content:""}.fa-gbp:before{content:""}.fa-dollar:before,.fa-usd:before{content:""}.fa-inr:before,.fa-rupee:before{content:""}.fa-cny:before,.fa-jpy:before,.fa-rmb:before,.fa-yen:before{content:""}.fa-rouble:before,.fa-rub:before,.fa-ruble:before{content:""}.fa-krw:before,.fa-won:before{content:""}.fa-bitcoin:before,.fa-btc:before{content:""}.fa-file:before{content:""}.fa-file-text:before{content:""}.fa-sort-alpha-asc:before{content:""}.fa-sort-alpha-desc:before{content:""}.fa-sort-amount-asc:before{content:""}.fa-sort-amount-desc:before{content:""}.fa-sort-numeric-asc:before{content:""}.fa-sort-numeric-desc:before{content:""}.fa-thumbs-up:before{content:""}.fa-thumbs-down:before{content:""}.fa-youtube-square:before{content:""}.fa-youtube:before{content:""}.fa-xing:before{content:""}.fa-xing-square:before{content:""}.fa-youtube-play:before{content:""}.fa-dropbox:before{content:""}.fa-stack-overflow:before{content:""}.fa-instagram:before{content:""}.fa-flickr:before{content:""}.fa-adn:before{content:""}.fa-bitbucket:before,.icon-bitbucket:before{content:""}.fa-bitbucket-square:before{content:""}.fa-tumblr:before{content:""}.fa-tumblr-square:before{content:""}.fa-long-arrow-down:before{content:""}.fa-long-arrow-up:before{content:""}.fa-long-arrow-left:before{content:""}.fa-long-arrow-right:before{content:""}.fa-apple:before{content:""}.fa-windows:before{content:""}.fa-android:before{content:""}.fa-linux:before{content:""}.fa-dribbble:before{content:""}.fa-skype:before{content:""}.fa-foursquare:before{content:""}.fa-trello:before{content:""}.fa-female:before{content:""}.fa-male:before{content:""}.fa-gittip:before,.fa-gratipay:before{content:""}.fa-sun-o:before{content:""}.fa-moon-o:before{content:""}.fa-archive:before{content:""}.fa-bug:before{content:""}.fa-vk:before{content:""}.fa-weibo:before{content:""}.fa-renren:before{content:""}.fa-pagelines:before{content:""}.fa-stack-exchange:before{content:""}.fa-arrow-circle-o-right:before{content:""}.fa-arrow-circle-o-left:before{content:""}.fa-caret-square-o-left:before,.fa-toggle-left:before{content:""}.fa-dot-circle-o:before{content:""}.fa-wheelchair:before{content:""}.fa-
/* Font Awesome 4.7 icon rules elided: each .fa-*:before declaration in this span originally carried a glyph escape such as content:"\f000" (a private-use codepoint), but those codepoints were lost in extraction, leaving every rule as an empty content:"". Only the selector names survived, so the rules are summarized by this comment rather than reproduced. */
.sr-only{position:absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0,0,0,0);border:0}.sr-only-focusable:active,.sr-only-focusable:focus{position:static;width:auto;height:auto;margin:0;overflow:visible;clip:auto}.fa,.icon,.rst-content .admonition-title,.rst-content .code-block-caption .headerlink,.rst-content .eqno .headerlink,.rst-content code.download span:first-child,.rst-content dl dt .headerlink,.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content p.caption .headerlink,.rst-content p .headerlink,.rst-content table>caption .headerlink,.rst-content tt.download span:first-child,.wy-dropdown .caret,.wy-inline-validate.wy-inline-validate-danger .wy-input-context,.wy-inline-validate.wy-inline-validate-info .wy-input-context,.wy-inline-validate.wy-inline-validate-success .wy-input-context,.wy-inline-validate.wy-inline-validate-warning .wy-input-context,.wy-menu-vertical li.current>a button.toctree-expand,.wy-menu-vertical li.on a button.toctree-expand,.wy-menu-vertical li button.toctree-expand{font-family:inherit}.fa:before,.icon:before,.rst-content .admonition-title:before,.rst-content .code-block-caption .headerlink:before,.rst-content .eqno .headerlink:before,.rst-content code.download span:first-child:before,.rst-content dl dt .headerlink:before,.rst-content h1 .headerlink:before,.rst-content h2 .headerlink:before,.rst-content h3 .headerlink:before,.rst-content h4 .headerlink:before,.rst-content h5 .headerlink:before,.rst-content h6 .headerlink:before,.rst-content p.caption .headerlink:before,.rst-content p .headerlink:before,.rst-content table>caption .headerlink:before,.rst-content tt.download span:first-child:before,.wy-dropdown .caret:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before,.wy-inline-validate.wy-inline-validate-success .wy-input-context:before,.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before,.wy-menu-vertical li.current>a button.toctree-expand:before,.wy-menu-vertical li.on a button.toctree-expand:before,.wy-menu-vertical li 
button.toctree-expand:before{font-family:FontAwesome;display:inline-block;font-style:normal;font-weight:400;line-height:1;text-decoration:inherit}.rst-content .code-block-caption a .headerlink,.rst-content .eqno a .headerlink,.rst-content a .admonition-title,.rst-content code.download a span:first-child,.rst-content dl dt a .headerlink,.rst-content h1 a .headerlink,.rst-content h2 a .headerlink,.rst-content h3 a .headerlink,.rst-content h4 a .headerlink,.rst-content h5 a .headerlink,.rst-content h6 a .headerlink,.rst-content p.caption a .headerlink,.rst-content p a .headerlink,.rst-content table>caption a .headerlink,.rst-content tt.download a span:first-child,.wy-menu-vertical li.current>a button.toctree-expand,.wy-menu-vertical li.on a button.toctree-expand,.wy-menu-vertical li a button.toctree-expand,a .fa,a .icon,a .rst-content .admonition-title,a .rst-content .code-block-caption .headerlink,a .rst-content .eqno .headerlink,a .rst-content code.download span:first-child,a .rst-content dl dt .headerlink,a .rst-content h1 .headerlink,a .rst-content h2 .headerlink,a .rst-content h3 .headerlink,a .rst-content h4 .headerlink,a .rst-content h5 .headerlink,a .rst-content h6 .headerlink,a .rst-content p.caption .headerlink,a .rst-content p .headerlink,a .rst-content table>caption .headerlink,a .rst-content tt.download span:first-child,a .wy-menu-vertical li button.toctree-expand{display:inline-block;text-decoration:inherit}.btn .fa,.btn .icon,.btn .rst-content .admonition-title,.btn .rst-content .code-block-caption .headerlink,.btn .rst-content .eqno .headerlink,.btn .rst-content code.download span:first-child,.btn .rst-content dl dt .headerlink,.btn .rst-content h1 .headerlink,.btn .rst-content h2 .headerlink,.btn .rst-content h3 .headerlink,.btn .rst-content h4 .headerlink,.btn .rst-content h5 .headerlink,.btn .rst-content h6 .headerlink,.btn .rst-content p .headerlink,.btn .rst-content table>caption .headerlink,.btn .rst-content tt.download span:first-child,.btn .wy-menu-vertical li.current>a button.toctree-expand,.btn .wy-menu-vertical li.on a button.toctree-expand,.btn .wy-menu-vertical li button.toctree-expand,.nav .fa,.nav .icon,.nav .rst-content .admonition-title,.nav .rst-content .code-block-caption .headerlink,.nav .rst-content .eqno .headerlink,.nav .rst-content code.download span:first-child,.nav .rst-content dl dt .headerlink,.nav .rst-content h1 .headerlink,.nav .rst-content h2 .headerlink,.nav .rst-content h3 .headerlink,.nav .rst-content h4 .headerlink,.nav .rst-content h5 .headerlink,.nav .rst-content h6 .headerlink,.nav .rst-content p .headerlink,.nav .rst-content table>caption .headerlink,.nav .rst-content tt.download span:first-child,.nav .wy-menu-vertical li.current>a button.toctree-expand,.nav .wy-menu-vertical li.on a button.toctree-expand,.nav .wy-menu-vertical li button.toctree-expand,.rst-content .btn .admonition-title,.rst-content .code-block-caption .btn .headerlink,.rst-content .code-block-caption .nav .headerlink,.rst-content .eqno .btn .headerlink,.rst-content .eqno .nav .headerlink,.rst-content .nav .admonition-title,.rst-content code.download .btn span:first-child,.rst-content code.download .nav span:first-child,.rst-content dl dt .btn .headerlink,.rst-content dl dt .nav .headerlink,.rst-content h1 .btn .headerlink,.rst-content h1 .nav .headerlink,.rst-content h2 .btn .headerlink,.rst-content h2 .nav .headerlink,.rst-content h3 .btn .headerlink,.rst-content h3 .nav .headerlink,.rst-content h4 .btn .headerlink,.rst-content h4 .nav .headerlink,.rst-content h5 .btn 
.headerlink,.rst-content h5 .nav .headerlink,.rst-content h6 .btn .headerlink,.rst-content h6 .nav .headerlink,.rst-content p .btn .headerlink,.rst-content p .nav .headerlink,.rst-content table>caption .btn .headerlink,.rst-content table>caption .nav .headerlink,.rst-content tt.download .btn span:first-child,.rst-content tt.download .nav span:first-child,.wy-menu-vertical li .btn button.toctree-expand,.wy-menu-vertical li.current>a .btn button.toctree-expand,.wy-menu-vertical li.current>a .nav button.toctree-expand,.wy-menu-vertical li .nav button.toctree-expand,.wy-menu-vertical li.on a .btn button.toctree-expand,.wy-menu-vertical li.on a .nav button.toctree-expand{display:inline}.btn .fa-large.icon,.btn .fa.fa-large,.btn .rst-content .code-block-caption .fa-large.headerlink,.btn .rst-content .eqno .fa-large.headerlink,.btn .rst-content .fa-large.admonition-title,.btn .rst-content code.download span.fa-large:first-child,.btn .rst-content dl dt .fa-large.headerlink,.btn .rst-content h1 .fa-large.headerlink,.btn .rst-content h2 .fa-large.headerlink,.btn .rst-content h3 .fa-large.headerlink,.btn .rst-content h4 .fa-large.headerlink,.btn .rst-content h5 .fa-large.headerlink,.btn .rst-content h6 .fa-large.headerlink,.btn .rst-content p .fa-large.headerlink,.btn .rst-content table>caption .fa-large.headerlink,.btn .rst-content tt.download span.fa-large:first-child,.btn .wy-menu-vertical li button.fa-large.toctree-expand,.nav .fa-large.icon,.nav .fa.fa-large,.nav .rst-content .code-block-caption .fa-large.headerlink,.nav .rst-content .eqno .fa-large.headerlink,.nav .rst-content .fa-large.admonition-title,.nav .rst-content code.download span.fa-large:first-child,.nav .rst-content dl dt .fa-large.headerlink,.nav .rst-content h1 .fa-large.headerlink,.nav .rst-content h2 .fa-large.headerlink,.nav .rst-content h3 .fa-large.headerlink,.nav .rst-content h4 .fa-large.headerlink,.nav .rst-content h5 .fa-large.headerlink,.nav .rst-content h6 .fa-large.headerlink,.nav .rst-content p .fa-large.headerlink,.nav .rst-content table>caption .fa-large.headerlink,.nav .rst-content tt.download span.fa-large:first-child,.nav .wy-menu-vertical li button.fa-large.toctree-expand,.rst-content .btn .fa-large.admonition-title,.rst-content .code-block-caption .btn .fa-large.headerlink,.rst-content .code-block-caption .nav .fa-large.headerlink,.rst-content .eqno .btn .fa-large.headerlink,.rst-content .eqno .nav .fa-large.headerlink,.rst-content .nav .fa-large.admonition-title,.rst-content code.download .btn span.fa-large:first-child,.rst-content code.download .nav span.fa-large:first-child,.rst-content dl dt .btn .fa-large.headerlink,.rst-content dl dt .nav .fa-large.headerlink,.rst-content h1 .btn .fa-large.headerlink,.rst-content h1 .nav .fa-large.headerlink,.rst-content h2 .btn .fa-large.headerlink,.rst-content h2 .nav .fa-large.headerlink,.rst-content h3 .btn .fa-large.headerlink,.rst-content h3 .nav .fa-large.headerlink,.rst-content h4 .btn .fa-large.headerlink,.rst-content h4 .nav .fa-large.headerlink,.rst-content h5 .btn .fa-large.headerlink,.rst-content h5 .nav .fa-large.headerlink,.rst-content h6 .btn .fa-large.headerlink,.rst-content h6 .nav .fa-large.headerlink,.rst-content p .btn .fa-large.headerlink,.rst-content p .nav .fa-large.headerlink,.rst-content table>caption .btn .fa-large.headerlink,.rst-content table>caption .nav .fa-large.headerlink,.rst-content tt.download .btn span.fa-large:first-child,.rst-content tt.download .nav span.fa-large:first-child,.wy-menu-vertical li .btn 
button.fa-large.toctree-expand,.wy-menu-vertical li .nav button.fa-large.toctree-expand{line-height:.9em}.btn .fa-spin.icon,.btn .fa.fa-spin,.btn .rst-content .code-block-caption .fa-spin.headerlink,.btn .rst-content .eqno .fa-spin.headerlink,.btn .rst-content .fa-spin.admonition-title,.btn .rst-content code.download span.fa-spin:first-child,.btn .rst-content dl dt .fa-spin.headerlink,.btn .rst-content h1 .fa-spin.headerlink,.btn .rst-content h2 .fa-spin.headerlink,.btn .rst-content h3 .fa-spin.headerlink,.btn .rst-content h4 .fa-spin.headerlink,.btn .rst-content h5 .fa-spin.headerlink,.btn .rst-content h6 .fa-spin.headerlink,.btn .rst-content p .fa-spin.headerlink,.btn .rst-content table>caption .fa-spin.headerlink,.btn .rst-content tt.download span.fa-spin:first-child,.btn .wy-menu-vertical li button.fa-spin.toctree-expand,.nav .fa-spin.icon,.nav .fa.fa-spin,.nav .rst-content .code-block-caption .fa-spin.headerlink,.nav .rst-content .eqno .fa-spin.headerlink,.nav .rst-content .fa-spin.admonition-title,.nav .rst-content code.download span.fa-spin:first-child,.nav .rst-content dl dt .fa-spin.headerlink,.nav .rst-content h1 .fa-spin.headerlink,.nav .rst-content h2 .fa-spin.headerlink,.nav .rst-content h3 .fa-spin.headerlink,.nav .rst-content h4 .fa-spin.headerlink,.nav .rst-content h5 .fa-spin.headerlink,.nav .rst-content h6 .fa-spin.headerlink,.nav .rst-content p .fa-spin.headerlink,.nav .rst-content table>caption .fa-spin.headerlink,.nav .rst-content tt.download span.fa-spin:first-child,.nav .wy-menu-vertical li button.fa-spin.toctree-expand,.rst-content .btn .fa-spin.admonition-title,.rst-content .code-block-caption .btn .fa-spin.headerlink,.rst-content .code-block-caption .nav .fa-spin.headerlink,.rst-content .eqno .btn .fa-spin.headerlink,.rst-content .eqno .nav .fa-spin.headerlink,.rst-content .nav .fa-spin.admonition-title,.rst-content code.download .btn span.fa-spin:first-child,.rst-content code.download .nav span.fa-spin:first-child,.rst-content dl dt .btn .fa-spin.headerlink,.rst-content dl dt .nav .fa-spin.headerlink,.rst-content h1 .btn .fa-spin.headerlink,.rst-content h1 .nav .fa-spin.headerlink,.rst-content h2 .btn .fa-spin.headerlink,.rst-content h2 .nav .fa-spin.headerlink,.rst-content h3 .btn .fa-spin.headerlink,.rst-content h3 .nav .fa-spin.headerlink,.rst-content h4 .btn .fa-spin.headerlink,.rst-content h4 .nav .fa-spin.headerlink,.rst-content h5 .btn .fa-spin.headerlink,.rst-content h5 .nav .fa-spin.headerlink,.rst-content h6 .btn .fa-spin.headerlink,.rst-content h6 .nav .fa-spin.headerlink,.rst-content p .btn .fa-spin.headerlink,.rst-content p .nav .fa-spin.headerlink,.rst-content table>caption .btn .fa-spin.headerlink,.rst-content table>caption .nav .fa-spin.headerlink,.rst-content tt.download .btn span.fa-spin:first-child,.rst-content tt.download .nav span.fa-spin:first-child,.wy-menu-vertical li .btn button.fa-spin.toctree-expand,.wy-menu-vertical li .nav button.fa-spin.toctree-expand{display:inline-block}.btn.fa:before,.btn.icon:before,.rst-content .btn.admonition-title:before,.rst-content .code-block-caption .btn.headerlink:before,.rst-content .eqno .btn.headerlink:before,.rst-content code.download span.btn:first-child:before,.rst-content dl dt .btn.headerlink:before,.rst-content h1 .btn.headerlink:before,.rst-content h2 .btn.headerlink:before,.rst-content h3 .btn.headerlink:before,.rst-content h4 .btn.headerlink:before,.rst-content h5 .btn.headerlink:before,.rst-content h6 .btn.headerlink:before,.rst-content p .btn.headerlink:before,.rst-content table>caption 
.btn.headerlink:before,.rst-content tt.download span.btn:first-child:before,.wy-menu-vertical li button.btn.toctree-expand:before{opacity:.5;-webkit-transition:opacity .05s ease-in;-moz-transition:opacity .05s ease-in;transition:opacity .05s ease-in}.btn.fa:hover:before,.btn.icon:hover:before,.rst-content .btn.admonition-title:hover:before,.rst-content .code-block-caption .btn.headerlink:hover:before,.rst-content .eqno .btn.headerlink:hover:before,.rst-content code.download span.btn:first-child:hover:before,.rst-content dl dt .btn.headerlink:hover:before,.rst-content h1 .btn.headerlink:hover:before,.rst-content h2 .btn.headerlink:hover:before,.rst-content h3 .btn.headerlink:hover:before,.rst-content h4 .btn.headerlink:hover:before,.rst-content h5 .btn.headerlink:hover:before,.rst-content h6 .btn.headerlink:hover:before,.rst-content p .btn.headerlink:hover:before,.rst-content table>caption .btn.headerlink:hover:before,.rst-content tt.download span.btn:first-child:hover:before,.wy-menu-vertical li button.btn.toctree-expand:hover:before{opacity:1}.btn-mini .fa:before,.btn-mini .icon:before,.btn-mini .rst-content .admonition-title:before,.btn-mini .rst-content .code-block-caption .headerlink:before,.btn-mini .rst-content .eqno .headerlink:before,.btn-mini .rst-content code.download span:first-child:before,.btn-mini .rst-content dl dt .headerlink:before,.btn-mini .rst-content h1 .headerlink:before,.btn-mini .rst-content h2 .headerlink:before,.btn-mini .rst-content h3 .headerlink:before,.btn-mini .rst-content h4 .headerlink:before,.btn-mini .rst-content h5 .headerlink:before,.btn-mini .rst-content h6 .headerlink:before,.btn-mini .rst-content p .headerlink:before,.btn-mini .rst-content table>caption .headerlink:before,.btn-mini .rst-content tt.download span:first-child:before,.btn-mini .wy-menu-vertical li button.toctree-expand:before,.rst-content .btn-mini .admonition-title:before,.rst-content .code-block-caption .btn-mini .headerlink:before,.rst-content .eqno .btn-mini .headerlink:before,.rst-content code.download .btn-mini span:first-child:before,.rst-content dl dt .btn-mini .headerlink:before,.rst-content h1 .btn-mini .headerlink:before,.rst-content h2 .btn-mini .headerlink:before,.rst-content h3 .btn-mini .headerlink:before,.rst-content h4 .btn-mini .headerlink:before,.rst-content h5 .btn-mini .headerlink:before,.rst-content h6 .btn-mini .headerlink:before,.rst-content p .btn-mini .headerlink:before,.rst-content table>caption .btn-mini .headerlink:before,.rst-content tt.download .btn-mini span:first-child:before,.wy-menu-vertical li .btn-mini button.toctree-expand:before{font-size:14px;vertical-align:-15%}.rst-content .admonition,.rst-content .admonition-todo,.rst-content .attention,.rst-content .caution,.rst-content .danger,.rst-content .error,.rst-content .hint,.rst-content .important,.rst-content .note,.rst-content .seealso,.rst-content .tip,.rst-content .warning,.wy-alert{padding:12px;line-height:24px;margin-bottom:24px;background:#e7f2fa}.rst-content .admonition-title,.wy-alert-title{font-weight:700;display:block;color:#fff;background:#6ab0de;padding:6px 12px;margin:-12px -12px 12px}.rst-content .danger,.rst-content .error,.rst-content .wy-alert-danger.admonition,.rst-content .wy-alert-danger.admonition-todo,.rst-content .wy-alert-danger.attention,.rst-content .wy-alert-danger.caution,.rst-content .wy-alert-danger.hint,.rst-content .wy-alert-danger.important,.rst-content .wy-alert-danger.note,.rst-content .wy-alert-danger.seealso,.rst-content .wy-alert-danger.tip,.rst-content 
.wy-alert-danger.warning,.wy-alert.wy-alert-danger{background:#fdf3f2}.rst-content .danger .admonition-title,.rst-content .danger .wy-alert-title,.rst-content .error .admonition-title,.rst-content .error .wy-alert-title,.rst-content .wy-alert-danger.admonition-todo .admonition-title,.rst-content .wy-alert-danger.admonition-todo .wy-alert-title,.rst-content .wy-alert-danger.admonition .admonition-title,.rst-content .wy-alert-danger.admonition .wy-alert-title,.rst-content .wy-alert-danger.attention .admonition-title,.rst-content .wy-alert-danger.attention .wy-alert-title,.rst-content .wy-alert-danger.caution .admonition-title,.rst-content .wy-alert-danger.caution .wy-alert-title,.rst-content .wy-alert-danger.hint .admonition-title,.rst-content .wy-alert-danger.hint .wy-alert-title,.rst-content .wy-alert-danger.important .admonition-title,.rst-content .wy-alert-danger.important .wy-alert-title,.rst-content .wy-alert-danger.note .admonition-title,.rst-content .wy-alert-danger.note .wy-alert-title,.rst-content .wy-alert-danger.seealso .admonition-title,.rst-content .wy-alert-danger.seealso .wy-alert-title,.rst-content .wy-alert-danger.tip .admonition-title,.rst-content .wy-alert-danger.tip .wy-alert-title,.rst-content .wy-alert-danger.warning .admonition-title,.rst-content .wy-alert-danger.warning .wy-alert-title,.rst-content .wy-alert.wy-alert-danger .admonition-title,.wy-alert.wy-alert-danger .rst-content .admonition-title,.wy-alert.wy-alert-danger .wy-alert-title{background:#f29f97}.rst-content .admonition-todo,.rst-content .attention,.rst-content .caution,.rst-content .warning,.rst-content .wy-alert-warning.admonition,.rst-content .wy-alert-warning.danger,.rst-content .wy-alert-warning.error,.rst-content .wy-alert-warning.hint,.rst-content .wy-alert-warning.important,.rst-content .wy-alert-warning.note,.rst-content .wy-alert-warning.seealso,.rst-content .wy-alert-warning.tip,.wy-alert.wy-alert-warning{background:#ffedcc}.rst-content .admonition-todo .admonition-title,.rst-content .admonition-todo .wy-alert-title,.rst-content .attention .admonition-title,.rst-content .attention .wy-alert-title,.rst-content .caution .admonition-title,.rst-content .caution .wy-alert-title,.rst-content .warning .admonition-title,.rst-content .warning .wy-alert-title,.rst-content .wy-alert-warning.admonition .admonition-title,.rst-content .wy-alert-warning.admonition .wy-alert-title,.rst-content .wy-alert-warning.danger .admonition-title,.rst-content .wy-alert-warning.danger .wy-alert-title,.rst-content .wy-alert-warning.error .admonition-title,.rst-content .wy-alert-warning.error .wy-alert-title,.rst-content .wy-alert-warning.hint .admonition-title,.rst-content .wy-alert-warning.hint .wy-alert-title,.rst-content .wy-alert-warning.important .admonition-title,.rst-content .wy-alert-warning.important .wy-alert-title,.rst-content .wy-alert-warning.note .admonition-title,.rst-content .wy-alert-warning.note .wy-alert-title,.rst-content .wy-alert-warning.seealso .admonition-title,.rst-content .wy-alert-warning.seealso .wy-alert-title,.rst-content .wy-alert-warning.tip .admonition-title,.rst-content .wy-alert-warning.tip .wy-alert-title,.rst-content .wy-alert.wy-alert-warning .admonition-title,.wy-alert.wy-alert-warning .rst-content .admonition-title,.wy-alert.wy-alert-warning .wy-alert-title{background:#f0b37e}.rst-content .note,.rst-content .seealso,.rst-content .wy-alert-info.admonition,.rst-content .wy-alert-info.admonition-todo,.rst-content .wy-alert-info.attention,.rst-content .wy-alert-info.caution,.rst-content 
.wy-alert-info.danger,.rst-content .wy-alert-info.error,.rst-content .wy-alert-info.hint,.rst-content .wy-alert-info.important,.rst-content .wy-alert-info.tip,.rst-content .wy-alert-info.warning,.wy-alert.wy-alert-info{background:#e7f2fa}.rst-content .note .admonition-title,.rst-content .note .wy-alert-title,.rst-content .seealso .admonition-title,.rst-content .seealso .wy-alert-title,.rst-content .wy-alert-info.admonition-todo .admonition-title,.rst-content .wy-alert-info.admonition-todo .wy-alert-title,.rst-content .wy-alert-info.admonition .admonition-title,.rst-content .wy-alert-info.admonition .wy-alert-title,.rst-content .wy-alert-info.attention .admonition-title,.rst-content .wy-alert-info.attention .wy-alert-title,.rst-content .wy-alert-info.caution .admonition-title,.rst-content .wy-alert-info.caution .wy-alert-title,.rst-content .wy-alert-info.danger .admonition-title,.rst-content .wy-alert-info.danger .wy-alert-title,.rst-content .wy-alert-info.error .admonition-title,.rst-content .wy-alert-info.error .wy-alert-title,.rst-content .wy-alert-info.hint .admonition-title,.rst-content .wy-alert-info.hint .wy-alert-title,.rst-content .wy-alert-info.important .admonition-title,.rst-content .wy-alert-info.important .wy-alert-title,.rst-content .wy-alert-info.tip .admonition-title,.rst-content .wy-alert-info.tip .wy-alert-title,.rst-content .wy-alert-info.warning .admonition-title,.rst-content .wy-alert-info.warning .wy-alert-title,.rst-content .wy-alert.wy-alert-info .admonition-title,.wy-alert.wy-alert-info .rst-content .admonition-title,.wy-alert.wy-alert-info .wy-alert-title{background:#6ab0de}.rst-content .hint,.rst-content .important,.rst-content .tip,.rst-content .wy-alert-success.admonition,.rst-content .wy-alert-success.admonition-todo,.rst-content .wy-alert-success.attention,.rst-content .wy-alert-success.caution,.rst-content .wy-alert-success.danger,.rst-content .wy-alert-success.error,.rst-content .wy-alert-success.note,.rst-content .wy-alert-success.seealso,.rst-content .wy-alert-success.warning,.wy-alert.wy-alert-success{background:#dbfaf4}.rst-content .hint .admonition-title,.rst-content .hint .wy-alert-title,.rst-content .important .admonition-title,.rst-content .important .wy-alert-title,.rst-content .tip .admonition-title,.rst-content .tip .wy-alert-title,.rst-content .wy-alert-success.admonition-todo .admonition-title,.rst-content .wy-alert-success.admonition-todo .wy-alert-title,.rst-content .wy-alert-success.admonition .admonition-title,.rst-content .wy-alert-success.admonition .wy-alert-title,.rst-content .wy-alert-success.attention .admonition-title,.rst-content .wy-alert-success.attention .wy-alert-title,.rst-content .wy-alert-success.caution .admonition-title,.rst-content .wy-alert-success.caution .wy-alert-title,.rst-content .wy-alert-success.danger .admonition-title,.rst-content .wy-alert-success.danger .wy-alert-title,.rst-content .wy-alert-success.error .admonition-title,.rst-content .wy-alert-success.error .wy-alert-title,.rst-content .wy-alert-success.note .admonition-title,.rst-content .wy-alert-success.note .wy-alert-title,.rst-content .wy-alert-success.seealso .admonition-title,.rst-content .wy-alert-success.seealso .wy-alert-title,.rst-content .wy-alert-success.warning .admonition-title,.rst-content .wy-alert-success.warning .wy-alert-title,.rst-content .wy-alert.wy-alert-success .admonition-title,.wy-alert.wy-alert-success .rst-content .admonition-title,.wy-alert.wy-alert-success .wy-alert-title{background:#1abc9c}.rst-content 
.wy-alert-neutral.admonition,.rst-content .wy-alert-neutral.admonition-todo,.rst-content .wy-alert-neutral.attention,.rst-content .wy-alert-neutral.caution,.rst-content .wy-alert-neutral.danger,.rst-content .wy-alert-neutral.error,.rst-content .wy-alert-neutral.hint,.rst-content .wy-alert-neutral.important,.rst-content .wy-alert-neutral.note,.rst-content .wy-alert-neutral.seealso,.rst-content .wy-alert-neutral.tip,.rst-content .wy-alert-neutral.warning,.wy-alert.wy-alert-neutral{background:#f3f6f6}.rst-content .wy-alert-neutral.admonition-todo .admonition-title,.rst-content .wy-alert-neutral.admonition-todo .wy-alert-title,.rst-content .wy-alert-neutral.admonition .admonition-title,.rst-content .wy-alert-neutral.admonition .wy-alert-title,.rst-content .wy-alert-neutral.attention .admonition-title,.rst-content .wy-alert-neutral.attention .wy-alert-title,.rst-content .wy-alert-neutral.caution .admonition-title,.rst-content .wy-alert-neutral.caution .wy-alert-title,.rst-content .wy-alert-neutral.danger .admonition-title,.rst-content .wy-alert-neutral.danger .wy-alert-title,.rst-content .wy-alert-neutral.error .admonition-title,.rst-content .wy-alert-neutral.error .wy-alert-title,.rst-content .wy-alert-neutral.hint .admonition-title,.rst-content .wy-alert-neutral.hint .wy-alert-title,.rst-content .wy-alert-neutral.important .admonition-title,.rst-content .wy-alert-neutral.important .wy-alert-title,.rst-content .wy-alert-neutral.note .admonition-title,.rst-content .wy-alert-neutral.note .wy-alert-title,.rst-content .wy-alert-neutral.seealso .admonition-title,.rst-content .wy-alert-neutral.seealso .wy-alert-title,.rst-content .wy-alert-neutral.tip .admonition-title,.rst-content .wy-alert-neutral.tip .wy-alert-title,.rst-content .wy-alert-neutral.warning .admonition-title,.rst-content .wy-alert-neutral.warning .wy-alert-title,.rst-content .wy-alert.wy-alert-neutral .admonition-title,.wy-alert.wy-alert-neutral .rst-content .admonition-title,.wy-alert.wy-alert-neutral .wy-alert-title{color:#404040;background:#e1e4e5}.rst-content .wy-alert-neutral.admonition-todo a,.rst-content .wy-alert-neutral.admonition a,.rst-content .wy-alert-neutral.attention a,.rst-content .wy-alert-neutral.caution a,.rst-content .wy-alert-neutral.danger a,.rst-content .wy-alert-neutral.error a,.rst-content .wy-alert-neutral.hint a,.rst-content .wy-alert-neutral.important a,.rst-content .wy-alert-neutral.note a,.rst-content .wy-alert-neutral.seealso a,.rst-content .wy-alert-neutral.tip a,.rst-content .wy-alert-neutral.warning a,.wy-alert.wy-alert-neutral a{color:#2980b9}.rst-content .admonition-todo p:last-child,.rst-content .admonition p:last-child,.rst-content .attention p:last-child,.rst-content .caution p:last-child,.rst-content .danger p:last-child,.rst-content .error p:last-child,.rst-content .hint p:last-child,.rst-content .important p:last-child,.rst-content .note p:last-child,.rst-content .seealso p:last-child,.rst-content .tip p:last-child,.rst-content .warning p:last-child,.wy-alert p:last-child{margin-bottom:0}.wy-tray-container{position:fixed;bottom:0;left:0;z-index:600}.wy-tray-container li{display:block;width:300px;background:transparent;color:#fff;text-align:center;box-shadow:0 5px 5px 0 rgba(0,0,0,.1);padding:0 24px;min-width:20%;opacity:0;height:0;line-height:56px;overflow:hidden;-webkit-transition:all .3s ease-in;-moz-transition:all .3s ease-in;transition:all .3s ease-in}.wy-tray-container li.wy-tray-item-success{background:#27ae60}.wy-tray-container 
li.wy-tray-item-info{background:#2980b9}.wy-tray-container li.wy-tray-item-warning{background:#e67e22}.wy-tray-container li.wy-tray-item-danger{background:#e74c3c}.wy-tray-container li.on{opacity:1;height:56px}@media screen and (max-width:768px){.wy-tray-container{bottom:auto;top:0;width:100%}.wy-tray-container li{width:100%}}button{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle;cursor:pointer;line-height:normal;-webkit-appearance:button;*overflow:visible}button::-moz-focus-inner,input::-moz-focus-inner{border:0;padding:0}button[disabled]{cursor:default}.btn{display:inline-block;border-radius:2px;line-height:normal;white-space:nowrap;text-align:center;cursor:pointer;font-size:100%;padding:6px 12px 8px;color:#fff;border:1px solid rgba(0,0,0,.1);background-color:#27ae60;text-decoration:none;font-weight:400;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;box-shadow:inset 0 1px 2px -1px hsla(0,0%,100%,.5),inset 0 -2px 0 0 rgba(0,0,0,.1);outline-none:false;vertical-align:middle;*display:inline;zoom:1;-webkit-user-drag:none;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;-webkit-transition:all .1s linear;-moz-transition:all .1s linear;transition:all .1s linear}.btn-hover{background:#2e8ece;color:#fff}.btn:hover{background:#2cc36b;color:#fff}.btn:focus{background:#2cc36b;outline:0}.btn:active{box-shadow:inset 0 -1px 0 0 rgba(0,0,0,.05),inset 0 2px 0 0 rgba(0,0,0,.1);padding:8px 12px 6px}.btn:visited{color:#fff}.btn-disabled,.btn-disabled:active,.btn-disabled:focus,.btn-disabled:hover,.btn:disabled{background-image:none;filter:progid:DXImageTransform.Microsoft.gradient(enabled = false);filter:alpha(opacity=40);opacity:.4;cursor:not-allowed;box-shadow:none}.btn::-moz-focus-inner{padding:0;border:0}.btn-small{font-size:80%}.btn-info{background-color:#2980b9!important}.btn-info:hover{background-color:#2e8ece!important}.btn-neutral{background-color:#f3f6f6!important;color:#404040!important}.btn-neutral:hover{background-color:#e5ebeb!important;color:#404040}.btn-neutral:visited{color:#404040!important}.btn-success{background-color:#27ae60!important}.btn-success:hover{background-color:#295!important}.btn-danger{background-color:#e74c3c!important}.btn-danger:hover{background-color:#ea6153!important}.btn-warning{background-color:#e67e22!important}.btn-warning:hover{background-color:#e98b39!important}.btn-invert{background-color:#222}.btn-invert:hover{background-color:#2f2f2f!important}.btn-link{background-color:transparent!important;color:#2980b9;box-shadow:none;border-color:transparent!important}.btn-link:active,.btn-link:hover{background-color:transparent!important;color:#409ad5!important;box-shadow:none}.btn-link:visited{color:#9b59b6}.wy-btn-group .btn,.wy-control .btn{vertical-align:middle}.wy-btn-group{margin-bottom:24px;*zoom:1}.wy-btn-group:after,.wy-btn-group:before{display:table;content:""}.wy-btn-group:after{clear:both}.wy-dropdown{position:relative;display:inline-block}.wy-dropdown-active .wy-dropdown-menu{display:block}.wy-dropdown-menu{position:absolute;left:0;display:none;float:left;top:100%;min-width:100%;background:#fcfcfc;z-index:100;border:1px solid #cfd7dd;box-shadow:0 2px 2px 0 rgba(0,0,0,.1);padding:12px}.wy-dropdown-menu>dd>a{display:block;clear:both;color:#404040;white-space:nowrap;font-size:90%;padding:0 12px;cursor:pointer}.wy-dropdown-menu>dd>a:hover{background:#2980b9;color:#fff}.wy-dropdown-menu>dd.divider{border-top:1px solid #cfd7dd;margin:6px 
0}.wy-dropdown-menu>dd.search{padding-bottom:12px}.wy-dropdown-menu>dd.search input[type=search]{width:100%}.wy-dropdown-menu>dd.call-to-action{background:#e3e3e3;text-transform:uppercase;font-weight:500;font-size:80%}.wy-dropdown-menu>dd.call-to-action:hover{background:#e3e3e3}.wy-dropdown-menu>dd.call-to-action .btn{color:#fff}.wy-dropdown.wy-dropdown-up .wy-dropdown-menu{bottom:100%;top:auto;left:auto;right:0}.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu{background:#fcfcfc;margin-top:2px}.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu a{padding:6px 12px}.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu a:hover{background:#2980b9;color:#fff}.wy-dropdown.wy-dropdown-left .wy-dropdown-menu{right:0;left:auto;text-align:right}.wy-dropdown-arrow:before{content:" ";border-bottom:5px solid #f5f5f5;border-left:5px solid transparent;border-right:5px solid transparent;position:absolute;display:block;top:-4px;left:50%;margin-left:-3px}.wy-dropdown-arrow.wy-dropdown-arrow-left:before{left:11px}.wy-form-stacked select{display:block}.wy-form-aligned .wy-help-inline,.wy-form-aligned input,.wy-form-aligned label,.wy-form-aligned select,.wy-form-aligned textarea{display:inline-block;*display:inline;*zoom:1;vertical-align:middle}.wy-form-aligned .wy-control-group>label{display:inline-block;vertical-align:middle;width:10em;margin:6px 12px 0 0;float:left}.wy-form-aligned .wy-control{float:left}.wy-form-aligned .wy-control label{display:block}.wy-form-aligned .wy-control select{margin-top:6px}fieldset{margin:0}fieldset,legend{border:0;padding:0}legend{width:100%;white-space:normal;margin-bottom:24px;font-size:150%;*margin-left:-7px}label,legend{display:block}label{margin:0 0 .3125em;color:#333;font-size:90%}input,select,textarea{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle}.wy-control-group{margin-bottom:24px;max-width:1200px;margin-left:auto;margin-right:auto;*zoom:1}.wy-control-group:after,.wy-control-group:before{display:table;content:""}.wy-control-group:after{clear:both}.wy-control-group.wy-control-group-required>label:after{content:" *";color:#e74c3c}.wy-control-group .wy-form-full,.wy-control-group .wy-form-halves,.wy-control-group .wy-form-thirds{padding-bottom:12px}.wy-control-group .wy-form-full input[type=color],.wy-control-group .wy-form-full input[type=date],.wy-control-group .wy-form-full input[type=datetime-local],.wy-control-group .wy-form-full input[type=datetime],.wy-control-group .wy-form-full input[type=email],.wy-control-group .wy-form-full input[type=month],.wy-control-group .wy-form-full input[type=number],.wy-control-group .wy-form-full input[type=password],.wy-control-group .wy-form-full input[type=search],.wy-control-group .wy-form-full input[type=tel],.wy-control-group .wy-form-full input[type=text],.wy-control-group .wy-form-full input[type=time],.wy-control-group .wy-form-full input[type=url],.wy-control-group .wy-form-full input[type=week],.wy-control-group .wy-form-full select,.wy-control-group .wy-form-halves input[type=color],.wy-control-group .wy-form-halves input[type=date],.wy-control-group .wy-form-halves input[type=datetime-local],.wy-control-group .wy-form-halves input[type=datetime],.wy-control-group .wy-form-halves input[type=email],.wy-control-group .wy-form-halves input[type=month],.wy-control-group .wy-form-halves input[type=number],.wy-control-group .wy-form-halves input[type=password],.wy-control-group .wy-form-halves input[type=search],.wy-control-group .wy-form-halves input[type=tel],.wy-control-group .wy-form-halves 
input[type=text],.wy-control-group .wy-form-halves input[type=time],.wy-control-group .wy-form-halves input[type=url],.wy-control-group .wy-form-halves input[type=week],.wy-control-group .wy-form-halves select,.wy-control-group .wy-form-thirds input[type=color],.wy-control-group .wy-form-thirds input[type=date],.wy-control-group .wy-form-thirds input[type=datetime-local],.wy-control-group .wy-form-thirds input[type=datetime],.wy-control-group .wy-form-thirds input[type=email],.wy-control-group .wy-form-thirds input[type=month],.wy-control-group .wy-form-thirds input[type=number],.wy-control-group .wy-form-thirds input[type=password],.wy-control-group .wy-form-thirds input[type=search],.wy-control-group .wy-form-thirds input[type=tel],.wy-control-group .wy-form-thirds input[type=text],.wy-control-group .wy-form-thirds input[type=time],.wy-control-group .wy-form-thirds input[type=url],.wy-control-group .wy-form-thirds input[type=week],.wy-control-group .wy-form-thirds select{width:100%}.wy-control-group .wy-form-full{float:left;display:block;width:100%;margin-right:0}.wy-control-group .wy-form-full:last-child{margin-right:0}.wy-control-group .wy-form-halves{float:left;display:block;margin-right:2.35765%;width:48.82117%}.wy-control-group .wy-form-halves:last-child,.wy-control-group .wy-form-halves:nth-of-type(2n){margin-right:0}.wy-control-group .wy-form-halves:nth-of-type(odd){clear:left}.wy-control-group .wy-form-thirds{float:left;display:block;margin-right:2.35765%;width:31.76157%}.wy-control-group .wy-form-thirds:last-child,.wy-control-group .wy-form-thirds:nth-of-type(3n){margin-right:0}.wy-control-group .wy-form-thirds:nth-of-type(3n+1){clear:left}.wy-control-group.wy-control-group-no-input .wy-control,.wy-control-no-input{margin:6px 0 0;font-size:90%}.wy-control-no-input{display:inline-block}.wy-control-group.fluid-input input[type=color],.wy-control-group.fluid-input input[type=date],.wy-control-group.fluid-input input[type=datetime-local],.wy-control-group.fluid-input input[type=datetime],.wy-control-group.fluid-input input[type=email],.wy-control-group.fluid-input input[type=month],.wy-control-group.fluid-input input[type=number],.wy-control-group.fluid-input input[type=password],.wy-control-group.fluid-input input[type=search],.wy-control-group.fluid-input input[type=tel],.wy-control-group.fluid-input input[type=text],.wy-control-group.fluid-input input[type=time],.wy-control-group.fluid-input input[type=url],.wy-control-group.fluid-input input[type=week]{width:100%}.wy-form-message-inline{padding-left:.3em;color:#666;font-size:90%}.wy-form-message{display:block;color:#999;font-size:70%;margin-top:.3125em;font-style:italic}.wy-form-message p{font-size:inherit;font-style:italic;margin-bottom:6px}.wy-form-message p:last-child{margin-bottom:0}input{line-height:normal}input[type=button],input[type=reset],input[type=submit]{-webkit-appearance:button;cursor:pointer;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;*overflow:visible}input[type=color],input[type=date],input[type=datetime-local],input[type=datetime],input[type=email],input[type=month],input[type=number],input[type=password],input[type=search],input[type=tel],input[type=text],input[type=time],input[type=url],input[type=week]{-webkit-appearance:none;padding:6px;display:inline-block;border:1px solid #ccc;font-size:80%;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;box-shadow:inset 0 1px 3px #ddd;border-radius:0;-webkit-transition:border .3s linear;-moz-transition:border .3s linear;transition:border 
.3s linear}input[type=datetime-local]{padding:.34375em .625em}input[disabled]{cursor:default}input[type=checkbox],input[type=radio]{padding:0;margin-right:.3125em;*height:13px;*width:13px}input[type=checkbox],input[type=radio],input[type=search]{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}input[type=search]::-webkit-search-cancel-button,input[type=search]::-webkit-search-decoration{-webkit-appearance:none}input[type=color]:focus,input[type=date]:focus,input[type=datetime-local]:focus,input[type=datetime]:focus,input[type=email]:focus,input[type=month]:focus,input[type=number]:focus,input[type=password]:focus,input[type=search]:focus,input[type=tel]:focus,input[type=text]:focus,input[type=time]:focus,input[type=url]:focus,input[type=week]:focus{outline:0;outline:thin dotted\9;border-color:#333}input.no-focus:focus{border-color:#ccc!important}input[type=checkbox]:focus,input[type=file]:focus,input[type=radio]:focus{outline:thin dotted #333;outline:1px auto #129fea}input[type=color][disabled],input[type=date][disabled],input[type=datetime-local][disabled],input[type=datetime][disabled],input[type=email][disabled],input[type=month][disabled],input[type=number][disabled],input[type=password][disabled],input[type=search][disabled],input[type=tel][disabled],input[type=text][disabled],input[type=time][disabled],input[type=url][disabled],input[type=week][disabled]{cursor:not-allowed;background-color:#fafafa}input:focus:invalid,select:focus:invalid,textarea:focus:invalid{color:#e74c3c;border:1px solid #e74c3c}input:focus:invalid:focus,select:focus:invalid:focus,textarea:focus:invalid:focus{border-color:#e74c3c}input[type=checkbox]:focus:invalid:focus,input[type=file]:focus:invalid:focus,input[type=radio]:focus:invalid:focus{outline-color:#e74c3c}input.wy-input-large{padding:12px;font-size:100%}textarea{overflow:auto;vertical-align:top;width:100%;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif}select,textarea{padding:.5em .625em;display:inline-block;border:1px solid #ccc;font-size:80%;box-shadow:inset 0 1px 3px #ddd;-webkit-transition:border .3s linear;-moz-transition:border .3s linear;transition:border .3s linear}select{border:1px solid #ccc;background-color:#fff}select[multiple]{height:auto}select:focus,textarea:focus{outline:0}input[readonly],select[disabled],select[readonly],textarea[disabled],textarea[readonly]{cursor:not-allowed;background-color:#fafafa}input[type=checkbox][disabled],input[type=radio][disabled]{cursor:not-allowed}.wy-checkbox,.wy-radio{margin:6px 0;color:#404040;display:block}.wy-checkbox input,.wy-radio input{vertical-align:baseline}.wy-form-message-inline{display:inline-block;*display:inline;*zoom:1;vertical-align:middle}.wy-input-prefix,.wy-input-suffix{white-space:nowrap;padding:6px}.wy-input-prefix .wy-input-context,.wy-input-suffix .wy-input-context{line-height:27px;padding:0 8px;display:inline-block;font-size:80%;background-color:#f3f6f6;border:1px solid #ccc;color:#999}.wy-input-suffix .wy-input-context{border-left:0}.wy-input-prefix .wy-input-context{border-right:0}.wy-switch{position:relative;display:block;height:24px;margin-top:12px;cursor:pointer}.wy-switch:before{left:0;top:0;width:36px;height:12px;background:#ccc}.wy-switch:after,.wy-switch:before{position:absolute;content:"";display:block;border-radius:4px;-webkit-transition:all .2s ease-in-out;-moz-transition:all .2s ease-in-out;transition:all .2s ease-in-out}.wy-switch:after{width:18px;height:18px;background:#999;left:-3px;top:-3px}.wy-switch 
span{position:absolute;left:48px;display:block;font-size:12px;color:#ccc;line-height:1}.wy-switch.active:before{background:#1e8449}.wy-switch.active:after{left:24px;background:#27ae60}.wy-switch.disabled{cursor:not-allowed;opacity:.8}.wy-control-group.wy-control-group-error .wy-form-message,.wy-control-group.wy-control-group-error>label{color:#e74c3c}.wy-control-group.wy-control-group-error input[type=color],.wy-control-group.wy-control-group-error input[type=date],.wy-control-group.wy-control-group-error input[type=datetime-local],.wy-control-group.wy-control-group-error input[type=datetime],.wy-control-group.wy-control-group-error input[type=email],.wy-control-group.wy-control-group-error input[type=month],.wy-control-group.wy-control-group-error input[type=number],.wy-control-group.wy-control-group-error input[type=password],.wy-control-group.wy-control-group-error input[type=search],.wy-control-group.wy-control-group-error input[type=tel],.wy-control-group.wy-control-group-error input[type=text],.wy-control-group.wy-control-group-error input[type=time],.wy-control-group.wy-control-group-error input[type=url],.wy-control-group.wy-control-group-error input[type=week],.wy-control-group.wy-control-group-error textarea{border:1px solid #e74c3c}.wy-inline-validate{white-space:nowrap}.wy-inline-validate .wy-input-context{padding:.5em .625em;display:inline-block;font-size:80%}.wy-inline-validate.wy-inline-validate-success .wy-input-context{color:#27ae60}.wy-inline-validate.wy-inline-validate-danger .wy-input-context{color:#e74c3c}.wy-inline-validate.wy-inline-validate-warning .wy-input-context{color:#e67e22}.wy-inline-validate.wy-inline-validate-info .wy-input-context{color:#2980b9}.rotate-90{-webkit-transform:rotate(90deg);-moz-transform:rotate(90deg);-ms-transform:rotate(90deg);-o-transform:rotate(90deg);transform:rotate(90deg)}.rotate-180{-webkit-transform:rotate(180deg);-moz-transform:rotate(180deg);-ms-transform:rotate(180deg);-o-transform:rotate(180deg);transform:rotate(180deg)}.rotate-270{-webkit-transform:rotate(270deg);-moz-transform:rotate(270deg);-ms-transform:rotate(270deg);-o-transform:rotate(270deg);transform:rotate(270deg)}.mirror{-webkit-transform:scaleX(-1);-moz-transform:scaleX(-1);-ms-transform:scaleX(-1);-o-transform:scaleX(-1);transform:scaleX(-1)}.mirror.rotate-90{-webkit-transform:scaleX(-1) rotate(90deg);-moz-transform:scaleX(-1) rotate(90deg);-ms-transform:scaleX(-1) rotate(90deg);-o-transform:scaleX(-1) rotate(90deg);transform:scaleX(-1) rotate(90deg)}.mirror.rotate-180{-webkit-transform:scaleX(-1) rotate(180deg);-moz-transform:scaleX(-1) rotate(180deg);-ms-transform:scaleX(-1) rotate(180deg);-o-transform:scaleX(-1) rotate(180deg);transform:scaleX(-1) rotate(180deg)}.mirror.rotate-270{-webkit-transform:scaleX(-1) rotate(270deg);-moz-transform:scaleX(-1) rotate(270deg);-ms-transform:scaleX(-1) rotate(270deg);-o-transform:scaleX(-1) rotate(270deg);transform:scaleX(-1) rotate(270deg)}@media only screen and (max-width:480px){.wy-form button[type=submit]{margin:.7em 0 0}.wy-form input[type=color],.wy-form input[type=date],.wy-form input[type=datetime-local],.wy-form input[type=datetime],.wy-form input[type=email],.wy-form input[type=month],.wy-form input[type=number],.wy-form input[type=password],.wy-form input[type=search],.wy-form input[type=tel],.wy-form input[type=text],.wy-form input[type=time],.wy-form input[type=url],.wy-form input[type=week],.wy-form label{margin-bottom:.3em;display:block}.wy-form input[type=color],.wy-form input[type=date],.wy-form 
input[type=datetime-local],.wy-form input[type=datetime],.wy-form input[type=email],.wy-form input[type=month],.wy-form input[type=number],.wy-form input[type=password],.wy-form input[type=search],.wy-form input[type=tel],.wy-form input[type=time],.wy-form input[type=url],.wy-form input[type=week]{margin-bottom:0}.wy-form-aligned .wy-control-group label{margin-bottom:.3em;text-align:left;display:block;width:100%}.wy-form-aligned .wy-control{margin:1.5em 0 0}.wy-form-message,.wy-form-message-inline,.wy-form .wy-help-inline{display:block;font-size:80%;padding:6px 0}}@media screen and (max-width:768px){.tablet-hide{display:none}}@media screen and (max-width:480px){.mobile-hide{display:none}}.float-left{float:left}.float-right{float:right}.full-width{width:100%}.rst-content table.docutils,.rst-content table.field-list,.wy-table{border-collapse:collapse;border-spacing:0;empty-cells:show;margin-bottom:24px}.rst-content table.docutils caption,.rst-content table.field-list caption,.wy-table caption{color:#000;font:italic 85%/1 arial,sans-serif;padding:1em 0;text-align:center}.rst-content table.docutils td,.rst-content table.docutils th,.rst-content table.field-list td,.rst-content table.field-list th,.wy-table td,.wy-table th{font-size:90%;margin:0;overflow:visible;padding:8px 16px}.rst-content table.docutils td:first-child,.rst-content table.docutils th:first-child,.rst-content table.field-list td:first-child,.rst-content table.field-list th:first-child,.wy-table td:first-child,.wy-table th:first-child{border-left-width:0}.rst-content table.docutils thead,.rst-content table.field-list thead,.wy-table thead{color:#000;text-align:left;vertical-align:bottom;white-space:nowrap}.rst-content table.docutils thead th,.rst-content table.field-list thead th,.wy-table thead th{font-weight:700;border-bottom:2px solid #e1e4e5}.rst-content table.docutils td,.rst-content table.field-list td,.wy-table td{background-color:transparent;vertical-align:middle}.rst-content table.docutils td p,.rst-content table.field-list td p,.wy-table td p{line-height:18px}.rst-content table.docutils td p:last-child,.rst-content table.field-list td p:last-child,.wy-table td p:last-child{margin-bottom:0}.rst-content table.docutils .wy-table-cell-min,.rst-content table.field-list .wy-table-cell-min,.wy-table .wy-table-cell-min{width:1%;padding-right:0}.rst-content table.docutils .wy-table-cell-min input[type=checkbox],.rst-content table.field-list .wy-table-cell-min input[type=checkbox],.wy-table .wy-table-cell-min input[type=checkbox]{margin:0}.wy-table-secondary{color:grey;font-size:90%}.wy-table-tertiary{color:grey;font-size:80%}.rst-content table.docutils:not(.field-list) tr:nth-child(2n-1) td,.wy-table-backed,.wy-table-odd td,.wy-table-striped tr:nth-child(2n-1) td{background-color:#f3f6f6}.rst-content table.docutils,.wy-table-bordered-all{border:1px solid #e1e4e5}.rst-content table.docutils td,.wy-table-bordered-all td{border-bottom:1px solid #e1e4e5;border-left:1px solid #e1e4e5}.rst-content table.docutils tbody>tr:last-child td,.wy-table-bordered-all tbody>tr:last-child td{border-bottom-width:0}.wy-table-bordered{border:1px solid #e1e4e5}.wy-table-bordered-rows td{border-bottom:1px solid #e1e4e5}.wy-table-bordered-rows tbody>tr:last-child td{border-bottom-width:0}.wy-table-horizontal td,.wy-table-horizontal th{border-width:0 0 1px;border-bottom:1px solid #e1e4e5}.wy-table-horizontal tbody>tr:last-child td{border-bottom-width:0}.wy-table-responsive{margin-bottom:24px;max-width:100%;overflow:auto}.wy-table-responsive 
table{margin-bottom:0!important}.wy-table-responsive table td,.wy-table-responsive table th{white-space:nowrap}a{color:#2980b9;text-decoration:none;cursor:pointer}a:hover{color:#3091d1}a:visited{color:#9b59b6}html{height:100%}body,html{overflow-x:hidden}body{font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;font-weight:400;color:#404040;min-height:100%;background:#edf0f2}.wy-text-left{text-align:left}.wy-text-center{text-align:center}.wy-text-right{text-align:right}.wy-text-large{font-size:120%}.wy-text-normal{font-size:100%}.wy-text-small,small{font-size:80%}.wy-text-strike{text-decoration:line-through}.wy-text-warning{color:#e67e22!important}a.wy-text-warning:hover{color:#eb9950!important}.wy-text-info{color:#2980b9!important}a.wy-text-info:hover{color:#409ad5!important}.wy-text-success{color:#27ae60!important}a.wy-text-success:hover{color:#36d278!important}.wy-text-danger{color:#e74c3c!important}a.wy-text-danger:hover{color:#ed7669!important}.wy-text-neutral{color:#404040!important}a.wy-text-neutral:hover{color:#595959!important}.rst-content .toctree-wrapper>p.caption,h1,h2,h3,h4,h5,h6,legend{margin-top:0;font-weight:700;font-family:Roboto Slab,ff-tisa-web-pro,Georgia,Arial,sans-serif}p{line-height:24px;font-size:16px;margin:0 0 24px}h1{font-size:175%}.rst-content .toctree-wrapper>p.caption,h2{font-size:150%}h3{font-size:125%}h4{font-size:115%}h5{font-size:110%}h6{font-size:100%}hr{display:block;height:1px;border:0;border-top:1px solid #e1e4e5;margin:24px 0;padding:0}.rst-content code,.rst-content tt,code{white-space:nowrap;max-width:100%;background:#fff;border:1px solid #e1e4e5;font-size:75%;padding:0 5px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;color:#e74c3c;overflow-x:auto}.rst-content tt.code-large,code.code-large{font-size:90%}.rst-content .section ul,.rst-content .toctree-wrapper ul,.rst-content section ul,.wy-plain-list-disc,article ul{list-style:disc;line-height:24px;margin-bottom:24px}.rst-content .section ul li,.rst-content .toctree-wrapper ul li,.rst-content section ul li,.wy-plain-list-disc li,article ul li{list-style:disc;margin-left:24px}.rst-content .section ul li p:last-child,.rst-content .section ul li ul,.rst-content .toctree-wrapper ul li p:last-child,.rst-content .toctree-wrapper ul li ul,.rst-content section ul li p:last-child,.rst-content section ul li ul,.wy-plain-list-disc li p:last-child,.wy-plain-list-disc li ul,article ul li p:last-child,article ul li ul{margin-bottom:0}.rst-content .section ul li li,.rst-content .toctree-wrapper ul li li,.rst-content section ul li li,.wy-plain-list-disc li li,article ul li li{list-style:circle}.rst-content .section ul li li li,.rst-content .toctree-wrapper ul li li li,.rst-content section ul li li li,.wy-plain-list-disc li li li,article ul li li li{list-style:square}.rst-content .section ul li ol li,.rst-content .toctree-wrapper ul li ol li,.rst-content section ul li ol li,.wy-plain-list-disc li ol li,article ul li ol li{list-style:decimal}.rst-content .section ol,.rst-content .section ol.arabic,.rst-content .toctree-wrapper ol,.rst-content .toctree-wrapper ol.arabic,.rst-content section ol,.rst-content section ol.arabic,.wy-plain-list-decimal,article ol{list-style:decimal;line-height:24px;margin-bottom:24px}.rst-content .section ol.arabic li,.rst-content .section ol li,.rst-content .toctree-wrapper ol.arabic li,.rst-content .toctree-wrapper ol li,.rst-content section ol.arabic li,.rst-content section ol li,.wy-plain-list-decimal li,article ol 
li{list-style:decimal;margin-left:24px}.rst-content .section ol.arabic li ul,.rst-content .section ol li p:last-child,.rst-content .section ol li ul,.rst-content .toctree-wrapper ol.arabic li ul,.rst-content .toctree-wrapper ol li p:last-child,.rst-content .toctree-wrapper ol li ul,.rst-content section ol.arabic li ul,.rst-content section ol li p:last-child,.rst-content section ol li ul,.wy-plain-list-decimal li p:last-child,.wy-plain-list-decimal li ul,article ol li p:last-child,article ol li ul{margin-bottom:0}.rst-content .section ol.arabic li ul li,.rst-content .section ol li ul li,.rst-content .toctree-wrapper ol.arabic li ul li,.rst-content .toctree-wrapper ol li ul li,.rst-content section ol.arabic li ul li,.rst-content section ol li ul li,.wy-plain-list-decimal li ul li,article ol li ul li{list-style:disc}.wy-breadcrumbs{*zoom:1}.wy-breadcrumbs:after,.wy-breadcrumbs:before{display:table;content:""}.wy-breadcrumbs:after{clear:both}.wy-breadcrumbs>li{display:inline-block;padding-top:5px}.wy-breadcrumbs>li.wy-breadcrumbs-aside{float:right}.rst-content .wy-breadcrumbs>li code,.rst-content .wy-breadcrumbs>li tt,.wy-breadcrumbs>li .rst-content tt,.wy-breadcrumbs>li code{all:inherit;color:inherit}.breadcrumb-item:before{content:"/";color:#bbb;font-size:13px;padding:0 6px 0 3px}.wy-breadcrumbs-extra{margin-bottom:0;color:#b3b3b3;font-size:80%;display:inline-block}@media screen and (max-width:480px){.wy-breadcrumbs-extra,.wy-breadcrumbs li.wy-breadcrumbs-aside{display:none}}@media print{.wy-breadcrumbs li.wy-breadcrumbs-aside{display:none}}html{font-size:16px}.wy-affix{position:fixed;top:1.618em}.wy-menu a:hover{text-decoration:none}.wy-menu-horiz{*zoom:1}.wy-menu-horiz:after,.wy-menu-horiz:before{display:table;content:""}.wy-menu-horiz:after{clear:both}.wy-menu-horiz li,.wy-menu-horiz ul{display:inline-block}.wy-menu-horiz li:hover{background:hsla(0,0%,100%,.1)}.wy-menu-horiz li.divide-left{border-left:1px solid #404040}.wy-menu-horiz li.divide-right{border-right:1px solid #404040}.wy-menu-horiz a{height:32px;display:inline-block;line-height:32px;padding:0 16px}.wy-menu-vertical{width:300px}.wy-menu-vertical header,.wy-menu-vertical p.caption{color:#55a5d9;height:32px;line-height:32px;padding:0 1.618em;margin:12px 0 0;display:block;font-weight:700;text-transform:uppercase;font-size:85%;white-space:nowrap}.wy-menu-vertical ul{margin-bottom:0}.wy-menu-vertical li.divide-top{border-top:1px solid #404040}.wy-menu-vertical li.divide-bottom{border-bottom:1px solid #404040}.wy-menu-vertical li.current{background:#e3e3e3}.wy-menu-vertical li.current a{color:grey;border-right:1px solid #c9c9c9;padding:.4045em 2.427em}.wy-menu-vertical li.current a:hover{background:#d6d6d6}.rst-content .wy-menu-vertical li tt,.wy-menu-vertical li .rst-content tt,.wy-menu-vertical li code{border:none;background:inherit;color:inherit;padding-left:0;padding-right:0}.wy-menu-vertical li button.toctree-expand{display:block;float:left;margin-left:-1.2em;line-height:18px;color:#4d4d4d;border:none;background:none;padding:0}.wy-menu-vertical li.current>a,.wy-menu-vertical li.on a{color:#404040;font-weight:700;position:relative;background:#fcfcfc;border:none;padding:.4045em 1.618em}.wy-menu-vertical li.current>a:hover,.wy-menu-vertical li.on a:hover{background:#fcfcfc}.wy-menu-vertical li.current>a:hover button.toctree-expand,.wy-menu-vertical li.on a:hover button.toctree-expand{color:grey}.wy-menu-vertical li.current>a button.toctree-expand,.wy-menu-vertical li.on a 
button.toctree-expand{display:block;line-height:18px;color:#333}.wy-menu-vertical li.toctree-l1.current>a{border-bottom:1px solid #c9c9c9;border-top:1px solid #c9c9c9}.wy-menu-vertical .toctree-l1.current .toctree-l2>ul,.wy-menu-vertical .toctree-l2.current .toctree-l3>ul,.wy-menu-vertical .toctree-l3.current .toctree-l4>ul,.wy-menu-vertical .toctree-l4.current .toctree-l5>ul,.wy-menu-vertical .toctree-l5.current .toctree-l6>ul,.wy-menu-vertical .toctree-l6.current .toctree-l7>ul,.wy-menu-vertical .toctree-l7.current .toctree-l8>ul,.wy-menu-vertical .toctree-l8.current .toctree-l9>ul,.wy-menu-vertical .toctree-l9.current .toctree-l10>ul,.wy-menu-vertical .toctree-l10.current .toctree-l11>ul{display:none}.wy-menu-vertical .toctree-l1.current .current.toctree-l2>ul,.wy-menu-vertical .toctree-l2.current .current.toctree-l3>ul,.wy-menu-vertical .toctree-l3.current .current.toctree-l4>ul,.wy-menu-vertical .toctree-l4.current .current.toctree-l5>ul,.wy-menu-vertical .toctree-l5.current .current.toctree-l6>ul,.wy-menu-vertical .toctree-l6.current .current.toctree-l7>ul,.wy-menu-vertical .toctree-l7.current .current.toctree-l8>ul,.wy-menu-vertical .toctree-l8.current .current.toctree-l9>ul,.wy-menu-vertical .toctree-l9.current .current.toctree-l10>ul,.wy-menu-vertical .toctree-l10.current .current.toctree-l11>ul{display:block}.wy-menu-vertical li.toctree-l3,.wy-menu-vertical li.toctree-l4{font-size:.9em}.wy-menu-vertical li.toctree-l2 a,.wy-menu-vertical li.toctree-l3 a,.wy-menu-vertical li.toctree-l4 a,.wy-menu-vertical li.toctree-l5 a,.wy-menu-vertical li.toctree-l6 a,.wy-menu-vertical li.toctree-l7 a,.wy-menu-vertical li.toctree-l8 a,.wy-menu-vertical li.toctree-l9 a,.wy-menu-vertical li.toctree-l10 a{color:#404040}.wy-menu-vertical li.toctree-l2 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l3 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l4 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l5 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l6 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l7 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l8 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l9 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l10 a:hover button.toctree-expand{color:grey}.wy-menu-vertical li.toctree-l2.current li.toctree-l3>a,.wy-menu-vertical li.toctree-l3.current li.toctree-l4>a,.wy-menu-vertical li.toctree-l4.current li.toctree-l5>a,.wy-menu-vertical li.toctree-l5.current li.toctree-l6>a,.wy-menu-vertical li.toctree-l6.current li.toctree-l7>a,.wy-menu-vertical li.toctree-l7.current li.toctree-l8>a,.wy-menu-vertical li.toctree-l8.current li.toctree-l9>a,.wy-menu-vertical li.toctree-l9.current li.toctree-l10>a,.wy-menu-vertical li.toctree-l10.current li.toctree-l11>a{display:block}.wy-menu-vertical li.toctree-l2.current>a{padding:.4045em 2.427em}.wy-menu-vertical li.toctree-l2.current li.toctree-l3>a{padding:.4045em 1.618em .4045em 4.045em}.wy-menu-vertical li.toctree-l3.current>a{padding:.4045em 4.045em}.wy-menu-vertical li.toctree-l3.current li.toctree-l4>a{padding:.4045em 1.618em .4045em 5.663em}.wy-menu-vertical li.toctree-l4.current>a{padding:.4045em 5.663em}.wy-menu-vertical li.toctree-l4.current li.toctree-l5>a{padding:.4045em 1.618em .4045em 7.281em}.wy-menu-vertical li.toctree-l5.current>a{padding:.4045em 7.281em}.wy-menu-vertical li.toctree-l5.current li.toctree-l6>a{padding:.4045em 1.618em .4045em 8.899em}.wy-menu-vertical li.toctree-l6.current>a{padding:.4045em 
8.899em}.wy-menu-vertical li.toctree-l6.current li.toctree-l7>a{padding:.4045em 1.618em .4045em 10.517em}.wy-menu-vertical li.toctree-l7.current>a{padding:.4045em 10.517em}.wy-menu-vertical li.toctree-l7.current li.toctree-l8>a{padding:.4045em 1.618em .4045em 12.135em}.wy-menu-vertical li.toctree-l8.current>a{padding:.4045em 12.135em}.wy-menu-vertical li.toctree-l8.current li.toctree-l9>a{padding:.4045em 1.618em .4045em 13.753em}.wy-menu-vertical li.toctree-l9.current>a{padding:.4045em 13.753em}.wy-menu-vertical li.toctree-l9.current li.toctree-l10>a{padding:.4045em 1.618em .4045em 15.371em}.wy-menu-vertical li.toctree-l10.current>a{padding:.4045em 15.371em}.wy-menu-vertical li.toctree-l10.current li.toctree-l11>a{padding:.4045em 1.618em .4045em 16.989em}.wy-menu-vertical li.toctree-l2.current>a,.wy-menu-vertical li.toctree-l2.current li.toctree-l3>a{background:#c9c9c9}.wy-menu-vertical li.toctree-l2 button.toctree-expand{color:#a3a3a3}.wy-menu-vertical li.toctree-l3.current>a,.wy-menu-vertical li.toctree-l3.current li.toctree-l4>a{background:#bdbdbd}.wy-menu-vertical li.toctree-l3 button.toctree-expand{color:#969696}.wy-menu-vertical li.current ul{display:block}.wy-menu-vertical li ul{margin-bottom:0;display:none}.wy-menu-vertical li ul li a{margin-bottom:0;color:#d9d9d9;font-weight:400}.wy-menu-vertical a{line-height:18px;padding:.4045em 1.618em;display:block;position:relative;font-size:90%;color:#d9d9d9}.wy-menu-vertical a:hover{background-color:#4e4a4a;cursor:pointer}.wy-menu-vertical a:hover button.toctree-expand{color:#d9d9d9}.wy-menu-vertical a:active{background-color:#2980b9;cursor:pointer;color:#fff}.wy-menu-vertical a:active button.toctree-expand{color:#fff}.wy-side-nav-search{display:block;width:300px;padding:.809em;margin-bottom:.809em;z-index:200;background-color:#2980b9;text-align:center;color:#fcfcfc}.wy-side-nav-search input[type=text]{width:100%;border-radius:50px;padding:6px 12px;border-color:#2472a4}.wy-side-nav-search img{display:block;margin:auto auto .809em;height:45px;width:45px;background-color:#2980b9;padding:5px;border-radius:100%}.wy-side-nav-search .wy-dropdown>a,.wy-side-nav-search>a{color:#fcfcfc;font-size:100%;font-weight:700;display:inline-block;padding:4px 6px;margin-bottom:.809em;max-width:100%}.wy-side-nav-search .wy-dropdown>a:hover,.wy-side-nav-search>a:hover{background:hsla(0,0%,100%,.1)}.wy-side-nav-search .wy-dropdown>a img.logo,.wy-side-nav-search>a img.logo{display:block;margin:0 auto;height:auto;width:auto;border-radius:0;max-width:100%;background:transparent}.wy-side-nav-search .wy-dropdown>a.icon img.logo,.wy-side-nav-search>a.icon img.logo{margin-top:.85em}.wy-side-nav-search>div.version{margin-top:-.4045em;margin-bottom:.809em;font-weight:400;color:hsla(0,0%,100%,.3)}.wy-nav .wy-menu-vertical header{color:#2980b9}.wy-nav .wy-menu-vertical a{color:#b3b3b3}.wy-nav .wy-menu-vertical a:hover{background-color:#2980b9;color:#fff}[data-menu-wrap]{-webkit-transition:all .2s ease-in;-moz-transition:all .2s ease-in;transition:all .2s 
ease-in;position:absolute;opacity:1;width:100%;opacity:0}[data-menu-wrap].move-center{left:0;right:auto;opacity:1}[data-menu-wrap].move-left{right:auto;left:-100%;opacity:0}[data-menu-wrap].move-right{right:-100%;left:auto;opacity:0}.wy-body-for-nav{background:#fcfcfc}.wy-grid-for-nav{position:absolute;width:100%;height:100%}.wy-nav-side{position:fixed;top:0;bottom:0;left:0;padding-bottom:2em;width:300px;overflow-x:hidden;overflow-y:hidden;min-height:100%;color:#9b9b9b;background:#343131;z-index:200}.wy-side-scroll{width:320px;position:relative;overflow-x:hidden;overflow-y:scroll;height:100%}.wy-nav-top{display:none;background:#2980b9;color:#fff;padding:.4045em .809em;position:relative;line-height:50px;text-align:center;font-size:100%;*zoom:1}.wy-nav-top:after,.wy-nav-top:before{display:table;content:""}.wy-nav-top:after{clear:both}.wy-nav-top a{color:#fff;font-weight:700}.wy-nav-top img{margin-right:12px;height:45px;width:45px;background-color:#2980b9;padding:5px;border-radius:100%}.wy-nav-top i{font-size:30px;float:left;cursor:pointer;padding-top:inherit}.wy-nav-content-wrap{margin-left:300px;background:#fcfcfc;min-height:100%}.wy-nav-content{padding:1.618em 3.236em;height:100%;max-width:800px;margin:auto}.wy-body-mask{position:fixed;width:100%;height:100%;background:rgba(0,0,0,.2);display:none;z-index:499}.wy-body-mask.on{display:block}footer{color:grey}footer p{margin-bottom:12px}.rst-content footer span.commit tt,footer span.commit .rst-content tt,footer span.commit code{padding:0;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;font-size:1em;background:none;border:none;color:grey}.rst-footer-buttons{*zoom:1}.rst-footer-buttons:after,.rst-footer-buttons:before{width:100%;display:table;content:""}.rst-footer-buttons:after{clear:both}.rst-breadcrumbs-buttons{margin-top:12px;*zoom:1}.rst-breadcrumbs-buttons:after,.rst-breadcrumbs-buttons:before{display:table;content:""}.rst-breadcrumbs-buttons:after{clear:both}#search-results .search li{margin-bottom:24px;border-bottom:1px solid #e1e4e5;padding-bottom:24px}#search-results .search li:first-child{border-top:1px solid #e1e4e5;padding-top:24px}#search-results .search li a{font-size:120%;margin-bottom:12px;display:inline-block}#search-results .context{color:grey;font-size:90%}.genindextable li>ul{margin-left:24px}@media screen and (max-width:768px){.wy-body-for-nav{background:#fcfcfc}.wy-nav-top{display:block}.wy-nav-side{left:-300px}.wy-nav-side.shift{width:85%;left:0}.wy-menu.wy-menu-vertical,.wy-side-nav-search,.wy-side-scroll{width:auto}.wy-nav-content-wrap{margin-left:0}.wy-nav-content-wrap .wy-nav-content{padding:1.618em}.wy-nav-content-wrap.shift{position:fixed;min-width:100%;left:85%;top:0;height:100%;overflow:hidden}}@media screen and (min-width:1100px){.wy-nav-content-wrap{background:rgba(0,0,0,.05)}.wy-nav-content{margin:0;background:#fcfcfc}}@media print{.rst-versions,.wy-nav-side,footer{display:none}.wy-nav-content-wrap{margin-left:0}}.rst-versions{position:fixed;bottom:0;left:0;width:300px;color:#fcfcfc;background:#1f1d1d;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;z-index:400}.rst-versions a{color:#2980b9;text-decoration:none}.rst-versions .rst-badge-small{display:none}.rst-versions .rst-current-version{padding:12px;background-color:#272525;display:block;text-align:right;font-size:90%;cursor:pointer;color:#27ae60;*zoom:1}.rst-versions .rst-current-version:after,.rst-versions .rst-current-version:before{display:table;content:""}.rst-versions 
.rst-current-version:after{clear:both}.rst-content .code-block-caption .rst-versions .rst-current-version .headerlink,.rst-content .eqno .rst-versions .rst-current-version .headerlink,.rst-content .rst-versions .rst-current-version .admonition-title,.rst-content code.download .rst-versions .rst-current-version span:first-child,.rst-content dl dt .rst-versions .rst-current-version .headerlink,.rst-content h1 .rst-versions .rst-current-version .headerlink,.rst-content h2 .rst-versions .rst-current-version .headerlink,.rst-content h3 .rst-versions .rst-current-version .headerlink,.rst-content h4 .rst-versions .rst-current-version .headerlink,.rst-content h5 .rst-versions .rst-current-version .headerlink,.rst-content h6 .rst-versions .rst-current-version .headerlink,.rst-content p .rst-versions .rst-current-version .headerlink,.rst-content table>caption .rst-versions .rst-current-version .headerlink,.rst-content tt.download .rst-versions .rst-current-version span:first-child,.rst-versions .rst-current-version .fa,.rst-versions .rst-current-version .icon,.rst-versions .rst-current-version .rst-content .admonition-title,.rst-versions .rst-current-version .rst-content .code-block-caption .headerlink,.rst-versions .rst-current-version .rst-content .eqno .headerlink,.rst-versions .rst-current-version .rst-content code.download span:first-child,.rst-versions .rst-current-version .rst-content dl dt .headerlink,.rst-versions .rst-current-version .rst-content h1 .headerlink,.rst-versions .rst-current-version .rst-content h2 .headerlink,.rst-versions .rst-current-version .rst-content h3 .headerlink,.rst-versions .rst-current-version .rst-content h4 .headerlink,.rst-versions .rst-current-version .rst-content h5 .headerlink,.rst-versions .rst-current-version .rst-content h6 .headerlink,.rst-versions .rst-current-version .rst-content p .headerlink,.rst-versions .rst-current-version .rst-content table>caption .headerlink,.rst-versions .rst-current-version .rst-content tt.download span:first-child,.rst-versions .rst-current-version .wy-menu-vertical li button.toctree-expand,.wy-menu-vertical li .rst-versions .rst-current-version button.toctree-expand{color:#fcfcfc}.rst-versions .rst-current-version .fa-book,.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version.rst-out-of-date{background-color:#e74c3c;color:#fff}.rst-versions .rst-current-version.rst-active-old-version{background-color:#f1c40f;color:#000}.rst-versions.shift-up{height:auto;max-height:100%;overflow-y:scroll}.rst-versions.shift-up .rst-other-versions{display:block}.rst-versions .rst-other-versions{font-size:90%;padding:12px;color:grey;display:none}.rst-versions .rst-other-versions hr{display:block;height:1px;border:0;margin:20px 0;padding:0;border-top:1px solid #413d3d}.rst-versions .rst-other-versions dd{display:inline-block;margin:0}.rst-versions .rst-other-versions dd a{display:inline-block;padding:6px;color:#fcfcfc}.rst-versions.rst-badge{width:auto;bottom:20px;right:20px;left:auto;border:none;max-width:300px;max-height:90%}.rst-versions.rst-badge .fa-book,.rst-versions.rst-badge .icon-book{float:none;line-height:30px}.rst-versions.rst-badge.shift-up .rst-current-version{text-align:right}.rst-versions.rst-badge.shift-up .rst-current-version .fa-book,.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge>.rst-current-version{width:auto;height:30px;line-height:30px;padding:0 6px;display:block;text-align:center}@media screen and 
(max-width:768px){.rst-versions{width:85%;display:none}.rst-versions.shift{display:block}}.rst-content .toctree-wrapper>p.caption,.rst-content h1,.rst-content h2,.rst-content h3,.rst-content h4,.rst-content h5,.rst-content h6{margin-bottom:24px}.rst-content img{max-width:100%;height:auto}.rst-content div.figure,.rst-content figure{margin-bottom:24px}.rst-content div.figure .caption-text,.rst-content figure .caption-text{font-style:italic}.rst-content div.figure p:last-child.caption,.rst-content figure p:last-child.caption{margin-bottom:0}.rst-content div.figure.align-center,.rst-content figure.align-center{text-align:center}.rst-content .section>a>img,.rst-content .section>img,.rst-content section>a>img,.rst-content section>img{margin-bottom:24px}.rst-content abbr[title]{text-decoration:none}.rst-content.style-external-links a.reference.external:after{font-family:FontAwesome;content:"\f08e";color:#b3b3b3;vertical-align:super;font-size:60%;margin:0 .2em}.rst-content blockquote{margin-left:24px;line-height:24px;margin-bottom:24px}.rst-content pre.literal-block{white-space:pre;margin:0;padding:12px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;display:block;overflow:auto}.rst-content div[class^=highlight],.rst-content pre.literal-block{border:1px solid #e1e4e5;overflow-x:auto;margin:1px 0 24px}.rst-content div[class^=highlight] div[class^=highlight],.rst-content pre.literal-block div[class^=highlight]{padding:0;border:none;margin:0}.rst-content div[class^=highlight] td.code{width:100%}.rst-content .linenodiv pre{border-right:1px solid #e6e9ea;margin:0;padding:12px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;user-select:none;pointer-events:none}.rst-content div[class^=highlight] pre{white-space:pre;margin:0;padding:12px;display:block;overflow:auto}.rst-content div[class^=highlight] pre .hll{display:block;margin:0 -12px;padding:0 12px}.rst-content .linenodiv pre,.rst-content div[class^=highlight] pre,.rst-content pre.literal-block{font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;font-size:12px;line-height:1.4}.rst-content div.highlight .gp,.rst-content div.highlight span.linenos{user-select:none;pointer-events:none}.rst-content div.highlight span.linenos{display:inline-block;padding-left:0;padding-right:12px;margin-right:12px;border-right:1px solid #e6e9ea}.rst-content .code-block-caption{font-style:italic;font-size:85%;line-height:1;padding:1em 0;text-align:center}@media print{.rst-content .codeblock,.rst-content div[class^=highlight],.rst-content div[class^=highlight] pre{white-space:pre-wrap}}.rst-content .admonition,.rst-content .admonition-todo,.rst-content .attention,.rst-content .caution,.rst-content .danger,.rst-content .error,.rst-content .hint,.rst-content .important,.rst-content .note,.rst-content .seealso,.rst-content .tip,.rst-content .warning{clear:both}.rst-content .admonition-todo .last,.rst-content .admonition-todo>:last-child,.rst-content .admonition .last,.rst-content .admonition>:last-child,.rst-content .attention .last,.rst-content .attention>:last-child,.rst-content .caution .last,.rst-content .caution>:last-child,.rst-content .danger .last,.rst-content .danger>:last-child,.rst-content .error .last,.rst-content .error>:last-child,.rst-content .hint .last,.rst-content .hint>:last-child,.rst-content .important .last,.rst-content .important>:last-child,.rst-content .note .last,.rst-content .note>:last-child,.rst-content .seealso 
.last,.rst-content .seealso>:last-child,.rst-content .tip .last,.rst-content .tip>:last-child,.rst-content .warning .last,.rst-content .warning>:last-child{margin-bottom:0}.rst-content .admonition-title:before{margin-right:4px}.rst-content .admonition table{border-color:rgba(0,0,0,.1)}.rst-content .admonition table td,.rst-content .admonition table th{background:transparent!important;border-color:rgba(0,0,0,.1)!important}.rst-content .section ol.loweralpha,.rst-content .section ol.loweralpha>li,.rst-content .toctree-wrapper ol.loweralpha,.rst-content .toctree-wrapper ol.loweralpha>li,.rst-content section ol.loweralpha,.rst-content section ol.loweralpha>li{list-style:lower-alpha}.rst-content .section ol.upperalpha,.rst-content .section ol.upperalpha>li,.rst-content .toctree-wrapper ol.upperalpha,.rst-content .toctree-wrapper ol.upperalpha>li,.rst-content section ol.upperalpha,.rst-content section ol.upperalpha>li{list-style:upper-alpha}.rst-content .section ol li>*,.rst-content .section ul li>*,.rst-content .toctree-wrapper ol li>*,.rst-content .toctree-wrapper ul li>*,.rst-content section ol li>*,.rst-content section ul li>*{margin-top:12px;margin-bottom:12px}.rst-content .section ol li>:first-child,.rst-content .section ul li>:first-child,.rst-content .toctree-wrapper ol li>:first-child,.rst-content .toctree-wrapper ul li>:first-child,.rst-content section ol li>:first-child,.rst-content section ul li>:first-child{margin-top:0}.rst-content .section ol li>p,.rst-content .section ol li>p:last-child,.rst-content .section ul li>p,.rst-content .section ul li>p:last-child,.rst-content .toctree-wrapper ol li>p,.rst-content .toctree-wrapper ol li>p:last-child,.rst-content .toctree-wrapper ul li>p,.rst-content .toctree-wrapper ul li>p:last-child,.rst-content section ol li>p,.rst-content section ol li>p:last-child,.rst-content section ul li>p,.rst-content section ul li>p:last-child{margin-bottom:12px}.rst-content .section ol li>p:only-child,.rst-content .section ol li>p:only-child:last-child,.rst-content .section ul li>p:only-child,.rst-content .section ul li>p:only-child:last-child,.rst-content .toctree-wrapper ol li>p:only-child,.rst-content .toctree-wrapper ol li>p:only-child:last-child,.rst-content .toctree-wrapper ul li>p:only-child,.rst-content .toctree-wrapper ul li>p:only-child:last-child,.rst-content section ol li>p:only-child,.rst-content section ol li>p:only-child:last-child,.rst-content section ul li>p:only-child,.rst-content section ul li>p:only-child:last-child{margin-bottom:0}.rst-content .section ol li>ol,.rst-content .section ol li>ul,.rst-content .section ul li>ol,.rst-content .section ul li>ul,.rst-content .toctree-wrapper ol li>ol,.rst-content .toctree-wrapper ol li>ul,.rst-content .toctree-wrapper ul li>ol,.rst-content .toctree-wrapper ul li>ul,.rst-content section ol li>ol,.rst-content section ol li>ul,.rst-content section ul li>ol,.rst-content section ul li>ul{margin-bottom:12px}.rst-content .section ol.simple li>*,.rst-content .section ol.simple li ol,.rst-content .section ol.simple li ul,.rst-content .section ul.simple li>*,.rst-content .section ul.simple li ol,.rst-content .section ul.simple li ul,.rst-content .toctree-wrapper ol.simple li>*,.rst-content .toctree-wrapper ol.simple li ol,.rst-content .toctree-wrapper ol.simple li ul,.rst-content .toctree-wrapper ul.simple li>*,.rst-content .toctree-wrapper ul.simple li ol,.rst-content .toctree-wrapper ul.simple li ul,.rst-content section ol.simple li>*,.rst-content section ol.simple li ol,.rst-content section ol.simple li 
ul,.rst-content section ul.simple li>*,.rst-content section ul.simple li ol,.rst-content section ul.simple li ul{margin-top:0;margin-bottom:0}.rst-content .line-block{margin-left:0;margin-bottom:24px;line-height:24px}.rst-content .line-block .line-block{margin-left:24px;margin-bottom:0}.rst-content .topic-title{font-weight:700;margin-bottom:12px}.rst-content .toc-backref{color:#404040}.rst-content .align-right{float:right;margin:0 0 24px 24px}.rst-content .align-left{float:left;margin:0 24px 24px 0}.rst-content .align-center{margin:auto}.rst-content .align-center:not(table){display:block}.rst-content .code-block-caption .headerlink,.rst-content .eqno .headerlink,.rst-content .toctree-wrapper>p.caption .headerlink,.rst-content dl dt .headerlink,.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content p.caption .headerlink,.rst-content p .headerlink,.rst-content table>caption .headerlink{opacity:0;font-size:14px;font-family:FontAwesome;margin-left:.5em}.rst-content .code-block-caption .headerlink:focus,.rst-content .code-block-caption:hover .headerlink,.rst-content .eqno .headerlink:focus,.rst-content .eqno:hover .headerlink,.rst-content .toctree-wrapper>p.caption .headerlink:focus,.rst-content .toctree-wrapper>p.caption:hover .headerlink,.rst-content dl dt .headerlink:focus,.rst-content dl dt:hover .headerlink,.rst-content h1 .headerlink:focus,.rst-content h1:hover .headerlink,.rst-content h2 .headerlink:focus,.rst-content h2:hover .headerlink,.rst-content h3 .headerlink:focus,.rst-content h3:hover .headerlink,.rst-content h4 .headerlink:focus,.rst-content h4:hover .headerlink,.rst-content h5 .headerlink:focus,.rst-content h5:hover .headerlink,.rst-content h6 .headerlink:focus,.rst-content h6:hover .headerlink,.rst-content p.caption .headerlink:focus,.rst-content p.caption:hover .headerlink,.rst-content p .headerlink:focus,.rst-content p:hover .headerlink,.rst-content table>caption .headerlink:focus,.rst-content table>caption:hover .headerlink{opacity:1}.rst-content p a{overflow-wrap:anywhere}.rst-content .wy-table td p,.rst-content .wy-table td ul,.rst-content .wy-table th p,.rst-content .wy-table th ul,.rst-content table.docutils td p,.rst-content table.docutils td ul,.rst-content table.docutils th p,.rst-content table.docutils th ul,.rst-content table.field-list td p,.rst-content table.field-list td ul,.rst-content table.field-list th p,.rst-content table.field-list th ul{font-size:inherit}.rst-content .btn:focus{outline:2px solid}.rst-content table>caption .headerlink:after{font-size:12px}.rst-content .centered{text-align:center}.rst-content .sidebar{float:right;width:40%;display:block;margin:0 0 24px 24px;padding:24px;background:#f3f6f6;border:1px solid #e1e4e5}.rst-content .sidebar dl,.rst-content .sidebar p,.rst-content .sidebar ul{font-size:90%}.rst-content .sidebar .last,.rst-content .sidebar>:last-child{margin-bottom:0}.rst-content .sidebar .sidebar-title{display:block;font-family:Roboto Slab,ff-tisa-web-pro,Georgia,Arial,sans-serif;font-weight:700;background:#e1e4e5;padding:6px 12px;margin:-24px -24px 24px;font-size:100%}.rst-content .highlighted{background:#f1c40f;box-shadow:0 0 0 2px #f1c40f;display:inline;font-weight:700}.rst-content .citation-reference,.rst-content .footnote-reference{vertical-align:baseline;position:relative;top:-.4em;line-height:0;font-size:90%}.rst-content .citation-reference>span.fn-bracket,.rst-content 
.footnote-reference>span.fn-bracket{display:none}.rst-content .hlist{width:100%}.rst-content dl dt span.classifier:before{content:" : "}.rst-content dl dt span.classifier-delimiter{display:none!important}html.writer-html4 .rst-content table.docutils.citation,html.writer-html4 .rst-content table.docutils.footnote{background:none;border:none}html.writer-html4 .rst-content table.docutils.citation td,html.writer-html4 .rst-content table.docutils.citation tr,html.writer-html4 .rst-content table.docutils.footnote td,html.writer-html4 .rst-content table.docutils.footnote tr{border:none;background-color:transparent!important;white-space:normal}html.writer-html4 .rst-content table.docutils.citation td.label,html.writer-html4 .rst-content table.docutils.footnote td.label{padding-left:0;padding-right:0;vertical-align:top}html.writer-html5 .rst-content dl.citation,html.writer-html5 .rst-content dl.field-list,html.writer-html5 .rst-content dl.footnote{display:grid;grid-template-columns:auto minmax(80%,95%)}html.writer-html5 .rst-content dl.citation>dt,html.writer-html5 .rst-content dl.field-list>dt,html.writer-html5 .rst-content dl.footnote>dt{display:inline-grid;grid-template-columns:max-content auto}html.writer-html5 .rst-content aside.citation,html.writer-html5 .rst-content aside.footnote,html.writer-html5 .rst-content div.citation{display:grid;grid-template-columns:auto auto minmax(.65rem,auto) minmax(40%,95%)}html.writer-html5 .rst-content aside.citation>span.label,html.writer-html5 .rst-content aside.footnote>span.label,html.writer-html5 .rst-content div.citation>span.label{grid-column-start:1;grid-column-end:2}html.writer-html5 .rst-content aside.citation>span.backrefs,html.writer-html5 .rst-content aside.footnote>span.backrefs,html.writer-html5 .rst-content div.citation>span.backrefs{grid-column-start:2;grid-column-end:3;grid-row-start:1;grid-row-end:3}html.writer-html5 .rst-content aside.citation>p,html.writer-html5 .rst-content aside.footnote>p,html.writer-html5 .rst-content div.citation>p{grid-column-start:4;grid-column-end:5}html.writer-html5 .rst-content dl.citation,html.writer-html5 .rst-content dl.field-list,html.writer-html5 .rst-content dl.footnote{margin-bottom:24px}html.writer-html5 .rst-content dl.citation>dt,html.writer-html5 .rst-content dl.field-list>dt,html.writer-html5 .rst-content dl.footnote>dt{padding-left:1rem}html.writer-html5 .rst-content dl.citation>dd,html.writer-html5 .rst-content dl.citation>dt,html.writer-html5 .rst-content dl.field-list>dd,html.writer-html5 .rst-content dl.field-list>dt,html.writer-html5 .rst-content dl.footnote>dd,html.writer-html5 .rst-content dl.footnote>dt{margin-bottom:0}html.writer-html5 .rst-content dl.citation,html.writer-html5 .rst-content dl.footnote{font-size:.9rem}html.writer-html5 .rst-content dl.citation>dt,html.writer-html5 .rst-content dl.footnote>dt{margin:0 .5rem .5rem 0;line-height:1.2rem;word-break:break-all;font-weight:400}html.writer-html5 .rst-content dl.citation>dt>span.brackets:before,html.writer-html5 .rst-content dl.footnote>dt>span.brackets:before{content:"["}html.writer-html5 .rst-content dl.citation>dt>span.brackets:after,html.writer-html5 .rst-content dl.footnote>dt>span.brackets:after{content:"]"}html.writer-html5 .rst-content dl.citation>dt>span.fn-backref,html.writer-html5 .rst-content dl.footnote>dt>span.fn-backref{text-align:left;font-style:italic;margin-left:.65rem;word-break:break-word;word-spacing:-.1rem;max-width:5rem}html.writer-html5 .rst-content dl.citation>dt>span.fn-backref>a,html.writer-html5 
.rst-content dl.footnote>dt>span.fn-backref>a{word-break:keep-all}html.writer-html5 .rst-content dl.citation>dt>span.fn-backref>a:not(:first-child):before,html.writer-html5 .rst-content dl.footnote>dt>span.fn-backref>a:not(:first-child):before{content:" "}html.writer-html5 .rst-content dl.citation>dd,html.writer-html5 .rst-content dl.footnote>dd{margin:0 0 .5rem;line-height:1.2rem}html.writer-html5 .rst-content dl.citation>dd p,html.writer-html5 .rst-content dl.footnote>dd p{font-size:.9rem}html.writer-html5 .rst-content aside.citation,html.writer-html5 .rst-content aside.footnote,html.writer-html5 .rst-content div.citation{padding-left:1rem;padding-right:1rem;font-size:.9rem;line-height:1.2rem}html.writer-html5 .rst-content aside.citation p,html.writer-html5 .rst-content aside.footnote p,html.writer-html5 .rst-content div.citation p{font-size:.9rem;line-height:1.2rem;margin-bottom:12px}html.writer-html5 .rst-content aside.citation span.backrefs,html.writer-html5 .rst-content aside.footnote span.backrefs,html.writer-html5 .rst-content div.citation span.backrefs{text-align:left;font-style:italic;margin-left:.65rem;word-break:break-word;word-spacing:-.1rem;max-width:5rem}html.writer-html5 .rst-content aside.citation span.backrefs>a,html.writer-html5 .rst-content aside.footnote span.backrefs>a,html.writer-html5 .rst-content div.citation span.backrefs>a{word-break:keep-all}html.writer-html5 .rst-content aside.citation span.backrefs>a:not(:first-child):before,html.writer-html5 .rst-content aside.footnote span.backrefs>a:not(:first-child):before,html.writer-html5 .rst-content div.citation span.backrefs>a:not(:first-child):before{content:" "}html.writer-html5 .rst-content aside.citation span.label,html.writer-html5 .rst-content aside.footnote span.label,html.writer-html5 .rst-content div.citation span.label{line-height:1.2rem}html.writer-html5 .rst-content aside.citation-list,html.writer-html5 .rst-content aside.footnote-list,html.writer-html5 .rst-content div.citation-list{margin-bottom:24px}html.writer-html5 .rst-content dl.option-list kbd{font-size:.9rem}.rst-content table.docutils.footnote,html.writer-html4 .rst-content table.docutils.citation,html.writer-html5 .rst-content aside.footnote,html.writer-html5 .rst-content aside.footnote-list aside.footnote,html.writer-html5 .rst-content div.citation-list>div.citation,html.writer-html5 .rst-content dl.citation,html.writer-html5 .rst-content dl.footnote{color:grey}.rst-content table.docutils.footnote code,.rst-content table.docutils.footnote tt,html.writer-html4 .rst-content table.docutils.citation code,html.writer-html4 .rst-content table.docutils.citation tt,html.writer-html5 .rst-content aside.footnote-list aside.footnote code,html.writer-html5 .rst-content aside.footnote-list aside.footnote tt,html.writer-html5 .rst-content aside.footnote code,html.writer-html5 .rst-content aside.footnote tt,html.writer-html5 .rst-content div.citation-list>div.citation code,html.writer-html5 .rst-content div.citation-list>div.citation tt,html.writer-html5 .rst-content dl.citation code,html.writer-html5 .rst-content dl.citation tt,html.writer-html5 .rst-content dl.footnote code,html.writer-html5 .rst-content dl.footnote tt{color:#555}.rst-content .wy-table-responsive.citation,.rst-content .wy-table-responsive.footnote{margin-bottom:0}.rst-content .wy-table-responsive.citation+:not(.citation),.rst-content .wy-table-responsive.footnote+:not(.footnote){margin-top:24px}.rst-content .wy-table-responsive.citation:last-child,.rst-content 
.wy-table-responsive.footnote:last-child{margin-bottom:24px}.rst-content table.docutils th{border-color:#e1e4e5}html.writer-html5 .rst-content table.docutils th{border:1px solid #e1e4e5}html.writer-html5 .rst-content table.docutils td>p,html.writer-html5 .rst-content table.docutils th>p{line-height:1rem;margin-bottom:0;font-size:.9rem}.rst-content table.docutils td .last,.rst-content table.docutils td .last>:last-child{margin-bottom:0}.rst-content table.field-list,.rst-content table.field-list td{border:none}.rst-content table.field-list td p{line-height:inherit}.rst-content table.field-list td>strong{display:inline-block}.rst-content table.field-list .field-name{padding-right:10px;text-align:left;white-space:nowrap}.rst-content table.field-list .field-body{text-align:left}.rst-content code,.rst-content tt{color:#000;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;padding:2px 5px}.rst-content code big,.rst-content code em,.rst-content tt big,.rst-content tt em{font-size:100%!important;line-height:normal}.rst-content code.literal,.rst-content tt.literal{color:#e74c3c;white-space:normal}.rst-content code.xref,.rst-content tt.xref,a .rst-content code,a .rst-content tt{font-weight:700;color:#404040;overflow-wrap:normal}.rst-content kbd,.rst-content pre,.rst-content samp{font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace}.rst-content a code,.rst-content a tt{color:#2980b9}.rst-content dl{margin-bottom:24px}.rst-content dl dt{font-weight:700;margin-bottom:12px}.rst-content dl ol,.rst-content dl p,.rst-content dl table,.rst-content dl ul{margin-bottom:12px}.rst-content dl dd{margin:0 0 12px 24px;line-height:24px}.rst-content dl dd>ol:last-child,.rst-content dl dd>p:last-child,.rst-content dl dd>table:last-child,.rst-content dl dd>ul:last-child{margin-bottom:0}html.writer-html4 .rst-content dl:not(.docutils),html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple){margin-bottom:24px}html.writer-html4 .rst-content dl:not(.docutils)>dt,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt{display:table;margin:6px 0;font-size:90%;line-height:normal;background:#e7f2fa;color:#2980b9;border-top:3px solid #6ab0de;padding:6px;position:relative}html.writer-html4 .rst-content dl:not(.docutils)>dt:before,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt:before{color:#6ab0de}html.writer-html4 .rst-content dl:not(.docutils)>dt .headerlink,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt .headerlink{color:#404040;font-size:100%!important}html.writer-html4 .rst-content dl:not(.docutils) dl:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) dl:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt{margin-bottom:6px;border:none;border-left:3px solid #ccc;background:#f0f0f0;color:#555}html.writer-html4 .rst-content dl:not(.docutils) dl:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt .headerlink,html.writer-html5 .rst-content 
dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) dl:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt .headerlink{color:#404040;font-size:100%!important}html.writer-html4 .rst-content dl:not(.docutils)>dt:first-child,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt:first-child{margin-top:0}html.writer-html4 .rst-content dl:not(.docutils) code.descclassname,html.writer-html4 .rst-content dl:not(.docutils) code.descname,html.writer-html4 .rst-content dl:not(.docutils) tt.descclassname,html.writer-html4 .rst-content dl:not(.docutils) tt.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) code.descclassname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) code.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) tt.descclassname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) tt.descname{background-color:transparent;border:none;padding:0;font-size:100%!important}html.writer-html4 .rst-content dl:not(.docutils) code.descname,html.writer-html4 .rst-content dl:not(.docutils) tt.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) code.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) tt.descname{font-weight:700}html.writer-html4 .rst-content dl:not(.docutils) .optional,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .optional{display:inline-block;padding:0 4px;color:#000;font-weight:700}html.writer-html4 .rst-content dl:not(.docutils) .property,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .property{display:inline-block;padding-right:8px;max-width:100%}html.writer-html4 .rst-content dl:not(.docutils) .k,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .k{font-style:italic}html.writer-html4 .rst-content dl:not(.docutils) .descclassname,html.writer-html4 .rst-content dl:not(.docutils) .descname,html.writer-html4 .rst-content dl:not(.docutils) .sig-name,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .descclassname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .sig-name{font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;color:#000}.rst-content .viewcode-back,.rst-content .viewcode-link{display:inline-block;color:#27ae60;font-size:80%;padding-left:24px}.rst-content .viewcode-back{display:block;float:right}.rst-content p.rubric{margin-bottom:12px;font-weight:700}.rst-content 
code.download,.rst-content tt.download{background:inherit;padding:inherit;font-weight:400;font-family:inherit;font-size:inherit;color:inherit;border:inherit;white-space:inherit}.rst-content code.download span:first-child,.rst-content tt.download span:first-child{-webkit-font-smoothing:subpixel-antialiased}.rst-content code.download span:first-child:before,.rst-content tt.download span:first-child:before{margin-right:4px}.rst-content .guilabel,.rst-content .menuselection{font-size:80%;font-weight:700;border-radius:4px;padding:2.4px 6px;margin:auto 2px}.rst-content .guilabel,.rst-content .menuselection{border:1px solid #7fbbe3;background:#e7f2fa}.rst-content :not(dl.option-list)>:not(dt):not(kbd):not(.kbd)>.kbd,.rst-content :not(dl.option-list)>:not(dt):not(kbd):not(.kbd)>kbd{color:inherit;font-size:80%;background-color:#fff;border:1px solid #a6a6a6;border-radius:4px;box-shadow:0 2px grey;padding:2.4px 6px;margin:auto 0}.rst-content .versionmodified{font-style:italic}@media screen and (max-width:480px){.rst-content .sidebar{width:100%}}span[id*=MathJax-Span]{color:#404040}.math{text-align:center}@font-face{font-family:Lato;src:url(fonts/lato-normal.woff2?bd03a2cc277bbbc338d464e679fe9942) format("woff2"),url(fonts/lato-normal.woff?27bd77b9162d388cb8d4c4217c7c5e2a) format("woff");font-weight:400;font-style:normal;font-display:block}@font-face{font-family:Lato;src:url(fonts/lato-bold.woff2?cccb897485813c7c256901dbca54ecf2) format("woff2"),url(fonts/lato-bold.woff?d878b6c29b10beca227e9eef4246111b) format("woff");font-weight:700;font-style:normal;font-display:block}@font-face{font-family:Lato;src:url(fonts/lato-bold-italic.woff2?0b6bb6725576b072c5d0b02ecdd1900d) format("woff2"),url(fonts/lato-bold-italic.woff?9c7e4e9eb485b4a121c760e61bc3707c) format("woff");font-weight:700;font-style:italic;font-display:block}@font-face{font-family:Lato;src:url(fonts/lato-normal-italic.woff2?4eb103b4d12be57cb1d040ed5e162e9d) format("woff2"),url(fonts/lato-normal-italic.woff?f28f2d6482446544ef1ea1ccc6dd5892) format("woff");font-weight:400;font-style:italic;font-display:block}@font-face{font-family:Roboto Slab;font-style:normal;font-weight:400;src:url(fonts/Roboto-Slab-Regular.woff2?7abf5b8d04d26a2cafea937019bca958) format("woff2"),url(fonts/Roboto-Slab-Regular.woff?c1be9284088d487c5e3ff0a10a92e58c) format("woff");font-display:block}@font-face{font-family:Roboto Slab;font-style:normal;font-weight:700;src:url(fonts/Roboto-Slab-Bold.woff2?9984f4a9bda09be08e83f2506954adbe) format("woff2"),url(fonts/Roboto-Slab-Bold.woff?bed5564a116b05148e3b3bea6fb1162a) format("woff");font-display:block}
\ No newline at end of file
diff --git a/_static/design-tabs.js b/_static/design-tabs.js
new file mode 100644
index 00000000..b25bd6a4
--- /dev/null
+++ b/_static/design-tabs.js
@@ -0,0 +1,101 @@
+// @ts-check
+
+// Extra JS capability for selected tabs to be synced
+// The selection is stored in session storage so that it persists across page loads.
+
+/**
+ * @type {Record<string, HTMLElement[]>}
+ */
+let sd_id_to_elements = {};
+const storageKeyPrefix = "sphinx-design-tab-id-";
+
+/**
+ * Create a key for a tab element.
+ * @param {HTMLElement} el - The tab element.
+ * @returns {[string, string, string] | null} - The key.
+ *
+ */
+function create_key(el) {
+  let syncId = el.getAttribute("data-sync-id");
+  let syncGroup = el.getAttribute("data-sync-group");
+  if (!syncId || !syncGroup) return null;
+  return [syncGroup, syncId, syncGroup + "--" + syncId];
+}
+
+/**
+ * Initialize the tab selection.
+ *
+ */
+function ready() {
+  // Find all tabs with sync data
+
+  /** @type {string[]} */
+  let groups = [];
+
+  document.querySelectorAll(".sd-tab-label").forEach((label) => {
+    if (label instanceof HTMLElement) {
+      let data = create_key(label);
+      if (data) {
+        let [group, id, key] = data;
+
+        // add click event listener
+        // @ts-ignore
+        label.onclick = onSDLabelClick;
+
+        // store map of key to elements
+        if (!sd_id_to_elements[key]) {
+          sd_id_to_elements[key] = [];
+        }
+        sd_id_to_elements[key].push(label);
+
+        if (groups.indexOf(group) === -1) {
+          groups.push(group);
+          // Check if a specific tab has been selected via URL parameter
+          const tabParam = new URLSearchParams(window.location.search).get(
+            group
+          );
+          if (tabParam) {
+            console.log(
+              "sphinx-design: Selecting tab id for group '" +
+                group +
+                "' from URL parameter: " +
+                tabParam
+            );
+            window.sessionStorage.setItem(storageKeyPrefix + group, tabParam);
+          }
+        }
+
+        // Check if a specific tab has been selected previously
+        let previousId = window.sessionStorage.getItem(
+          storageKeyPrefix + group
+        );
+        if (previousId === id) {
+          // console.log(
+          //   "sphinx-design: Selecting tab from session storage: " + id
+          // );
+          // @ts-ignore
+          label.previousElementSibling.checked = true;
+        }
+      }
+    }
+  });
+}
+
+/**
+ * Activate other tabs with the same sync id.
+ *
+ * @this {HTMLElement} - The element that was clicked.
+ */
+function onSDLabelClick() {
+  let data = create_key(this);
+  if (!data) return;
+  let [group, id, key] = data;
+  for (const label of sd_id_to_elements[key]) {
+    if (label === this) continue;
+    // @ts-ignore
+    label.previousElementSibling.checked = true;
+  }
+  window.sessionStorage.setItem(storageKeyPrefix + group, id);
+}
+
+document.addEventListener("DOMContentLoaded", ready, false);
diff --git a/_static/doctools.js b/_static/doctools.js
index daccd209..4d67807d 100644
--- a/_static/doctools.js
+++ b/_static/doctools.js
@@ -2,314 +2,155 @@
  * doctools.js
  * ~~~~~~~~~~~
  *
- * Sphinx JavaScript utilities for all documentation.
+ * Base JavaScript utilities for all Sphinx HTML documentation.
  *
- * :copyright: Copyright 2007-2020 by the Sphinx team, see AUTHORS.
+ * :copyright: Copyright 2007-2024 by the Sphinx team, see AUTHORS.
  * :license: BSD, see LICENSE for details.
  *
  */
-
-/**
- * select a different prefix for underscore
- */
-$u = _.noConflict();
-
-/**
- * make the code below compatible with browsers without
- * an installed firebug like debugger
-if (!window.console || !console.firebug) {
-  var names = ["log", "debug", "info", "warn", "error", "assert", "dir",
-    "dirxml", "group", "groupEnd", "time", "timeEnd", "count", "trace",
-    "profile", "profileEnd"];
-  window.console = {};
-  for (var i = 0; i < names.length; ++i)
-    window.console[names[i]] = function() {};
-}
- */
-
-/**
- * small helper function to urldecode strings
- */
-jQuery.urldecode = function(x) {
-  return decodeURIComponent(x).replace(/\+/g, ' ');
-};
-
-/**
- * small helper function to urlencode strings
- */
-jQuery.urlencode = encodeURIComponent;
-
-/**
- * This function returns the parsed url parameters of the
- * current request. Multiple values per key are supported,
- * it will always return arrays of strings for the value parts.
- */
-jQuery.getQueryParameters = function(s) {
-  if (typeof s === 'undefined')
-    s = document.location.search;
-  var parts = s.substr(s.indexOf('?') + 1).split('&');
-  var result = {};
-  for (var i = 0; i < parts.length; i++) {
-    var tmp = parts[i].split('=', 2);
-    var key = jQuery.urldecode(tmp[0]);
-    var value = jQuery.urldecode(tmp[1]);
-    if (key in result)
-      result[key].push(value);
-    else
-      result[key] = [value];
+"use strict";
+
+const BLACKLISTED_KEY_CONTROL_ELEMENTS = new Set([
+  "TEXTAREA",
+  "INPUT",
+  "SELECT",
+  "BUTTON",
+]);
+
+const _ready = (callback) => {
+  if (document.readyState !== "loading") {
+    callback();
+  } else {
+    document.addEventListener("DOMContentLoaded", callback);
   }
-  return result;
 };
-
-/**
- * highlight a given string on a jquery object by wrapping it in
- * span elements with the given class name.
- */
-jQuery.fn.highlightText = function(text, className) {
-  function highlight(node, addItems) {
-    if (node.nodeType === 3) {
-      var val = node.nodeValue;
-      var pos = val.toLowerCase().indexOf(text);
-      if (pos >= 0 &&
-          !jQuery(node.parentNode).hasClass(className) &&
-          !jQuery(node.parentNode).hasClass("nohighlight")) {
-        var span;
-        var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg");
-        if (isInSVG) {
-          span = document.createElementNS("http://www.w3.org/2000/svg", "tspan");
-        } else {
-          span = document.createElement("span");
-          span.className = className;
-        }
-        span.appendChild(document.createTextNode(val.substr(pos, text.length)));
-        node.parentNode.insertBefore(span, node.parentNode.insertBefore(
-          document.createTextNode(val.substr(pos + text.length)),
-          node.nextSibling));
-        node.nodeValue = val.substr(0, pos);
-        if (isInSVG) {
-          var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect");
-          var bbox = node.parentElement.getBBox();
-          rect.x.baseVal.value = bbox.x;
-          rect.y.baseVal.value = bbox.y;
-          rect.width.baseVal.value = bbox.width;
-          rect.height.baseVal.value = bbox.height;
-          rect.setAttribute('class', className);
-          addItems.push({
-            "parent": node.parentNode,
-            "target": rect});
-        }
-      }
-    }
-    else if (!jQuery(node).is("button, select, textarea")) {
-      jQuery.each(node.childNodes, function() {
-        highlight(this, addItems);
-      });
-    }
-  }
-  var addItems = [];
-  var result = this.each(function() {
-    highlight(this, addItems);
-  });
-  for (var i = 0; i < addItems.length; ++i) {
-    jQuery(addItems[i].parent).before(addItems[i].target);
-  }
-  return result;
-};
-
-/*
- * backward compatibility for jQuery.browser
- * This will be supported until firefox bug is fixed.
- */
-if (!jQuery.browser) {
-  jQuery.uaMatch = function(ua) {
-    ua = ua.toLowerCase();
-
-    var match = /(chrome)[ \/]([\w.]+)/.exec(ua) ||
-      /(webkit)[ \/]([\w.]+)/.exec(ua) ||
-      /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) ||
-      /(msie) ([\w.]+)/.exec(ua) ||
-      ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) ||
-      [];
-
-    return {
-      browser: match[ 1 ] || "",
-      version: match[ 2 ] || "0"
-    };
-  };
-  jQuery.browser = {};
-  jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true;
-}
-
 /**
  * Small JavaScript module for the documentation.
  */
-var Documentation = {
-
-  init : function() {
-    this.fixFirefoxAnchorBug();
-    this.highlightSearchWords();
-    this.initIndexTable();
-    if (DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) {
-      this.initOnKeyListeners();
-    }
+const Documentation = {
+  init: () => {
+    Documentation.initDomainIndexTable();
+    Documentation.initOnKeyListeners();
   },

   /**
    * i18n support
    */
-  TRANSLATIONS : {},
-  PLURAL_EXPR : function(n) { return n === 1 ? 0 : 1; },
-  LOCALE : 'unknown',
+  TRANSLATIONS: {},
+  PLURAL_EXPR: (n) => (n === 1 ? 0 : 1),
+  LOCALE: "unknown",

   // gettext and ngettext don't access this so that the functions
   // can safely bound to a different name (_ = Documentation.gettext)
-  gettext : function(string) {
-    var translated = Documentation.TRANSLATIONS[string];
-    if (typeof translated === 'undefined')
-      return string;
-    return (typeof translated === 'string') ? translated : translated[0];
-  },
-
-  ngettext : function(singular, plural, n) {
-    var translated = Documentation.TRANSLATIONS[singular];
-    if (typeof translated === 'undefined')
-      return (n == 1) ? singular : plural;
-    return translated[Documentation.PLURALEXPR(n)];
+  gettext: (string) => {
+    const translated = Documentation.TRANSLATIONS[string];
+    switch (typeof translated) {
+      case "undefined":
+        return string; // no translation
+      case "string":
+        return translated; // translation exists
+      default:
+        return translated[0]; // (singular, plural) translation tuple exists
+    }
   },

-  addTranslations : function(catalog) {
-    for (var key in catalog.messages)
-      this.TRANSLATIONS[key] = catalog.messages[key];
-    this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')');
-    this.LOCALE = catalog.locale;
+  ngettext: (singular, plural, n) => {
+    const translated = Documentation.TRANSLATIONS[singular];
+    if (typeof translated !== "undefined")
+      return translated[Documentation.PLURAL_EXPR(n)];
+    return n === 1 ? singular : plural;
   },

-  /**
-   * add context elements like header anchor links
-   */
-  addContextElements : function() {
-    $('div[id] > :header:first').each(function() {
-      $('<a class="headerlink">\u00B6</a>').
-      attr('href', '#' + this.id).
-      attr('title', _('Permalink to this headline')).
-      appendTo(this);
-    });
-    $('dt[id]').each(function() {
-      $('<a class="headerlink">\u00B6</a>').
-      attr('href', '#' + this.id).
-      attr('title', _('Permalink to this definition')).
-      appendTo(this);
-    });
+  addTranslations: (catalog) => {
+    Object.assign(Documentation.TRANSLATIONS, catalog.messages);
+    Documentation.PLURAL_EXPR = new Function(
+      "n",
+      `return (${catalog.plural_expr})`
+    );
+    Documentation.LOCALE = catalog.locale;
   },

   /**
-   * workaround a firefox stupidity
-   * see: https://bugzilla.mozilla.org/show_bug.cgi?id=645075
+   * helper function to focus on search bar
    */
-  fixFirefoxAnchorBug : function() {
-    if (document.location.hash && $.browser.mozilla)
-      window.setTimeout(function() {
-        document.location.href += '';
-      }, 10);
+  focusSearchBar: () => {
+    document.querySelectorAll("input[name=q]")[0]?.focus();
   },

   /**
-   * highlight the search words provided in the url in the text
+   * Initialise the domain index toggle buttons
    */
-  highlightSearchWords : function() {
-    var params = $.getQueryParameters();
-    var terms = (params.highlight) ? params.highlight[0].split(/\s+/) : [];
-    if (terms.length) {
-      var body = $('div.body');
-      if (!body.length) {
-        body = $('body');
+  initDomainIndexTable: () => {
+    const toggler = (el) => {
+      const idNumber = el.id.substr(7);
+      const toggledRows = document.querySelectorAll(`tr.cg-${idNumber}`);
+      if (el.src.substr(-9) === "minus.png") {
+        el.src = `${el.src.substr(0, el.src.length - 9)}plus.png`;
+        toggledRows.forEach((el) => (el.style.display = "none"));
+      } else {
+        el.src = `${el.src.substr(0, el.src.length - 8)}minus.png`;
+        toggledRows.forEach((el) => (el.style.display = ""));
       }
-      window.setTimeout(function() {
-        $.each(terms, function() {
-          body.highlightText(this.toLowerCase(), 'highlighted');
-        });
-      }, 10);
-      $('<p class="highlight-link"><a href="javascript:Documentation.' +
-        'hideSearchWords()">' + _('Hide Search Matches') + '</a></p>')
-        .appendTo($('#searchbox'));
-    }
-  },
-
-  /**
-   * init the domain index toggle buttons
-   */
-  initIndexTable : function() {
-    var togglers = $('img.toggler').click(function() {
-      var src = $(this).attr('src');
-      var idnum = $(this).attr('id').substr(7);
-      $('tr.cg-' + idnum).toggle();
-      if (src.substr(-9) === 'minus.png')
-        $(this).attr('src', src.substr(0, src.length-9) + 'plus.png');
-      else
-        $(this).attr('src', src.substr(0, src.length-8) + 'minus.png');
-    }).css('display', '');
-    if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) {
-      togglers.click();
-    }
-  },
-
-  /**
-   * helper function to hide the search marks again
-   */
-  hideSearchWords : function() {
-    $('#searchbox .highlight-link').fadeOut(300);
-    $('span.highlighted').removeClass('highlighted');
-  },
-
-  /**
-   * make the url absolute
-   */
-  makeURL : function(relativeURL) {
-    return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL;
-  },
+    };

-  /**
-   * get the current relative url
-   */
-  getCurrentURL : function() {
-    var path = document.location.pathname;
-    var parts = path.split(/\//);
-    $.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() {
-      if (this === '..')
-        parts.pop();
-    });
-    var url = parts.join('/');
-    return path.substring(url.lastIndexOf('/') + 1, path.length - 1);
+    const togglerElements = document.querySelectorAll("img.toggler");
+    togglerElements.forEach((el) =>
+      el.addEventListener("click", (event) => toggler(event.currentTarget))
+    );
+    togglerElements.forEach((el) => (el.style.display = ""));
+    if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) togglerElements.forEach(toggler);
   },

-  initOnKeyListeners: function() {
-    $(document).keydown(function(event) {
-      var activeElementType = document.activeElement.tagName;
-      // don't navigate when in search box or textarea
-      if (activeElementType !== 'TEXTAREA' && activeElementType !== 'INPUT' && activeElementType !== 'SELECT'
-          && !event.altKey && !event.ctrlKey && !event.metaKey && !event.shiftKey) {
-        switch (event.keyCode) {
-          case 37: // left
-            var prevHref = $('link[rel="prev"]').prop('href');
-            if (prevHref) {
-              window.location.href = prevHref;
-              return false;
+  initOnKeyListeners: () => {
+    // only install a listener if it is really needed
+    if (
+      !DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS &&
+      !DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS
+    )
+      return;
+
+    document.addEventListener("keydown", (event) => {
+      // bail for input elements
+      if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return;
+      // bail with special keys
+      if (event.altKey || event.ctrlKey || event.metaKey) return;
+
+      if (!event.shiftKey) {
+        switch (event.key) {
+          case "ArrowLeft":
+            if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break;
+
+            const prevLink = document.querySelector('link[rel="prev"]');
+            if (prevLink && prevLink.href) {
window.location.href = prevLink.href; + event.preventDefault(); } - case 39: // right - var nextHref = $('link[rel="next"]').prop('href'); - if (nextHref) { - window.location.href = nextHref; - return false; + break; + case "ArrowRight": + if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; + + const nextLink = document.querySelector('link[rel="next"]'); + if (nextLink && nextLink.href) { + window.location.href = nextLink.href; + event.preventDefault(); } + break; } } + + // some keyboard layouts may need Shift to get / + switch (event.key) { + case "/": + if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) break; + Documentation.focusSearchBar(); + event.preventDefault(); + } }); - } + }, }; // quick alias for translations -_ = Documentation.gettext; +const _ = Documentation.gettext; -$(document).ready(function() { - Documentation.init(); -}); +_ready(Documentation.init); diff --git a/_static/documentation_options.js b/_static/documentation_options.js index f7d8a7bf..d2ff5490 100644 --- a/_static/documentation_options.js +++ b/_static/documentation_options.js @@ -1,11 +1,13 @@ -var DOCUMENTATION_OPTIONS = { - URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'), - VERSION: '0.1.1.post2407', +const DOCUMENTATION_OPTIONS = { + VERSION: '0.1.2', LANGUAGE: 'en', COLLAPSE_INDEX: false, BUILDER: 'html', FILE_SUFFIX: '.html', + LINK_SUFFIX: '.html', HAS_SOURCE: true, SOURCELINK_SUFFIX: '.txt', - NAVIGATION_WITH_KEYS: false + NAVIGATION_WITH_KEYS: false, + SHOW_SEARCH_SUMMARY: true, + ENABLE_SEARCH_SHORTCUTS: true, }; \ No newline at end of file diff --git a/_static/jquery-3.4.1.js b/_static/jquery-3.4.1.js deleted file mode 100644 index 773ad95c..00000000 --- a/_static/jquery-3.4.1.js +++ /dev/null @@ -1,10598 +0,0 @@ -/*! - * jQuery JavaScript Library v3.4.1 - * https://jquery.com/ - * - * Includes Sizzle.js - * https://sizzlejs.com/ - * - * Copyright JS Foundation and other contributors - * Released under the MIT license - * https://jquery.org/license - * - * Date: 2019-05-01T21:04Z - */ -( function( global, factory ) { - - "use strict"; - - if ( typeof module === "object" && typeof module.exports === "object" ) { - - // For CommonJS and CommonJS-like environments where a proper `window` - // is present, execute the factory and get jQuery. - // For environments that do not have a `window` with a `document` - // (such as Node.js), expose a factory as module.exports. - // This accentuates the need for the creation of a real `window`. - // e.g. var jQuery = require("jquery")(window); - // See ticket #14549 for more info. - module.exports = global.document ? - factory( global, true ) : - function( w ) { - if ( !w.document ) { - throw new Error( "jQuery requires a window with a document" ); - } - return factory( w ); - }; - } else { - factory( global ); - } - -// Pass this if window is not defined yet -} )( typeof window !== "undefined" ? window : this, function( window, noGlobal ) { - -// Edge <= 12 - 13+, Firefox <=18 - 45+, IE 10 - 11, Safari 5.1 - 9+, iOS 6 - 9.1 -// throw exceptions when non-strict code (e.g., ASP.NET 4.5) accesses strict mode -// arguments.callee.caller (trac-13335). But as of jQuery 3.0 (2016), strict mode should be common -// enough that all such attempts are guarded in a try block. 
-"use strict"; - -var arr = []; - -var document = window.document; - -var getProto = Object.getPrototypeOf; - -var slice = arr.slice; - -var concat = arr.concat; - -var push = arr.push; - -var indexOf = arr.indexOf; - -var class2type = {}; - -var toString = class2type.toString; - -var hasOwn = class2type.hasOwnProperty; - -var fnToString = hasOwn.toString; - -var ObjectFunctionString = fnToString.call( Object ); - -var support = {}; - -var isFunction = function isFunction( obj ) { - - // Support: Chrome <=57, Firefox <=52 - // In some browsers, typeof returns "function" for HTML elements - // (i.e., `typeof document.createElement( "object" ) === "function"`). - // We don't want to classify *any* DOM node as a function. - return typeof obj === "function" && typeof obj.nodeType !== "number"; - }; - - -var isWindow = function isWindow( obj ) { - return obj != null && obj === obj.window; - }; - - - - - var preservedScriptAttributes = { - type: true, - src: true, - nonce: true, - noModule: true - }; - - function DOMEval( code, node, doc ) { - doc = doc || document; - - var i, val, - script = doc.createElement( "script" ); - - script.text = code; - if ( node ) { - for ( i in preservedScriptAttributes ) { - - // Support: Firefox 64+, Edge 18+ - // Some browsers don't support the "nonce" property on scripts. - // On the other hand, just using `getAttribute` is not enough as - // the `nonce` attribute is reset to an empty string whenever it - // becomes browsing-context connected. - // See https://github.com/whatwg/html/issues/2369 - // See https://html.spec.whatwg.org/#nonce-attributes - // The `node.getAttribute` check was added for the sake of - // `jQuery.globalEval` so that it can fake a nonce-containing node - // via an object. - val = node[ i ] || node.getAttribute && node.getAttribute( i ); - if ( val ) { - script.setAttribute( i, val ); - } - } - } - doc.head.appendChild( script ).parentNode.removeChild( script ); - } - - -function toType( obj ) { - if ( obj == null ) { - return obj + ""; - } - - // Support: Android <=2.3 only (functionish RegExp) - return typeof obj === "object" || typeof obj === "function" ? - class2type[ toString.call( obj ) ] || "object" : - typeof obj; -} -/* global Symbol */ -// Defining this global in .eslintrc.json would create a danger of using the global -// unguarded in another place, it seems safer to define global only for this module - - - -var - version = "3.4.1", - - // Define a local copy of jQuery - jQuery = function( selector, context ) { - - // The jQuery object is actually just the init constructor 'enhanced' - // Need init if jQuery is called (just allow error to be thrown if not included) - return new jQuery.fn.init( selector, context ); - }, - - // Support: Android <=4.0 only - // Make sure we trim BOM and NBSP - rtrim = /^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g; - -jQuery.fn = jQuery.prototype = { - - // The current version of jQuery being used - jquery: version, - - constructor: jQuery, - - // The default length of a jQuery object is 0 - length: 0, - - toArray: function() { - return slice.call( this ); - }, - - // Get the Nth element in the matched element set OR - // Get the whole matched element set as a clean array - get: function( num ) { - - // Return all the elements in a clean array - if ( num == null ) { - return slice.call( this ); - } - - // Return just the one element from the set - return num < 0 ? 
this[ num + this.length ] : this[ num ]; - }, - - // Take an array of elements and push it onto the stack - // (returning the new matched element set) - pushStack: function( elems ) { - - // Build a new jQuery matched element set - var ret = jQuery.merge( this.constructor(), elems ); - - // Add the old object onto the stack (as a reference) - ret.prevObject = this; - - // Return the newly-formed element set - return ret; - }, - - // Execute a callback for every element in the matched set. - each: function( callback ) { - return jQuery.each( this, callback ); - }, - - map: function( callback ) { - return this.pushStack( jQuery.map( this, function( elem, i ) { - return callback.call( elem, i, elem ); - } ) ); - }, - - slice: function() { - return this.pushStack( slice.apply( this, arguments ) ); - }, - - first: function() { - return this.eq( 0 ); - }, - - last: function() { - return this.eq( -1 ); - }, - - eq: function( i ) { - var len = this.length, - j = +i + ( i < 0 ? len : 0 ); - return this.pushStack( j >= 0 && j < len ? [ this[ j ] ] : [] ); - }, - - end: function() { - return this.prevObject || this.constructor(); - }, - - // For internal use only. - // Behaves like an Array's method, not like a jQuery method. - push: push, - sort: arr.sort, - splice: arr.splice -}; - -jQuery.extend = jQuery.fn.extend = function() { - var options, name, src, copy, copyIsArray, clone, - target = arguments[ 0 ] || {}, - i = 1, - length = arguments.length, - deep = false; - - // Handle a deep copy situation - if ( typeof target === "boolean" ) { - deep = target; - - // Skip the boolean and the target - target = arguments[ i ] || {}; - i++; - } - - // Handle case when target is a string or something (possible in deep copy) - if ( typeof target !== "object" && !isFunction( target ) ) { - target = {}; - } - - // Extend jQuery itself if only one argument is passed - if ( i === length ) { - target = this; - i--; - } - - for ( ; i < length; i++ ) { - - // Only deal with non-null/undefined values - if ( ( options = arguments[ i ] ) != null ) { - - // Extend the base object - for ( name in options ) { - copy = options[ name ]; - - // Prevent Object.prototype pollution - // Prevent never-ending loop - if ( name === "__proto__" || target === copy ) { - continue; - } - - // Recurse if we're merging plain objects or arrays - if ( deep && copy && ( jQuery.isPlainObject( copy ) || - ( copyIsArray = Array.isArray( copy ) ) ) ) { - src = target[ name ]; - - // Ensure proper type for the source value - if ( copyIsArray && !Array.isArray( src ) ) { - clone = []; - } else if ( !copyIsArray && !jQuery.isPlainObject( src ) ) { - clone = {}; - } else { - clone = src; - } - copyIsArray = false; - - // Never move original objects, clone them - target[ name ] = jQuery.extend( deep, clone, copy ); - - // Don't bring in undefined values - } else if ( copy !== undefined ) { - target[ name ] = copy; - } - } - } - } - - // Return the modified object - return target; -}; - -jQuery.extend( { - - // Unique for each copy of jQuery on the page - expando: "jQuery" + ( version + Math.random() ).replace( /\D/g, "" ), - - // Assume jQuery is ready without the ready module - isReady: true, - - error: function( msg ) { - throw new Error( msg ); - }, - - noop: function() {}, - - isPlainObject: function( obj ) { - var proto, Ctor; - - // Detect obvious negatives - // Use toString instead of jQuery.type to catch host objects - if ( !obj || toString.call( obj ) !== "[object Object]" ) { - return false; - } - - proto = getProto( obj ); - - // 
Objects with no prototype (e.g., `Object.create( null )`) are plain - if ( !proto ) { - return true; - } - - // Objects with prototype are plain iff they were constructed by a global Object function - Ctor = hasOwn.call( proto, "constructor" ) && proto.constructor; - return typeof Ctor === "function" && fnToString.call( Ctor ) === ObjectFunctionString; - }, - - isEmptyObject: function( obj ) { - var name; - - for ( name in obj ) { - return false; - } - return true; - }, - - // Evaluates a script in a global context - globalEval: function( code, options ) { - DOMEval( code, { nonce: options && options.nonce } ); - }, - - each: function( obj, callback ) { - var length, i = 0; - - if ( isArrayLike( obj ) ) { - length = obj.length; - for ( ; i < length; i++ ) { - if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { - break; - } - } - } else { - for ( i in obj ) { - if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { - break; - } - } - } - - return obj; - }, - - // Support: Android <=4.0 only - trim: function( text ) { - return text == null ? - "" : - ( text + "" ).replace( rtrim, "" ); - }, - - // results is for internal usage only - makeArray: function( arr, results ) { - var ret = results || []; - - if ( arr != null ) { - if ( isArrayLike( Object( arr ) ) ) { - jQuery.merge( ret, - typeof arr === "string" ? - [ arr ] : arr - ); - } else { - push.call( ret, arr ); - } - } - - return ret; - }, - - inArray: function( elem, arr, i ) { - return arr == null ? -1 : indexOf.call( arr, elem, i ); - }, - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - merge: function( first, second ) { - var len = +second.length, - j = 0, - i = first.length; - - for ( ; j < len; j++ ) { - first[ i++ ] = second[ j ]; - } - - first.length = i; - - return first; - }, - - grep: function( elems, callback, invert ) { - var callbackInverse, - matches = [], - i = 0, - length = elems.length, - callbackExpect = !invert; - - // Go through the array, only saving the items - // that pass the validator function - for ( ; i < length; i++ ) { - callbackInverse = !callback( elems[ i ], i ); - if ( callbackInverse !== callbackExpect ) { - matches.push( elems[ i ] ); - } - } - - return matches; - }, - - // arg is for internal usage only - map: function( elems, callback, arg ) { - var length, value, - i = 0, - ret = []; - - // Go through the array, translating each of the items to their new values - if ( isArrayLike( elems ) ) { - length = elems.length; - for ( ; i < length; i++ ) { - value = callback( elems[ i ], i, arg ); - - if ( value != null ) { - ret.push( value ); - } - } - - // Go through every key on the object, - } else { - for ( i in elems ) { - value = callback( elems[ i ], i, arg ); - - if ( value != null ) { - ret.push( value ); - } - } - } - - // Flatten any nested arrays - return concat.apply( [], ret ); - }, - - // A global GUID counter for objects - guid: 1, - - // jQuery.support is not used in Core but other projects attach their - // properties to it so it needs to exist. 
- support: support -} ); - -if ( typeof Symbol === "function" ) { - jQuery.fn[ Symbol.iterator ] = arr[ Symbol.iterator ]; -} - -// Populate the class2type map -jQuery.each( "Boolean Number String Function Array Date RegExp Object Error Symbol".split( " " ), -function( i, name ) { - class2type[ "[object " + name + "]" ] = name.toLowerCase(); -} ); - -function isArrayLike( obj ) { - - // Support: real iOS 8.2 only (not reproducible in simulator) - // `in` check used to prevent JIT error (gh-2145) - // hasOwn isn't used here due to false negatives - // regarding Nodelist length in IE - var length = !!obj && "length" in obj && obj.length, - type = toType( obj ); - - if ( isFunction( obj ) || isWindow( obj ) ) { - return false; - } - - return type === "array" || length === 0 || - typeof length === "number" && length > 0 && ( length - 1 ) in obj; -} -var Sizzle = -/*! - * Sizzle CSS Selector Engine v2.3.4 - * https://sizzlejs.com/ - * - * Copyright JS Foundation and other contributors - * Released under the MIT license - * https://js.foundation/ - * - * Date: 2019-04-08 - */ -(function( window ) { - -var i, - support, - Expr, - getText, - isXML, - tokenize, - compile, - select, - outermostContext, - sortInput, - hasDuplicate, - - // Local document vars - setDocument, - document, - docElem, - documentIsHTML, - rbuggyQSA, - rbuggyMatches, - matches, - contains, - - // Instance-specific data - expando = "sizzle" + 1 * new Date(), - preferredDoc = window.document, - dirruns = 0, - done = 0, - classCache = createCache(), - tokenCache = createCache(), - compilerCache = createCache(), - nonnativeSelectorCache = createCache(), - sortOrder = function( a, b ) { - if ( a === b ) { - hasDuplicate = true; - } - return 0; - }, - - // Instance methods - hasOwn = ({}).hasOwnProperty, - arr = [], - pop = arr.pop, - push_native = arr.push, - push = arr.push, - slice = arr.slice, - // Use a stripped-down indexOf as it's faster than native - // https://jsperf.com/thor-indexof-vs-for/5 - indexOf = function( list, elem ) { - var i = 0, - len = list.length; - for ( ; i < len; i++ ) { - if ( list[i] === elem ) { - return i; - } - } - return -1; - }, - - booleans = "checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped", - - // Regular expressions - - // http://www.w3.org/TR/css3-selectors/#whitespace - whitespace = "[\\x20\\t\\r\\n\\f]", - - // http://www.w3.org/TR/CSS21/syndata.html#value-def-identifier - identifier = "(?:\\\\.|[\\w-]|[^\0-\\xa0])+", - - // Attribute selectors: http://www.w3.org/TR/selectors/#attribute-selectors - attributes = "\\[" + whitespace + "*(" + identifier + ")(?:" + whitespace + - // Operator (capture 2) - "*([*^$|!~]?=)" + whitespace + - // "Attribute values must be CSS identifiers [capture 5] or strings [capture 3 or capture 4]" - "*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|(" + identifier + "))|)" + whitespace + - "*\\]", - - pseudos = ":(" + identifier + ")(?:\\((" + - // To reduce the number of selectors needing tokenize in the preFilter, prefer arguments: - // 1. quoted (capture 3; capture 4 or capture 5) - "('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|" + - // 2. simple (capture 6) - "((?:\\\\.|[^\\\\()[\\]]|" + attributes + ")*)|" + - // 3. 
anything else (capture 2) - ".*" + - ")\\)|)", - - // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter - rwhitespace = new RegExp( whitespace + "+", "g" ), - rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + whitespace + "+$", "g" ), - - rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ), - rcombinators = new RegExp( "^" + whitespace + "*([>+~]|" + whitespace + ")" + whitespace + "*" ), - rdescend = new RegExp( whitespace + "|>" ), - - rpseudo = new RegExp( pseudos ), - ridentifier = new RegExp( "^" + identifier + "$" ), - - matchExpr = { - "ID": new RegExp( "^#(" + identifier + ")" ), - "CLASS": new RegExp( "^\\.(" + identifier + ")" ), - "TAG": new RegExp( "^(" + identifier + "|[*])" ), - "ATTR": new RegExp( "^" + attributes ), - "PSEUDO": new RegExp( "^" + pseudos ), - "CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\(" + whitespace + - "*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" + whitespace + - "*(\\d+)|))" + whitespace + "*\\)|)", "i" ), - "bool": new RegExp( "^(?:" + booleans + ")$", "i" ), - // For use in libraries implementing .is() - // We use this for POS matching in `select` - "needsContext": new RegExp( "^" + whitespace + "*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\(" + - whitespace + "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", "i" ) - }, - - rhtml = /HTML$/i, - rinputs = /^(?:input|select|textarea|button)$/i, - rheader = /^h\d$/i, - - rnative = /^[^{]+\{\s*\[native \w/, - - // Easily-parseable/retrievable ID or TAG or CLASS selectors - rquickExpr = /^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/, - - rsibling = /[+~]/, - - // CSS escapes - // http://www.w3.org/TR/CSS21/syndata.html#escaped-characters - runescape = new RegExp( "\\\\([\\da-f]{1,6}" + whitespace + "?|(" + whitespace + ")|.)", "ig" ), - funescape = function( _, escaped, escapedWhitespace ) { - var high = "0x" + escaped - 0x10000; - // NaN means non-codepoint - // Support: Firefox<24 - // Workaround erroneous numeric interpretation of +"0x" - return high !== high || escapedWhitespace ? - escaped : - high < 0 ? 
- // BMP codepoint - String.fromCharCode( high + 0x10000 ) : - // Supplemental Plane codepoint (surrogate pair) - String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 ); - }, - - // CSS string/identifier serialization - // https://drafts.csswg.org/cssom/#common-serializing-idioms - rcssescape = /([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g, - fcssescape = function( ch, asCodePoint ) { - if ( asCodePoint ) { - - // U+0000 NULL becomes U+FFFD REPLACEMENT CHARACTER - if ( ch === "\0" ) { - return "\uFFFD"; - } - - // Control characters and (dependent upon position) numbers get escaped as code points - return ch.slice( 0, -1 ) + "\\" + ch.charCodeAt( ch.length - 1 ).toString( 16 ) + " "; - } - - // Other potentially-special ASCII characters get backslash-escaped - return "\\" + ch; - }, - - // Used for iframes - // See setDocument() - // Removing the function wrapper causes a "Permission Denied" - // error in IE - unloadHandler = function() { - setDocument(); - }, - - inDisabledFieldset = addCombinator( - function( elem ) { - return elem.disabled === true && elem.nodeName.toLowerCase() === "fieldset"; - }, - { dir: "parentNode", next: "legend" } - ); - -// Optimize for push.apply( _, NodeList ) -try { - push.apply( - (arr = slice.call( preferredDoc.childNodes )), - preferredDoc.childNodes - ); - // Support: Android<4.0 - // Detect silently failing push.apply - arr[ preferredDoc.childNodes.length ].nodeType; -} catch ( e ) { - push = { apply: arr.length ? - - // Leverage slice if possible - function( target, els ) { - push_native.apply( target, slice.call(els) ); - } : - - // Support: IE<9 - // Otherwise append directly - function( target, els ) { - var j = target.length, - i = 0; - // Can't trust NodeList.length - while ( (target[j++] = els[i++]) ) {} - target.length = j - 1; - } - }; -} - -function Sizzle( selector, context, results, seed ) { - var m, i, elem, nid, match, groups, newSelector, - newContext = context && context.ownerDocument, - - // nodeType defaults to 9, since context defaults to document - nodeType = context ? context.nodeType : 9; - - results = results || []; - - // Return early from calls with invalid selector or context - if ( typeof selector !== "string" || !selector || - nodeType !== 1 && nodeType !== 9 && nodeType !== 11 ) { - - return results; - } - - // Try to shortcut find operations (as opposed to filters) in HTML documents - if ( !seed ) { - - if ( ( context ? 
context.ownerDocument || context : preferredDoc ) !== document ) { - setDocument( context ); - } - context = context || document; - - if ( documentIsHTML ) { - - // If the selector is sufficiently simple, try using a "get*By*" DOM method - // (excepting DocumentFragment context, where the methods don't exist) - if ( nodeType !== 11 && (match = rquickExpr.exec( selector )) ) { - - // ID selector - if ( (m = match[1]) ) { - - // Document context - if ( nodeType === 9 ) { - if ( (elem = context.getElementById( m )) ) { - - // Support: IE, Opera, Webkit - // TODO: identify versions - // getElementById can match elements by name instead of ID - if ( elem.id === m ) { - results.push( elem ); - return results; - } - } else { - return results; - } - - // Element context - } else { - - // Support: IE, Opera, Webkit - // TODO: identify versions - // getElementById can match elements by name instead of ID - if ( newContext && (elem = newContext.getElementById( m )) && - contains( context, elem ) && - elem.id === m ) { - - results.push( elem ); - return results; - } - } - - // Type selector - } else if ( match[2] ) { - push.apply( results, context.getElementsByTagName( selector ) ); - return results; - - // Class selector - } else if ( (m = match[3]) && support.getElementsByClassName && - context.getElementsByClassName ) { - - push.apply( results, context.getElementsByClassName( m ) ); - return results; - } - } - - // Take advantage of querySelectorAll - if ( support.qsa && - !nonnativeSelectorCache[ selector + " " ] && - (!rbuggyQSA || !rbuggyQSA.test( selector )) && - - // Support: IE 8 only - // Exclude object elements - (nodeType !== 1 || context.nodeName.toLowerCase() !== "object") ) { - - newSelector = selector; - newContext = context; - - // qSA considers elements outside a scoping root when evaluating child or - // descendant combinators, which is not what we want. - // In such cases, we work around the behavior by prefixing every selector in the - // list with an ID selector referencing the scope context. - // Thanks to Andrew Dupont for this technique. 
- if ( nodeType === 1 && rdescend.test( selector ) ) { - - // Capture the context ID, setting it first if necessary - if ( (nid = context.getAttribute( "id" )) ) { - nid = nid.replace( rcssescape, fcssescape ); - } else { - context.setAttribute( "id", (nid = expando) ); - } - - // Prefix every selector in the list - groups = tokenize( selector ); - i = groups.length; - while ( i-- ) { - groups[i] = "#" + nid + " " + toSelector( groups[i] ); - } - newSelector = groups.join( "," ); - - // Expand context for sibling selectors - newContext = rsibling.test( selector ) && testContext( context.parentNode ) || - context; - } - - try { - push.apply( results, - newContext.querySelectorAll( newSelector ) - ); - return results; - } catch ( qsaError ) { - nonnativeSelectorCache( selector, true ); - } finally { - if ( nid === expando ) { - context.removeAttribute( "id" ); - } - } - } - } - } - - // All others - return select( selector.replace( rtrim, "$1" ), context, results, seed ); -} - -/** - * Create key-value caches of limited size - * @returns {function(string, object)} Returns the Object data after storing it on itself with - * property name the (space-suffixed) string and (if the cache is larger than Expr.cacheLength) - * deleting the oldest entry - */ -function createCache() { - var keys = []; - - function cache( key, value ) { - // Use (key + " ") to avoid collision with native prototype properties (see Issue #157) - if ( keys.push( key + " " ) > Expr.cacheLength ) { - // Only keep the most recent entries - delete cache[ keys.shift() ]; - } - return (cache[ key + " " ] = value); - } - return cache; -} - -/** - * Mark a function for special use by Sizzle - * @param {Function} fn The function to mark - */ -function markFunction( fn ) { - fn[ expando ] = true; - return fn; -} - -/** - * Support testing using an element - * @param {Function} fn Passed the created element and returns a boolean result - */ -function assert( fn ) { - var el = document.createElement("fieldset"); - - try { - return !!fn( el ); - } catch (e) { - return false; - } finally { - // Remove from its parent by default - if ( el.parentNode ) { - el.parentNode.removeChild( el ); - } - // release memory in IE - el = null; - } -} - -/** - * Adds the same handler for all of the specified attrs - * @param {String} attrs Pipe-separated list of attributes - * @param {Function} handler The method that will be applied - */ -function addHandle( attrs, handler ) { - var arr = attrs.split("|"), - i = arr.length; - - while ( i-- ) { - Expr.attrHandle[ arr[i] ] = handler; - } -} - -/** - * Checks document order of two siblings - * @param {Element} a - * @param {Element} b - * @returns {Number} Returns less than 0 if a precedes b, greater than 0 if a follows b - */ -function siblingCheck( a, b ) { - var cur = b && a, - diff = cur && a.nodeType === 1 && b.nodeType === 1 && - a.sourceIndex - b.sourceIndex; - - // Use IE sourceIndex if available on both nodes - if ( diff ) { - return diff; - } - - // Check if b follows a - if ( cur ) { - while ( (cur = cur.nextSibling) ) { - if ( cur === b ) { - return -1; - } - } - } - - return a ? 
1 : -1; -} - -/** - * Returns a function to use in pseudos for input types - * @param {String} type - */ -function createInputPseudo( type ) { - return function( elem ) { - var name = elem.nodeName.toLowerCase(); - return name === "input" && elem.type === type; - }; -} - -/** - * Returns a function to use in pseudos for buttons - * @param {String} type - */ -function createButtonPseudo( type ) { - return function( elem ) { - var name = elem.nodeName.toLowerCase(); - return (name === "input" || name === "button") && elem.type === type; - }; -} - -/** - * Returns a function to use in pseudos for :enabled/:disabled - * @param {Boolean} disabled true for :disabled; false for :enabled - */ -function createDisabledPseudo( disabled ) { - - // Known :disabled false positives: fieldset[disabled] > legend:nth-of-type(n+2) :can-disable - return function( elem ) { - - // Only certain elements can match :enabled or :disabled - // https://html.spec.whatwg.org/multipage/scripting.html#selector-enabled - // https://html.spec.whatwg.org/multipage/scripting.html#selector-disabled - if ( "form" in elem ) { - - // Check for inherited disabledness on relevant non-disabled elements: - // * listed form-associated elements in a disabled fieldset - // https://html.spec.whatwg.org/multipage/forms.html#category-listed - // https://html.spec.whatwg.org/multipage/forms.html#concept-fe-disabled - // * option elements in a disabled optgroup - // https://html.spec.whatwg.org/multipage/forms.html#concept-option-disabled - // All such elements have a "form" property. - if ( elem.parentNode && elem.disabled === false ) { - - // Option elements defer to a parent optgroup if present - if ( "label" in elem ) { - if ( "label" in elem.parentNode ) { - return elem.parentNode.disabled === disabled; - } else { - return elem.disabled === disabled; - } - } - - // Support: IE 6 - 11 - // Use the isDisabled shortcut property to check for disabled fieldset ancestors - return elem.isDisabled === disabled || - - // Where there is no isDisabled, check manually - /* jshint -W018 */ - elem.isDisabled !== !disabled && - inDisabledFieldset( elem ) === disabled; - } - - return elem.disabled === disabled; - - // Try to winnow out elements that can't be disabled before trusting the disabled property. - // Some victims get caught in our net (label, legend, menu, track), but it shouldn't - // even exist on them, let alone have a boolean value. 
- } else if ( "label" in elem ) { - return elem.disabled === disabled; - } - - // Remaining elements are neither :enabled nor :disabled - return false; - }; -} - -/** - * Returns a function to use in pseudos for positionals - * @param {Function} fn - */ -function createPositionalPseudo( fn ) { - return markFunction(function( argument ) { - argument = +argument; - return markFunction(function( seed, matches ) { - var j, - matchIndexes = fn( [], seed.length, argument ), - i = matchIndexes.length; - - // Match elements found at the specified indexes - while ( i-- ) { - if ( seed[ (j = matchIndexes[i]) ] ) { - seed[j] = !(matches[j] = seed[j]); - } - } - }); - }); -} - -/** - * Checks a node for validity as a Sizzle context - * @param {Element|Object=} context - * @returns {Element|Object|Boolean} The input node if acceptable, otherwise a falsy value - */ -function testContext( context ) { - return context && typeof context.getElementsByTagName !== "undefined" && context; -} - -// Expose support vars for convenience -support = Sizzle.support = {}; - -/** - * Detects XML nodes - * @param {Element|Object} elem An element or a document - * @returns {Boolean} True iff elem is a non-HTML XML node - */ -isXML = Sizzle.isXML = function( elem ) { - var namespace = elem.namespaceURI, - docElem = (elem.ownerDocument || elem).documentElement; - - // Support: IE <=8 - // Assume HTML when documentElement doesn't yet exist, such as inside loading iframes - // https://bugs.jquery.com/ticket/4833 - return !rhtml.test( namespace || docElem && docElem.nodeName || "HTML" ); -}; - -/** - * Sets document-related variables once based on the current document - * @param {Element|Object} [doc] An element or document object to use to set the document - * @returns {Object} Returns the current document - */ -setDocument = Sizzle.setDocument = function( node ) { - var hasCompare, subWindow, - doc = node ? 
node.ownerDocument || node : preferredDoc; - - // Return early if doc is invalid or already selected - if ( doc === document || doc.nodeType !== 9 || !doc.documentElement ) { - return document; - } - - // Update global variables - document = doc; - docElem = document.documentElement; - documentIsHTML = !isXML( document ); - - // Support: IE 9-11, Edge - // Accessing iframe documents after unload throws "permission denied" errors (jQuery #13936) - if ( preferredDoc !== document && - (subWindow = document.defaultView) && subWindow.top !== subWindow ) { - - // Support: IE 11, Edge - if ( subWindow.addEventListener ) { - subWindow.addEventListener( "unload", unloadHandler, false ); - - // Support: IE 9 - 10 only - } else if ( subWindow.attachEvent ) { - subWindow.attachEvent( "onunload", unloadHandler ); - } - } - - /* Attributes - ---------------------------------------------------------------------- */ - - // Support: IE<8 - // Verify that getAttribute really returns attributes and not properties - // (excepting IE8 booleans) - support.attributes = assert(function( el ) { - el.className = "i"; - return !el.getAttribute("className"); - }); - - /* getElement(s)By* - ---------------------------------------------------------------------- */ - - // Check if getElementsByTagName("*") returns only elements - support.getElementsByTagName = assert(function( el ) { - el.appendChild( document.createComment("") ); - return !el.getElementsByTagName("*").length; - }); - - // Support: IE<9 - support.getElementsByClassName = rnative.test( document.getElementsByClassName ); - - // Support: IE<10 - // Check if getElementById returns elements by name - // The broken getElementById methods don't pick up programmatically-set names, - // so use a roundabout getElementsByName test - support.getById = assert(function( el ) { - docElem.appendChild( el ).id = expando; - return !document.getElementsByName || !document.getElementsByName( expando ).length; - }); - - // ID filter and find - if ( support.getById ) { - Expr.filter["ID"] = function( id ) { - var attrId = id.replace( runescape, funescape ); - return function( elem ) { - return elem.getAttribute("id") === attrId; - }; - }; - Expr.find["ID"] = function( id, context ) { - if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { - var elem = context.getElementById( id ); - return elem ? [ elem ] : []; - } - }; - } else { - Expr.filter["ID"] = function( id ) { - var attrId = id.replace( runescape, funescape ); - return function( elem ) { - var node = typeof elem.getAttributeNode !== "undefined" && - elem.getAttributeNode("id"); - return node && node.value === attrId; - }; - }; - - // Support: IE 6 - 7 only - // getElementById is not reliable as a find shortcut - Expr.find["ID"] = function( id, context ) { - if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { - var node, i, elems, - elem = context.getElementById( id ); - - if ( elem ) { - - // Verify the id attribute - node = elem.getAttributeNode("id"); - if ( node && node.value === id ) { - return [ elem ]; - } - - // Fall back on getElementsByName - elems = context.getElementsByName( id ); - i = 0; - while ( (elem = elems[i++]) ) { - node = elem.getAttributeNode("id"); - if ( node && node.value === id ) { - return [ elem ]; - } - } - } - - return []; - } - }; - } - - // Tag - Expr.find["TAG"] = support.getElementsByTagName ? 
-	function( tag, context ) {
-		if ( typeof context.getElementsByTagName !== "undefined" ) {
-			return context.getElementsByTagName( tag );
-
-		// DocumentFragment nodes don't have gEBTN
-		} else if ( support.qsa ) {
-			return context.querySelectorAll( tag );
-		}
-	} :
-
-	function( tag, context ) {
-		var elem,
-			tmp = [],
-			i = 0,
-			// By happy coincidence, a (broken) gEBTN appears on DocumentFragment nodes too
-			results = context.getElementsByTagName( tag );
-
-		// Filter out possible comments
-		if ( tag === "*" ) {
-			while ( (elem = results[i++]) ) {
-				if ( elem.nodeType === 1 ) {
-					tmp.push( elem );
-				}
-			}
-
-			return tmp;
-		}
-		return results;
-	};
-
-// Class
-Expr.find["CLASS"] = support.getElementsByClassName && function( className, context ) {
-	if ( typeof context.getElementsByClassName !== "undefined" && documentIsHTML ) {
-		return context.getElementsByClassName( className );
-	}
-};
-
-/* QSA/matchesSelector
----------------------------------------------------------------------- */
-
-// QSA and matchesSelector support
-
-// matchesSelector(:active) reports false when true (IE9/Opera 11.5)
-rbuggyMatches = [];
-
-// qSa(:focus) reports false when true (Chrome 21)
-// We allow this because of a bug in IE8/9 that throws an error
-// whenever `document.activeElement` is accessed on an iframe
-// So, we allow :focus to pass through QSA all the time to avoid the IE error
-// See https://bugs.jquery.com/ticket/13378
-rbuggyQSA = [];
-
-if ( (support.qsa = rnative.test( document.querySelectorAll )) ) {
-	// Build QSA regex
-	// Regex strategy adopted from Diego Perini
-	assert(function( el ) {
-		// Select is set to empty string on purpose
-		// This is to test IE's treatment of not explicitly
-		// setting a boolean content attribute,
-		// since its presence should be enough
-		// https://bugs.jquery.com/ticket/12359
-		docElem.appendChild( el ).innerHTML = "<a id='" + expando + "'></a>" +
-			"<select id='" + expando + "-\r\\' msallowcapture=''>" +
-			"<option selected=''></option></select>";
-
-		// Support: IE8, Opera 11-12.16
-		// Nothing should be selected when empty strings follow ^= or $= or *=
-		// The test attribute must be unknown in Opera but "safe" for WinRT
-		// https://msdn.microsoft.com/en-us/library/ie/hh465388.aspx#attribute_section
-		if ( el.querySelectorAll("[msallowcapture^='']").length ) {
-			rbuggyQSA.push( "[*^$]=" + whitespace + "*(?:''|\"\")" );
-		}
-
-		// Support: IE8
-		// Boolean attributes and "value" are not treated correctly
-		if ( !el.querySelectorAll("[selected]").length ) {
-			rbuggyQSA.push( "\\[" + whitespace + "*(?:value|" + booleans + ")" );
-		}
-
-		// Support: Chrome<29, Android<4.4, Safari<7.0+, iOS<7.0+, PhantomJS<1.9.8+
-		if ( !el.querySelectorAll( "[id~=" + expando + "-]" ).length ) {
-			rbuggyQSA.push("~=");
-		}
-
-		// Webkit/Opera - :checked should return selected option elements
-		// http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked
-		// IE8 throws error here and will not see later tests
-		if ( !el.querySelectorAll(":checked").length ) {
-			rbuggyQSA.push(":checked");
-		}
-
-		// Support: Safari 8+, iOS 8+
-		// https://bugs.webkit.org/show_bug.cgi?id=136851
-		// In-page `selector#id sibling-combinator selector` fails
-		if ( !el.querySelectorAll( "a#" + expando + "+*" ).length ) {
-			rbuggyQSA.push(".#.+[+~]");
-		}
-	});
-
-	assert(function( el ) {
-		el.innerHTML = "<a href='' disabled='disabled'></a>" +
-			"<select disabled='disabled'><option/></select>";
-
-		// Support: Windows 8 Native Apps
-		// The type and name attributes are restricted during .innerHTML assignment
-		var input = document.createElement("input");
-		input.setAttribute( "type", "hidden" );
-		el.appendChild( input ).setAttribute( "name", "D" );
-
-		// Support: IE8
// Enforce case-sensitivity of name attribute - if ( el.querySelectorAll("[name=d]").length ) { - rbuggyQSA.push( "name" + whitespace + "*[*^$|!~]?=" ); - } - - // FF 3.5 - :enabled/:disabled and hidden elements (hidden elements are still enabled) - // IE8 throws error here and will not see later tests - if ( el.querySelectorAll(":enabled").length !== 2 ) { - rbuggyQSA.push( ":enabled", ":disabled" ); - } - - // Support: IE9-11+ - // IE's :disabled selector does not pick up the children of disabled fieldsets - docElem.appendChild( el ).disabled = true; - if ( el.querySelectorAll(":disabled").length !== 2 ) { - rbuggyQSA.push( ":enabled", ":disabled" ); - } - - // Opera 10-11 does not throw on post-comma invalid pseudos - el.querySelectorAll("*,:x"); - rbuggyQSA.push(",.*:"); - }); - } - - if ( (support.matchesSelector = rnative.test( (matches = docElem.matches || - docElem.webkitMatchesSelector || - docElem.mozMatchesSelector || - docElem.oMatchesSelector || - docElem.msMatchesSelector) )) ) { - - assert(function( el ) { - // Check to see if it's possible to do matchesSelector - // on a disconnected node (IE 9) - support.disconnectedMatch = matches.call( el, "*" ); - - // This should fail with an exception - // Gecko does not error, returns false instead - matches.call( el, "[s!='']:x" ); - rbuggyMatches.push( "!=", pseudos ); - }); - } - - rbuggyQSA = rbuggyQSA.length && new RegExp( rbuggyQSA.join("|") ); - rbuggyMatches = rbuggyMatches.length && new RegExp( rbuggyMatches.join("|") ); - - /* Contains - ---------------------------------------------------------------------- */ - hasCompare = rnative.test( docElem.compareDocumentPosition ); - - // Element contains another - // Purposefully self-exclusive - // As in, an element does not contain itself - contains = hasCompare || rnative.test( docElem.contains ) ? - function( a, b ) { - var adown = a.nodeType === 9 ? a.documentElement : a, - bup = b && b.parentNode; - return a === bup || !!( bup && bup.nodeType === 1 && ( - adown.contains ? - adown.contains( bup ) : - a.compareDocumentPosition && a.compareDocumentPosition( bup ) & 16 - )); - } : - function( a, b ) { - if ( b ) { - while ( (b = b.parentNode) ) { - if ( b === a ) { - return true; - } - } - } - return false; - }; - - /* Sorting - ---------------------------------------------------------------------- */ - - // Document order sorting - sortOrder = hasCompare ? - function( a, b ) { - - // Flag for duplicate removal - if ( a === b ) { - hasDuplicate = true; - return 0; - } - - // Sort on method existence if only one input has compareDocumentPosition - var compare = !a.compareDocumentPosition - !b.compareDocumentPosition; - if ( compare ) { - return compare; - } - - // Calculate position if both inputs belong to the same document - compare = ( a.ownerDocument || a ) === ( b.ownerDocument || b ) ? - a.compareDocumentPosition( b ) : - - // Otherwise we know they are disconnected - 1; - - // Disconnected nodes - if ( compare & 1 || - (!support.sortDetached && b.compareDocumentPosition( a ) === compare) ) { - - // Choose the first element that is related to our preferred document - if ( a === document || a.ownerDocument === preferredDoc && contains(preferredDoc, a) ) { - return -1; - } - if ( b === document || b.ownerDocument === preferredDoc && contains(preferredDoc, b) ) { - return 1; - } - - // Maintain original order - return sortInput ? - ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : - 0; - } - - return compare & 4 ? 
-1 : 1; - } : - function( a, b ) { - // Exit early if the nodes are identical - if ( a === b ) { - hasDuplicate = true; - return 0; - } - - var cur, - i = 0, - aup = a.parentNode, - bup = b.parentNode, - ap = [ a ], - bp = [ b ]; - - // Parentless nodes are either documents or disconnected - if ( !aup || !bup ) { - return a === document ? -1 : - b === document ? 1 : - aup ? -1 : - bup ? 1 : - sortInput ? - ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : - 0; - - // If the nodes are siblings, we can do a quick check - } else if ( aup === bup ) { - return siblingCheck( a, b ); - } - - // Otherwise we need full lists of their ancestors for comparison - cur = a; - while ( (cur = cur.parentNode) ) { - ap.unshift( cur ); - } - cur = b; - while ( (cur = cur.parentNode) ) { - bp.unshift( cur ); - } - - // Walk down the tree looking for a discrepancy - while ( ap[i] === bp[i] ) { - i++; - } - - return i ? - // Do a sibling check if the nodes have a common ancestor - siblingCheck( ap[i], bp[i] ) : - - // Otherwise nodes in our document sort first - ap[i] === preferredDoc ? -1 : - bp[i] === preferredDoc ? 1 : - 0; - }; - - return document; -}; - -Sizzle.matches = function( expr, elements ) { - return Sizzle( expr, null, null, elements ); -}; - -Sizzle.matchesSelector = function( elem, expr ) { - // Set document vars if needed - if ( ( elem.ownerDocument || elem ) !== document ) { - setDocument( elem ); - } - - if ( support.matchesSelector && documentIsHTML && - !nonnativeSelectorCache[ expr + " " ] && - ( !rbuggyMatches || !rbuggyMatches.test( expr ) ) && - ( !rbuggyQSA || !rbuggyQSA.test( expr ) ) ) { - - try { - var ret = matches.call( elem, expr ); - - // IE 9's matchesSelector returns false on disconnected nodes - if ( ret || support.disconnectedMatch || - // As well, disconnected nodes are said to be in a document - // fragment in IE 9 - elem.document && elem.document.nodeType !== 11 ) { - return ret; - } - } catch (e) { - nonnativeSelectorCache( expr, true ); - } - } - - return Sizzle( expr, document, null, [ elem ] ).length > 0; -}; - -Sizzle.contains = function( context, elem ) { - // Set document vars if needed - if ( ( context.ownerDocument || context ) !== document ) { - setDocument( context ); - } - return contains( context, elem ); -}; - -Sizzle.attr = function( elem, name ) { - // Set document vars if needed - if ( ( elem.ownerDocument || elem ) !== document ) { - setDocument( elem ); - } - - var fn = Expr.attrHandle[ name.toLowerCase() ], - // Don't get fooled by Object.prototype properties (jQuery #13807) - val = fn && hasOwn.call( Expr.attrHandle, name.toLowerCase() ) ? - fn( elem, name, !documentIsHTML ) : - undefined; - - return val !== undefined ? - val : - support.attributes || !documentIsHTML ? - elem.getAttribute( name ) : - (val = elem.getAttributeNode(name)) && val.specified ? 
- val.value : - null; -}; - -Sizzle.escape = function( sel ) { - return (sel + "").replace( rcssescape, fcssescape ); -}; - -Sizzle.error = function( msg ) { - throw new Error( "Syntax error, unrecognized expression: " + msg ); -}; - -/** - * Document sorting and removing duplicates - * @param {ArrayLike} results - */ -Sizzle.uniqueSort = function( results ) { - var elem, - duplicates = [], - j = 0, - i = 0; - - // Unless we *know* we can detect duplicates, assume their presence - hasDuplicate = !support.detectDuplicates; - sortInput = !support.sortStable && results.slice( 0 ); - results.sort( sortOrder ); - - if ( hasDuplicate ) { - while ( (elem = results[i++]) ) { - if ( elem === results[ i ] ) { - j = duplicates.push( i ); - } - } - while ( j-- ) { - results.splice( duplicates[ j ], 1 ); - } - } - - // Clear input after sorting to release objects - // See https://github.com/jquery/sizzle/pull/225 - sortInput = null; - - return results; -}; - -/** - * Utility function for retrieving the text value of an array of DOM nodes - * @param {Array|Element} elem - */ -getText = Sizzle.getText = function( elem ) { - var node, - ret = "", - i = 0, - nodeType = elem.nodeType; - - if ( !nodeType ) { - // If no nodeType, this is expected to be an array - while ( (node = elem[i++]) ) { - // Do not traverse comment nodes - ret += getText( node ); - } - } else if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) { - // Use textContent for elements - // innerText usage removed for consistency of new lines (jQuery #11153) - if ( typeof elem.textContent === "string" ) { - return elem.textContent; - } else { - // Traverse its children - for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { - ret += getText( elem ); - } - } - } else if ( nodeType === 3 || nodeType === 4 ) { - return elem.nodeValue; - } - // Do not include comment or processing instruction nodes - - return ret; -}; - -Expr = Sizzle.selectors = { - - // Can be adjusted by the user - cacheLength: 50, - - createPseudo: markFunction, - - match: matchExpr, - - attrHandle: {}, - - find: {}, - - relative: { - ">": { dir: "parentNode", first: true }, - " ": { dir: "parentNode" }, - "+": { dir: "previousSibling", first: true }, - "~": { dir: "previousSibling" } - }, - - preFilter: { - "ATTR": function( match ) { - match[1] = match[1].replace( runescape, funescape ); - - // Move the given value to match[3] whether quoted or unquoted - match[3] = ( match[3] || match[4] || match[5] || "" ).replace( runescape, funescape ); - - if ( match[2] === "~=" ) { - match[3] = " " + match[3] + " "; - } - - return match.slice( 0, 4 ); - }, - - "CHILD": function( match ) { - /* matches from matchExpr["CHILD"] - 1 type (only|nth|...) - 2 what (child|of-type) - 3 argument (even|odd|\d*|\d*n([+-]\d+)?|...) - 4 xn-component of xn+y argument ([+-]?\d*n|) - 5 sign of xn-component - 6 x of xn-component - 7 sign of y-component - 8 y of y-component - */ - match[1] = match[1].toLowerCase(); - - if ( match[1].slice( 0, 3 ) === "nth" ) { - // nth-* requires argument - if ( !match[3] ) { - Sizzle.error( match[0] ); - } - - // numeric x and y parameters for Expr.filter.CHILD - // remember that false/true cast respectively to 0/1 - match[4] = +( match[4] ? 
match[5] + (match[6] || 1) : 2 * ( match[3] === "even" || match[3] === "odd" ) ); - match[5] = +( ( match[7] + match[8] ) || match[3] === "odd" ); - - // other types prohibit arguments - } else if ( match[3] ) { - Sizzle.error( match[0] ); - } - - return match; - }, - - "PSEUDO": function( match ) { - var excess, - unquoted = !match[6] && match[2]; - - if ( matchExpr["CHILD"].test( match[0] ) ) { - return null; - } - - // Accept quoted arguments as-is - if ( match[3] ) { - match[2] = match[4] || match[5] || ""; - - // Strip excess characters from unquoted arguments - } else if ( unquoted && rpseudo.test( unquoted ) && - // Get excess from tokenize (recursively) - (excess = tokenize( unquoted, true )) && - // advance to the next closing parenthesis - (excess = unquoted.indexOf( ")", unquoted.length - excess ) - unquoted.length) ) { - - // excess is a negative index - match[0] = match[0].slice( 0, excess ); - match[2] = unquoted.slice( 0, excess ); - } - - // Return only captures needed by the pseudo filter method (type and argument) - return match.slice( 0, 3 ); - } - }, - - filter: { - - "TAG": function( nodeNameSelector ) { - var nodeName = nodeNameSelector.replace( runescape, funescape ).toLowerCase(); - return nodeNameSelector === "*" ? - function() { return true; } : - function( elem ) { - return elem.nodeName && elem.nodeName.toLowerCase() === nodeName; - }; - }, - - "CLASS": function( className ) { - var pattern = classCache[ className + " " ]; - - return pattern || - (pattern = new RegExp( "(^|" + whitespace + ")" + className + "(" + whitespace + "|$)" )) && - classCache( className, function( elem ) { - return pattern.test( typeof elem.className === "string" && elem.className || typeof elem.getAttribute !== "undefined" && elem.getAttribute("class") || "" ); - }); - }, - - "ATTR": function( name, operator, check ) { - return function( elem ) { - var result = Sizzle.attr( elem, name ); - - if ( result == null ) { - return operator === "!="; - } - if ( !operator ) { - return true; - } - - result += ""; - - return operator === "=" ? result === check : - operator === "!=" ? result !== check : - operator === "^=" ? check && result.indexOf( check ) === 0 : - operator === "*=" ? check && result.indexOf( check ) > -1 : - operator === "$=" ? check && result.slice( -check.length ) === check : - operator === "~=" ? ( " " + result.replace( rwhitespace, " " ) + " " ).indexOf( check ) > -1 : - operator === "|=" ? result === check || result.slice( 0, check.length + 1 ) === check + "-" : - false; - }; - }, - - "CHILD": function( type, what, argument, first, last ) { - var simple = type.slice( 0, 3 ) !== "nth", - forward = type.slice( -4 ) !== "last", - ofType = what === "of-type"; - - return first === 1 && last === 0 ? - - // Shortcut for :nth-*(n) - function( elem ) { - return !!elem.parentNode; - } : - - function( elem, context, xml ) { - var cache, uniqueCache, outerCache, node, nodeIndex, start, - dir = simple !== forward ? "nextSibling" : "previousSibling", - parent = elem.parentNode, - name = ofType && elem.nodeName.toLowerCase(), - useCache = !xml && !ofType, - diff = false; - - if ( parent ) { - - // :(first|last|only)-(child|of-type) - if ( simple ) { - while ( dir ) { - node = elem; - while ( (node = node[ dir ]) ) { - if ( ofType ? 
- node.nodeName.toLowerCase() === name : - node.nodeType === 1 ) { - - return false; - } - } - // Reverse direction for :only-* (if we haven't yet done so) - start = dir = type === "only" && !start && "nextSibling"; - } - return true; - } - - start = [ forward ? parent.firstChild : parent.lastChild ]; - - // non-xml :nth-child(...) stores cache data on `parent` - if ( forward && useCache ) { - - // Seek `elem` from a previously-cached index - - // ...in a gzip-friendly way - node = parent; - outerCache = node[ expando ] || (node[ expando ] = {}); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ node.uniqueID ] || - (outerCache[ node.uniqueID ] = {}); - - cache = uniqueCache[ type ] || []; - nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; - diff = nodeIndex && cache[ 2 ]; - node = nodeIndex && parent.childNodes[ nodeIndex ]; - - while ( (node = ++nodeIndex && node && node[ dir ] || - - // Fallback to seeking `elem` from the start - (diff = nodeIndex = 0) || start.pop()) ) { - - // When found, cache indexes on `parent` and break - if ( node.nodeType === 1 && ++diff && node === elem ) { - uniqueCache[ type ] = [ dirruns, nodeIndex, diff ]; - break; - } - } - - } else { - // Use previously-cached element index if available - if ( useCache ) { - // ...in a gzip-friendly way - node = elem; - outerCache = node[ expando ] || (node[ expando ] = {}); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ node.uniqueID ] || - (outerCache[ node.uniqueID ] = {}); - - cache = uniqueCache[ type ] || []; - nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; - diff = nodeIndex; - } - - // xml :nth-child(...) - // or :nth-last-child(...) or :nth(-last)?-of-type(...) - if ( diff === false ) { - // Use the same loop as above to seek `elem` from the start - while ( (node = ++nodeIndex && node && node[ dir ] || - (diff = nodeIndex = 0) || start.pop()) ) { - - if ( ( ofType ? - node.nodeName.toLowerCase() === name : - node.nodeType === 1 ) && - ++diff ) { - - // Cache the index of each encountered element - if ( useCache ) { - outerCache = node[ expando ] || (node[ expando ] = {}); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ node.uniqueID ] || - (outerCache[ node.uniqueID ] = {}); - - uniqueCache[ type ] = [ dirruns, diff ]; - } - - if ( node === elem ) { - break; - } - } - } - } - } - - // Incorporate the offset, then check against cycle size - diff -= last; - return diff === first || ( diff % first === 0 && diff / first >= 0 ); - } - }; - }, - - "PSEUDO": function( pseudo, argument ) { - // pseudo-class names are case-insensitive - // http://www.w3.org/TR/selectors/#pseudo-classes - // Prioritize by case sensitivity in case custom pseudos are added with uppercase letters - // Remember that setFilters inherits from pseudos - var args, - fn = Expr.pseudos[ pseudo ] || Expr.setFilters[ pseudo.toLowerCase() ] || - Sizzle.error( "unsupported pseudo: " + pseudo ); - - // The user may use createPseudo to indicate that - // arguments are needed to create the filter function - // just as Sizzle does - if ( fn[ expando ] ) { - return fn( argument ); - } - - // But maintain support for old signatures - if ( fn.length > 1 ) { - args = [ pseudo, pseudo, "", argument ]; - return Expr.setFilters.hasOwnProperty( pseudo.toLowerCase() ) ? 
- markFunction(function( seed, matches ) { - var idx, - matched = fn( seed, argument ), - i = matched.length; - while ( i-- ) { - idx = indexOf( seed, matched[i] ); - seed[ idx ] = !( matches[ idx ] = matched[i] ); - } - }) : - function( elem ) { - return fn( elem, 0, args ); - }; - } - - return fn; - } - }, - - pseudos: { - // Potentially complex pseudos - "not": markFunction(function( selector ) { - // Trim the selector passed to compile - // to avoid treating leading and trailing - // spaces as combinators - var input = [], - results = [], - matcher = compile( selector.replace( rtrim, "$1" ) ); - - return matcher[ expando ] ? - markFunction(function( seed, matches, context, xml ) { - var elem, - unmatched = matcher( seed, null, xml, [] ), - i = seed.length; - - // Match elements unmatched by `matcher` - while ( i-- ) { - if ( (elem = unmatched[i]) ) { - seed[i] = !(matches[i] = elem); - } - } - }) : - function( elem, context, xml ) { - input[0] = elem; - matcher( input, null, xml, results ); - // Don't keep the element (issue #299) - input[0] = null; - return !results.pop(); - }; - }), - - "has": markFunction(function( selector ) { - return function( elem ) { - return Sizzle( selector, elem ).length > 0; - }; - }), - - "contains": markFunction(function( text ) { - text = text.replace( runescape, funescape ); - return function( elem ) { - return ( elem.textContent || getText( elem ) ).indexOf( text ) > -1; - }; - }), - - // "Whether an element is represented by a :lang() selector - // is based solely on the element's language value - // being equal to the identifier C, - // or beginning with the identifier C immediately followed by "-". - // The matching of C against the element's language value is performed case-insensitively. - // The identifier C does not have to be a valid language name." - // http://www.w3.org/TR/selectors/#lang-pseudo - "lang": markFunction( function( lang ) { - // lang value must be a valid identifier - if ( !ridentifier.test(lang || "") ) { - Sizzle.error( "unsupported lang: " + lang ); - } - lang = lang.replace( runescape, funescape ).toLowerCase(); - return function( elem ) { - var elemLang; - do { - if ( (elemLang = documentIsHTML ? 
- elem.lang : - elem.getAttribute("xml:lang") || elem.getAttribute("lang")) ) { - - elemLang = elemLang.toLowerCase(); - return elemLang === lang || elemLang.indexOf( lang + "-" ) === 0; - } - } while ( (elem = elem.parentNode) && elem.nodeType === 1 ); - return false; - }; - }), - - // Miscellaneous - "target": function( elem ) { - var hash = window.location && window.location.hash; - return hash && hash.slice( 1 ) === elem.id; - }, - - "root": function( elem ) { - return elem === docElem; - }, - - "focus": function( elem ) { - return elem === document.activeElement && (!document.hasFocus || document.hasFocus()) && !!(elem.type || elem.href || ~elem.tabIndex); - }, - - // Boolean properties - "enabled": createDisabledPseudo( false ), - "disabled": createDisabledPseudo( true ), - - "checked": function( elem ) { - // In CSS3, :checked should return both checked and selected elements - // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked - var nodeName = elem.nodeName.toLowerCase(); - return (nodeName === "input" && !!elem.checked) || (nodeName === "option" && !!elem.selected); - }, - - "selected": function( elem ) { - // Accessing this property makes selected-by-default - // options in Safari work properly - if ( elem.parentNode ) { - elem.parentNode.selectedIndex; - } - - return elem.selected === true; - }, - - // Contents - "empty": function( elem ) { - // http://www.w3.org/TR/selectors/#empty-pseudo - // :empty is negated by element (1) or content nodes (text: 3; cdata: 4; entity ref: 5), - // but not by others (comment: 8; processing instruction: 7; etc.) - // nodeType < 6 works because attributes (2) do not appear as children - for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { - if ( elem.nodeType < 6 ) { - return false; - } - } - return true; - }, - - "parent": function( elem ) { - return !Expr.pseudos["empty"]( elem ); - }, - - // Element/input types - "header": function( elem ) { - return rheader.test( elem.nodeName ); - }, - - "input": function( elem ) { - return rinputs.test( elem.nodeName ); - }, - - "button": function( elem ) { - var name = elem.nodeName.toLowerCase(); - return name === "input" && elem.type === "button" || name === "button"; - }, - - "text": function( elem ) { - var attr; - return elem.nodeName.toLowerCase() === "input" && - elem.type === "text" && - - // Support: IE<8 - // New HTML5 attribute values (e.g., "search") appear with elem.type === "text" - ( (attr = elem.getAttribute("type")) == null || attr.toLowerCase() === "text" ); - }, - - // Position-in-collection - "first": createPositionalPseudo(function() { - return [ 0 ]; - }), - - "last": createPositionalPseudo(function( matchIndexes, length ) { - return [ length - 1 ]; - }), - - "eq": createPositionalPseudo(function( matchIndexes, length, argument ) { - return [ argument < 0 ? argument + length : argument ]; - }), - - "even": createPositionalPseudo(function( matchIndexes, length ) { - var i = 0; - for ( ; i < length; i += 2 ) { - matchIndexes.push( i ); - } - return matchIndexes; - }), - - "odd": createPositionalPseudo(function( matchIndexes, length ) { - var i = 1; - for ( ; i < length; i += 2 ) { - matchIndexes.push( i ); - } - return matchIndexes; - }), - - "lt": createPositionalPseudo(function( matchIndexes, length, argument ) { - var i = argument < 0 ? - argument + length : - argument > length ? 
- length : - argument; - for ( ; --i >= 0; ) { - matchIndexes.push( i ); - } - return matchIndexes; - }), - - "gt": createPositionalPseudo(function( matchIndexes, length, argument ) { - var i = argument < 0 ? argument + length : argument; - for ( ; ++i < length; ) { - matchIndexes.push( i ); - } - return matchIndexes; - }) - } -}; - -Expr.pseudos["nth"] = Expr.pseudos["eq"]; - -// Add button/input type pseudos -for ( i in { radio: true, checkbox: true, file: true, password: true, image: true } ) { - Expr.pseudos[ i ] = createInputPseudo( i ); -} -for ( i in { submit: true, reset: true } ) { - Expr.pseudos[ i ] = createButtonPseudo( i ); -} - -// Easy API for creating new setFilters -function setFilters() {} -setFilters.prototype = Expr.filters = Expr.pseudos; -Expr.setFilters = new setFilters(); - -tokenize = Sizzle.tokenize = function( selector, parseOnly ) { - var matched, match, tokens, type, - soFar, groups, preFilters, - cached = tokenCache[ selector + " " ]; - - if ( cached ) { - return parseOnly ? 0 : cached.slice( 0 ); - } - - soFar = selector; - groups = []; - preFilters = Expr.preFilter; - - while ( soFar ) { - - // Comma and first run - if ( !matched || (match = rcomma.exec( soFar )) ) { - if ( match ) { - // Don't consume trailing commas as valid - soFar = soFar.slice( match[0].length ) || soFar; - } - groups.push( (tokens = []) ); - } - - matched = false; - - // Combinators - if ( (match = rcombinators.exec( soFar )) ) { - matched = match.shift(); - tokens.push({ - value: matched, - // Cast descendant combinators to space - type: match[0].replace( rtrim, " " ) - }); - soFar = soFar.slice( matched.length ); - } - - // Filters - for ( type in Expr.filter ) { - if ( (match = matchExpr[ type ].exec( soFar )) && (!preFilters[ type ] || - (match = preFilters[ type ]( match ))) ) { - matched = match.shift(); - tokens.push({ - value: matched, - type: type, - matches: match - }); - soFar = soFar.slice( matched.length ); - } - } - - if ( !matched ) { - break; - } - } - - // Return the length of the invalid excess - // if we're just parsing - // Otherwise, throw an error or return tokens - return parseOnly ? - soFar.length : - soFar ? - Sizzle.error( selector ) : - // Cache the tokens - tokenCache( selector, groups ).slice( 0 ); -}; - -function toSelector( tokens ) { - var i = 0, - len = tokens.length, - selector = ""; - for ( ; i < len; i++ ) { - selector += tokens[i].value; - } - return selector; -} - -function addCombinator( matcher, combinator, base ) { - var dir = combinator.dir, - skip = combinator.next, - key = skip || dir, - checkNonElements = base && key === "parentNode", - doneName = done++; - - return combinator.first ? 
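// [Editor's sketch -- not part of the original source] The setFilters/Expr.pseudos table
// above is the extension point for custom pseudo-selectors; jQuery re-exposes it as
// jQuery.expr.pseudos. The pseudo name "external" below is a hypothetical example:
//
//   jQuery.expr.pseudos.external = function( elem ) {
//     return !!elem.hostname && elem.hostname !== window.location.hostname;
//   };
//   $( "a:external" ).addClass( "offsite" ); // anchors pointing to another host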
- // Check against closest ancestor/preceding element - function( elem, context, xml ) { - while ( (elem = elem[ dir ]) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - return matcher( elem, context, xml ); - } - } - return false; - } : - - // Check against all ancestor/preceding elements - function( elem, context, xml ) { - var oldCache, uniqueCache, outerCache, - newCache = [ dirruns, doneName ]; - - // We can't set arbitrary data on XML nodes, so they don't benefit from combinator caching - if ( xml ) { - while ( (elem = elem[ dir ]) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - if ( matcher( elem, context, xml ) ) { - return true; - } - } - } - } else { - while ( (elem = elem[ dir ]) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - outerCache = elem[ expando ] || (elem[ expando ] = {}); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ elem.uniqueID ] || (outerCache[ elem.uniqueID ] = {}); - - if ( skip && skip === elem.nodeName.toLowerCase() ) { - elem = elem[ dir ] || elem; - } else if ( (oldCache = uniqueCache[ key ]) && - oldCache[ 0 ] === dirruns && oldCache[ 1 ] === doneName ) { - - // Assign to newCache so results back-propagate to previous elements - return (newCache[ 2 ] = oldCache[ 2 ]); - } else { - // Reuse newcache so results back-propagate to previous elements - uniqueCache[ key ] = newCache; - - // A match means we're done; a fail means we have to keep checking - if ( (newCache[ 2 ] = matcher( elem, context, xml )) ) { - return true; - } - } - } - } - } - return false; - }; -} - -function elementMatcher( matchers ) { - return matchers.length > 1 ? - function( elem, context, xml ) { - var i = matchers.length; - while ( i-- ) { - if ( !matchers[i]( elem, context, xml ) ) { - return false; - } - } - return true; - } : - matchers[0]; -} - -function multipleContexts( selector, contexts, results ) { - var i = 0, - len = contexts.length; - for ( ; i < len; i++ ) { - Sizzle( selector, contexts[i], results ); - } - return results; -} - -function condense( unmatched, map, filter, context, xml ) { - var elem, - newUnmatched = [], - i = 0, - len = unmatched.length, - mapped = map != null; - - for ( ; i < len; i++ ) { - if ( (elem = unmatched[i]) ) { - if ( !filter || filter( elem, context, xml ) ) { - newUnmatched.push( elem ); - if ( mapped ) { - map.push( i ); - } - } - } - } - - return newUnmatched; -} - -function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postSelector ) { - if ( postFilter && !postFilter[ expando ] ) { - postFilter = setMatcher( postFilter ); - } - if ( postFinder && !postFinder[ expando ] ) { - postFinder = setMatcher( postFinder, postSelector ); - } - return markFunction(function( seed, results, context, xml ) { - var temp, i, elem, - preMap = [], - postMap = [], - preexisting = results.length, - - // Get initial elements from seed or context - elems = seed || multipleContexts( selector || "*", context.nodeType ? [ context ] : context, [] ), - - // Prefilter to get matcher input, preserving a map for seed-results synchronization - matcherIn = preFilter && ( seed || !selector ) ? - condense( elems, preMap, preFilter, context, xml ) : - elems, - - matcherOut = matcher ? - // If we have a postFinder, or filtered seed, or non-seed postFilter or preexisting results, - postFinder || ( seed ? preFilter : preexisting || postFilter ) ? 
- - // ...intermediate processing is necessary - [] : - - // ...otherwise use results directly - results : - matcherIn; - - // Find primary matches - if ( matcher ) { - matcher( matcherIn, matcherOut, context, xml ); - } - - // Apply postFilter - if ( postFilter ) { - temp = condense( matcherOut, postMap ); - postFilter( temp, [], context, xml ); - - // Un-match failing elements by moving them back to matcherIn - i = temp.length; - while ( i-- ) { - if ( (elem = temp[i]) ) { - matcherOut[ postMap[i] ] = !(matcherIn[ postMap[i] ] = elem); - } - } - } - - if ( seed ) { - if ( postFinder || preFilter ) { - if ( postFinder ) { - // Get the final matcherOut by condensing this intermediate into postFinder contexts - temp = []; - i = matcherOut.length; - while ( i-- ) { - if ( (elem = matcherOut[i]) ) { - // Restore matcherIn since elem is not yet a final match - temp.push( (matcherIn[i] = elem) ); - } - } - postFinder( null, (matcherOut = []), temp, xml ); - } - - // Move matched elements from seed to results to keep them synchronized - i = matcherOut.length; - while ( i-- ) { - if ( (elem = matcherOut[i]) && - (temp = postFinder ? indexOf( seed, elem ) : preMap[i]) > -1 ) { - - seed[temp] = !(results[temp] = elem); - } - } - } - - // Add elements to results, through postFinder if defined - } else { - matcherOut = condense( - matcherOut === results ? - matcherOut.splice( preexisting, matcherOut.length ) : - matcherOut - ); - if ( postFinder ) { - postFinder( null, results, matcherOut, xml ); - } else { - push.apply( results, matcherOut ); - } - } - }); -} - -function matcherFromTokens( tokens ) { - var checkContext, matcher, j, - len = tokens.length, - leadingRelative = Expr.relative[ tokens[0].type ], - implicitRelative = leadingRelative || Expr.relative[" "], - i = leadingRelative ? 1 : 0, - - // The foundational matcher ensures that elements are reachable from top-level context(s) - matchContext = addCombinator( function( elem ) { - return elem === checkContext; - }, implicitRelative, true ), - matchAnyContext = addCombinator( function( elem ) { - return indexOf( checkContext, elem ) > -1; - }, implicitRelative, true ), - matchers = [ function( elem, context, xml ) { - var ret = ( !leadingRelative && ( xml || context !== outermostContext ) ) || ( - (checkContext = context).nodeType ? - matchContext( elem, context, xml ) : - matchAnyContext( elem, context, xml ) ); - // Avoid hanging onto element (issue #299) - checkContext = null; - return ret; - } ]; - - for ( ; i < len; i++ ) { - if ( (matcher = Expr.relative[ tokens[i].type ]) ) { - matchers = [ addCombinator(elementMatcher( matchers ), matcher) ]; - } else { - matcher = Expr.filter[ tokens[i].type ].apply( null, tokens[i].matches ); - - // Return special upon seeing a positional matcher - if ( matcher[ expando ] ) { - // Find the next relative operator (if any) for proper handling - j = ++i; - for ( ; j < len; j++ ) { - if ( Expr.relative[ tokens[j].type ] ) { - break; - } - } - return setMatcher( - i > 1 && elementMatcher( matchers ), - i > 1 && toSelector( - // If the preceding token was a descendant combinator, insert an implicit any-element `*` - tokens.slice( 0, i - 1 ).concat({ value: tokens[ i - 2 ].type === " " ? 
"*" : "" }) - ).replace( rtrim, "$1" ), - matcher, - i < j && matcherFromTokens( tokens.slice( i, j ) ), - j < len && matcherFromTokens( (tokens = tokens.slice( j )) ), - j < len && toSelector( tokens ) - ); - } - matchers.push( matcher ); - } - } - - return elementMatcher( matchers ); -} - -function matcherFromGroupMatchers( elementMatchers, setMatchers ) { - var bySet = setMatchers.length > 0, - byElement = elementMatchers.length > 0, - superMatcher = function( seed, context, xml, results, outermost ) { - var elem, j, matcher, - matchedCount = 0, - i = "0", - unmatched = seed && [], - setMatched = [], - contextBackup = outermostContext, - // We must always have either seed elements or outermost context - elems = seed || byElement && Expr.find["TAG"]( "*", outermost ), - // Use integer dirruns iff this is the outermost matcher - dirrunsUnique = (dirruns += contextBackup == null ? 1 : Math.random() || 0.1), - len = elems.length; - - if ( outermost ) { - outermostContext = context === document || context || outermost; - } - - // Add elements passing elementMatchers directly to results - // Support: IE<9, Safari - // Tolerate NodeList properties (IE: "length"; Safari: ) matching elements by id - for ( ; i !== len && (elem = elems[i]) != null; i++ ) { - if ( byElement && elem ) { - j = 0; - if ( !context && elem.ownerDocument !== document ) { - setDocument( elem ); - xml = !documentIsHTML; - } - while ( (matcher = elementMatchers[j++]) ) { - if ( matcher( elem, context || document, xml) ) { - results.push( elem ); - break; - } - } - if ( outermost ) { - dirruns = dirrunsUnique; - } - } - - // Track unmatched elements for set filters - if ( bySet ) { - // They will have gone through all possible matchers - if ( (elem = !matcher && elem) ) { - matchedCount--; - } - - // Lengthen the array for every element, matched or not - if ( seed ) { - unmatched.push( elem ); - } - } - } - - // `i` is now the count of elements visited above, and adding it to `matchedCount` - // makes the latter nonnegative. - matchedCount += i; - - // Apply set filters to unmatched elements - // NOTE: This can be skipped if there are no unmatched elements (i.e., `matchedCount` - // equals `i`), unless we didn't visit _any_ elements in the above loop because we have - // no element matchers and no seed. - // Incrementing an initially-string "0" `i` allows `i` to remain a string only in that - // case, which will result in a "00" `matchedCount` that differs from `i` but is also - // numerically zero. - if ( bySet && i !== matchedCount ) { - j = 0; - while ( (matcher = setMatchers[j++]) ) { - matcher( unmatched, setMatched, context, xml ); - } - - if ( seed ) { - // Reintegrate element matches to eliminate the need for sorting - if ( matchedCount > 0 ) { - while ( i-- ) { - if ( !(unmatched[i] || setMatched[i]) ) { - setMatched[i] = pop.call( results ); - } - } - } - - // Discard index placeholder values to get only actual matches - setMatched = condense( setMatched ); - } - - // Add matches to results - push.apply( results, setMatched ); - - // Seedless set matches succeeding multiple successful matchers stipulate sorting - if ( outermost && !seed && setMatched.length > 0 && - ( matchedCount + setMatchers.length ) > 1 ) { - - Sizzle.uniqueSort( results ); - } - } - - // Override manipulation of globals by nested matchers - if ( outermost ) { - dirruns = dirrunsUnique; - outermostContext = contextBackup; - } - - return unmatched; - }; - - return bySet ? 
- markFunction( superMatcher ) : - superMatcher; -} - -compile = Sizzle.compile = function( selector, match /* Internal Use Only */ ) { - var i, - setMatchers = [], - elementMatchers = [], - cached = compilerCache[ selector + " " ]; - - if ( !cached ) { - // Generate a function of recursive functions that can be used to check each element - if ( !match ) { - match = tokenize( selector ); - } - i = match.length; - while ( i-- ) { - cached = matcherFromTokens( match[i] ); - if ( cached[ expando ] ) { - setMatchers.push( cached ); - } else { - elementMatchers.push( cached ); - } - } - - // Cache the compiled function - cached = compilerCache( selector, matcherFromGroupMatchers( elementMatchers, setMatchers ) ); - - // Save selector and tokenization - cached.selector = selector; - } - return cached; -}; - -/** - * A low-level selection function that works with Sizzle's compiled - * selector functions - * @param {String|Function} selector A selector or a pre-compiled - * selector function built with Sizzle.compile - * @param {Element} context - * @param {Array} [results] - * @param {Array} [seed] A set of elements to match against - */ -select = Sizzle.select = function( selector, context, results, seed ) { - var i, tokens, token, type, find, - compiled = typeof selector === "function" && selector, - match = !seed && tokenize( (selector = compiled.selector || selector) ); - - results = results || []; - - // Try to minimize operations if there is only one selector in the list and no seed - // (the latter of which guarantees us context) - if ( match.length === 1 ) { - - // Reduce context if the leading compound selector is an ID - tokens = match[0] = match[0].slice( 0 ); - if ( tokens.length > 2 && (token = tokens[0]).type === "ID" && - context.nodeType === 9 && documentIsHTML && Expr.relative[ tokens[1].type ] ) { - - context = ( Expr.find["ID"]( token.matches[0].replace(runescape, funescape), context ) || [] )[0]; - if ( !context ) { - return results; - - // Precompiled matchers will still verify ancestry, so step up a level - } else if ( compiled ) { - context = context.parentNode; - } - - selector = selector.slice( tokens.shift().value.length ); - } - - // Fetch a seed set for right-to-left matching - i = matchExpr["needsContext"].test( selector ) ? 
0 : tokens.length;
- while ( i-- ) {
- token = tokens[i];
-
- // Abort if we hit a combinator
- if ( Expr.relative[ (type = token.type) ] ) {
- break;
- }
- if ( (find = Expr.find[ type ]) ) {
- // Search, expanding context for leading sibling combinators
- if ( (seed = find(
- token.matches[0].replace( runescape, funescape ),
- rsibling.test( tokens[0].type ) && testContext( context.parentNode ) || context
- )) ) {
-
- // If seed is empty or no tokens remain, we can return early
- tokens.splice( i, 1 );
- selector = seed.length && toSelector( tokens );
- if ( !selector ) {
- push.apply( results, seed );
- return results;
- }
-
- break;
- }
- }
- }
- }
-
- // Compile and execute a filtering function if one is not provided
- // Provide `match` to avoid retokenization if we modified the selector above
- ( compiled || compile( selector, match ) )(
- seed,
- context,
- !documentIsHTML,
- results,
- !context || rsibling.test( selector ) && testContext( context.parentNode ) || context
- );
- return results;
-};
-
-// One-time assignments

-// Sort stability
-support.sortStable = expando.split("").sort( sortOrder ).join("") === expando;
-
-// Support: Chrome 14-35+
-// Always assume duplicates if they aren't passed to the comparison function
-support.detectDuplicates = !!hasDuplicate;
-
-// Initialize against the default document
-setDocument();
-
-// Support: Webkit<537.32 - Safari 6.0.3/Chrome 25 (fixed in Chrome 27)
-// Detached nodes confoundingly follow *each other*
-support.sortDetached = assert(function( el ) {
- // Should return 1, but returns 4 (following)
- return el.compareDocumentPosition( document.createElement("fieldset") ) & 1;
-});
-
-// Support: IE<8
-// Prevent attribute/property "interpolation"
-// https://msdn.microsoft.com/en-us/library/ms536429%28VS.85%29.aspx
-if ( !assert(function( el ) {
- el.innerHTML = "<a href='#'></a>";
- return el.firstChild.getAttribute("href") === "#" ;
-}) ) {
- addHandle( "type|href|height|width", function( elem, name, isXML ) {
- if ( !isXML ) {
- return elem.getAttribute( name, name.toLowerCase() === "type" ? 1 : 2 );
- }
- });
-}
-
-// Support: IE<9
-// Use defaultValue in place of getAttribute("value")
-if ( !support.attributes || !assert(function( el ) {
- el.innerHTML = "<input/>";
- el.firstChild.setAttribute( "value", "" );
- return el.firstChild.getAttribute( "value" ) === "";
-}) ) {
- addHandle( "value", function( elem, name, isXML ) {
- if ( !isXML && elem.nodeName.toLowerCase() === "input" ) {
- return elem.defaultValue;
- }
- });
-}
-
-// Support: IE<9
-// Use getAttributeNode to fetch booleans when getAttribute lies
-if ( !assert(function( el ) {
- return el.getAttribute("disabled") == null;
-}) ) {
- addHandle( booleans, function( elem, name, isXML ) {
- var val;
- if ( !isXML ) {
- return elem[ name ] === true ? name.toLowerCase() :
- (val = elem.getAttributeNode( name )) && val.specified ? 
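// [Editor's sketch -- not part of the original source] The low-level select/compile API
// documented above, as used through the standalone Sizzle global (inside jQuery the same
// entry point is exposed as jQuery.find):
//
//   var results = [];
//   Sizzle.select( "div > .note", document, results ); // fills and returns `results`
//
//   var compiled = Sizzle.compile( "div > .note" );    // cached, reusable matcher
//   Sizzle.select( compiled, document, [] );           // skips re-tokenization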
- val.value : - null; - } - }); -} - -return Sizzle; - -})( window ); - - - -jQuery.find = Sizzle; -jQuery.expr = Sizzle.selectors; - -// Deprecated -jQuery.expr[ ":" ] = jQuery.expr.pseudos; -jQuery.uniqueSort = jQuery.unique = Sizzle.uniqueSort; -jQuery.text = Sizzle.getText; -jQuery.isXMLDoc = Sizzle.isXML; -jQuery.contains = Sizzle.contains; -jQuery.escapeSelector = Sizzle.escape; - - - - -var dir = function( elem, dir, until ) { - var matched = [], - truncate = until !== undefined; - - while ( ( elem = elem[ dir ] ) && elem.nodeType !== 9 ) { - if ( elem.nodeType === 1 ) { - if ( truncate && jQuery( elem ).is( until ) ) { - break; - } - matched.push( elem ); - } - } - return matched; -}; - - -var siblings = function( n, elem ) { - var matched = []; - - for ( ; n; n = n.nextSibling ) { - if ( n.nodeType === 1 && n !== elem ) { - matched.push( n ); - } - } - - return matched; -}; - - -var rneedsContext = jQuery.expr.match.needsContext; - - - -function nodeName( elem, name ) { - - return elem.nodeName && elem.nodeName.toLowerCase() === name.toLowerCase(); - -}; -var rsingleTag = ( /^<([a-z][^\/\0>:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i ); - - - -// Implement the identical functionality for filter and not -function winnow( elements, qualifier, not ) { - if ( isFunction( qualifier ) ) { - return jQuery.grep( elements, function( elem, i ) { - return !!qualifier.call( elem, i, elem ) !== not; - } ); - } - - // Single element - if ( qualifier.nodeType ) { - return jQuery.grep( elements, function( elem ) { - return ( elem === qualifier ) !== not; - } ); - } - - // Arraylike of elements (jQuery, arguments, Array) - if ( typeof qualifier !== "string" ) { - return jQuery.grep( elements, function( elem ) { - return ( indexOf.call( qualifier, elem ) > -1 ) !== not; - } ); - } - - // Filtered directly for both simple and complex selectors - return jQuery.filter( qualifier, elements, not ); -} - -jQuery.filter = function( expr, elems, not ) { - var elem = elems[ 0 ]; - - if ( not ) { - expr = ":not(" + expr + ")"; - } - - if ( elems.length === 1 && elem.nodeType === 1 ) { - return jQuery.find.matchesSelector( elem, expr ) ? [ elem ] : []; - } - - return jQuery.find.matches( expr, jQuery.grep( elems, function( elem ) { - return elem.nodeType === 1; - } ) ); -}; - -jQuery.fn.extend( { - find: function( selector ) { - var i, ret, - len = this.length, - self = this; - - if ( typeof selector !== "string" ) { - return this.pushStack( jQuery( selector ).filter( function() { - for ( i = 0; i < len; i++ ) { - if ( jQuery.contains( self[ i ], this ) ) { - return true; - } - } - } ) ); - } - - ret = this.pushStack( [] ); - - for ( i = 0; i < len; i++ ) { - jQuery.find( selector, self[ i ], ret ); - } - - return len > 1 ? jQuery.uniqueSort( ret ) : ret; - }, - filter: function( selector ) { - return this.pushStack( winnow( this, selector || [], false ) ); - }, - not: function( selector ) { - return this.pushStack( winnow( this, selector || [], true ) ); - }, - is: function( selector ) { - return !!winnow( - this, - - // If this is a positional/relative selector, check membership in the returned set - // so $("p:first").is("p:last") won't return true for a doc with two "p". - typeof selector === "string" && rneedsContext.test( selector ) ? 
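// [Editor's sketch -- not part of the original source] The winnow/jQuery.filter machinery
// above backs the public .filter()/.not()/.is() methods:
//
//   var $items = $( "li" );
//   $items.filter( ".active" );              // keep elements matching the selector
//   $items.not( ".active" );                 // keep elements NOT matching it
//   $items.filter( function( i ) {           // function qualifier (winnow's first branch)
//     return i % 2 === 0;
//   } );
//   $items.is( ":first-child" );             // boolean membership test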
- jQuery( selector ) :
- selector || [],
- false
- ).length;
- }
-} );
-
-
-// Initialize a jQuery object
-
-
-// A central reference to the root jQuery(document)
-var rootjQuery,
-
- // A simple way to check for HTML strings
- // Prioritize #id over <a> to avoid XSS via location.hash (#9521)
- // Strict HTML recognition (#11290: must start with <)
- // Shortcut simple #id case for speed
- rquickExpr = /^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]+))$/,
-
- init = jQuery.fn.init = function( selector, context, root ) {
- var match, elem;
-
- // HANDLE: $(""), $(null), $(undefined), $(false)
- if ( !selector ) {
- return this;
- }
-
- // Method init() accepts an alternate rootjQuery
- // so migrate can support jQuery.sub (gh-2101)
- root = root || rootjQuery;
-
- // Handle HTML strings
- if ( typeof selector === "string" ) {
- if ( selector[ 0 ] === "<" &&
- selector[ selector.length - 1 ] === ">" &&
- selector.length >= 3 ) {
-
- // Assume that strings that start and end with <> are HTML and skip the regex check
- match = [ null, selector, null ];
-
- } else {
- match = rquickExpr.exec( selector );
- }
-
- // Match html or make sure no context is specified for #id
- if ( match && ( match[ 1 ] || !context ) ) {
-
- // HANDLE: $(html) -> $(array)
- if ( match[ 1 ] ) {
- context = context instanceof jQuery ? context[ 0 ] : context;
-
- // Option to run scripts is true for back-compat
- // Intentionally let the error be thrown if parseHTML is not present
- jQuery.merge( this, jQuery.parseHTML(
- match[ 1 ],
- context && context.nodeType ? context.ownerDocument || context : document,
- true
- ) );
-
- // HANDLE: $(html, props)
- if ( rsingleTag.test( match[ 1 ] ) && jQuery.isPlainObject( context ) ) {
- for ( match in context ) {
-
- // Properties of context are called as methods if possible
- if ( isFunction( this[ match ] ) ) {
- this[ match ]( context[ match ] );
-
- // ...and otherwise set as attributes
- } else {
- this.attr( match, context[ match ] );
- }
- }
- }
-
- return this;
-
- // HANDLE: $(#id)
- } else {
- elem = document.getElementById( match[ 2 ] );
-
- if ( elem ) {
-
- // Inject the element directly into the jQuery object
- this[ 0 ] = elem;
- this.length = 1;
- }
- return this;
- }
-
- // HANDLE: $(expr, $(...))
- } else if ( !context || context.jquery ) {
- return ( context || root ).find( selector );
-
- // HANDLE: $(expr, context)
- // (which is just equivalent to: $(context).find(expr)
- } else {
- return this.constructor( context ).find( selector );
- }
-
- // HANDLE: $(DOMElement)
- } else if ( selector.nodeType ) {
- this[ 0 ] = selector;
- this.length = 1;
- return this;
-
- // HANDLE: $(function)
- // Shortcut for document ready
- } else if ( isFunction( selector ) ) {
- return root.ready !== undefined ? 
- root.ready( selector ) : - - // Execute immediately if ready is not present - selector( jQuery ); - } - - return jQuery.makeArray( selector, this ); - }; - -// Give the init function the jQuery prototype for later instantiation -init.prototype = jQuery.fn; - -// Initialize central reference -rootjQuery = jQuery( document ); - - -var rparentsprev = /^(?:parents|prev(?:Until|All))/, - - // Methods guaranteed to produce a unique set when starting from a unique set - guaranteedUnique = { - children: true, - contents: true, - next: true, - prev: true - }; - -jQuery.fn.extend( { - has: function( target ) { - var targets = jQuery( target, this ), - l = targets.length; - - return this.filter( function() { - var i = 0; - for ( ; i < l; i++ ) { - if ( jQuery.contains( this, targets[ i ] ) ) { - return true; - } - } - } ); - }, - - closest: function( selectors, context ) { - var cur, - i = 0, - l = this.length, - matched = [], - targets = typeof selectors !== "string" && jQuery( selectors ); - - // Positional selectors never match, since there's no _selection_ context - if ( !rneedsContext.test( selectors ) ) { - for ( ; i < l; i++ ) { - for ( cur = this[ i ]; cur && cur !== context; cur = cur.parentNode ) { - - // Always skip document fragments - if ( cur.nodeType < 11 && ( targets ? - targets.index( cur ) > -1 : - - // Don't pass non-elements to Sizzle - cur.nodeType === 1 && - jQuery.find.matchesSelector( cur, selectors ) ) ) { - - matched.push( cur ); - break; - } - } - } - } - - return this.pushStack( matched.length > 1 ? jQuery.uniqueSort( matched ) : matched ); - }, - - // Determine the position of an element within the set - index: function( elem ) { - - // No argument, return index in parent - if ( !elem ) { - return ( this[ 0 ] && this[ 0 ].parentNode ) ? this.first().prevAll().length : -1; - } - - // Index in selector - if ( typeof elem === "string" ) { - return indexOf.call( jQuery( elem ), this[ 0 ] ); - } - - // Locate the position of the desired element - return indexOf.call( this, - - // If it receives a jQuery object, the first element is used - elem.jquery ? elem[ 0 ] : elem - ); - }, - - add: function( selector, context ) { - return this.pushStack( - jQuery.uniqueSort( - jQuery.merge( this.get(), jQuery( selector, context ) ) - ) - ); - }, - - addBack: function( selector ) { - return this.add( selector == null ? - this.prevObject : this.prevObject.filter( selector ) - ); - } -} ); - -function sibling( cur, dir ) { - while ( ( cur = cur[ dir ] ) && cur.nodeType !== 1 ) {} - return cur; -} - -jQuery.each( { - parent: function( elem ) { - var parent = elem.parentNode; - return parent && parent.nodeType !== 11 ? 
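// [Editor's sketch -- not part of the original source] The jQuery.fn.init dispatch above,
// one call per branch:
//
//   $( "<p>hi</p>" );                        // HTML string -> jQuery.parseHTML branch
//   $( "<img/>", { alt: "logo" } );          // single tag + props -> attr/method calls
//   $( "#main" );                            // bare id -> fast getElementById branch
//   $( ".note", container );                 // context -> $( container ).find( ".note" )
//   $( document.body );                      // DOMElement -> wrapped directly
//   $( function() { console.log( "ready" ); } ); // function -> document-ready shortcut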
parent : null; - }, - parents: function( elem ) { - return dir( elem, "parentNode" ); - }, - parentsUntil: function( elem, i, until ) { - return dir( elem, "parentNode", until ); - }, - next: function( elem ) { - return sibling( elem, "nextSibling" ); - }, - prev: function( elem ) { - return sibling( elem, "previousSibling" ); - }, - nextAll: function( elem ) { - return dir( elem, "nextSibling" ); - }, - prevAll: function( elem ) { - return dir( elem, "previousSibling" ); - }, - nextUntil: function( elem, i, until ) { - return dir( elem, "nextSibling", until ); - }, - prevUntil: function( elem, i, until ) { - return dir( elem, "previousSibling", until ); - }, - siblings: function( elem ) { - return siblings( ( elem.parentNode || {} ).firstChild, elem ); - }, - children: function( elem ) { - return siblings( elem.firstChild ); - }, - contents: function( elem ) { - if ( typeof elem.contentDocument !== "undefined" ) { - return elem.contentDocument; - } - - // Support: IE 9 - 11 only, iOS 7 only, Android Browser <=4.3 only - // Treat the template element as a regular one in browsers that - // don't support it. - if ( nodeName( elem, "template" ) ) { - elem = elem.content || elem; - } - - return jQuery.merge( [], elem.childNodes ); - } -}, function( name, fn ) { - jQuery.fn[ name ] = function( until, selector ) { - var matched = jQuery.map( this, fn, until ); - - if ( name.slice( -5 ) !== "Until" ) { - selector = until; - } - - if ( selector && typeof selector === "string" ) { - matched = jQuery.filter( selector, matched ); - } - - if ( this.length > 1 ) { - - // Remove duplicates - if ( !guaranteedUnique[ name ] ) { - jQuery.uniqueSort( matched ); - } - - // Reverse order for parents* and prev-derivatives - if ( rparentsprev.test( name ) ) { - matched.reverse(); - } - } - - return this.pushStack( matched ); - }; -} ); -var rnothtmlwhite = ( /[^\x20\t\r\n\f]+/g ); - - - -// Convert String-formatted options into Object-formatted ones -function createOptions( options ) { - var object = {}; - jQuery.each( options.match( rnothtmlwhite ) || [], function( _, flag ) { - object[ flag ] = true; - } ); - return object; -} - -/* - * Create a callback list using the following parameters: - * - * options: an optional list of space-separated options that will change how - * the callback list behaves or a more traditional option object - * - * By default a callback list will act like an event callback list and can be - * "fired" multiple times. - * - * Possible options: - * - * once: will ensure the callback list can only be fired once (like a Deferred) - * - * memory: will keep track of previous values and will call any callback added - * after the list has been fired right away with the latest "memorized" - * values (like a Deferred) - * - * unique: will ensure a callback can only be added once (no duplicate in the list) - * - * stopOnFalse: interrupt callings when a callback returns false - * - */ -jQuery.Callbacks = function( options ) { - - // Convert options from String-formatted to Object-formatted if needed - // (we check in cache first) - options = typeof options === "string" ? 
- createOptions( options ) : - jQuery.extend( {}, options ); - - var // Flag to know if list is currently firing - firing, - - // Last fire value for non-forgettable lists - memory, - - // Flag to know if list was already fired - fired, - - // Flag to prevent firing - locked, - - // Actual callback list - list = [], - - // Queue of execution data for repeatable lists - queue = [], - - // Index of currently firing callback (modified by add/remove as needed) - firingIndex = -1, - - // Fire callbacks - fire = function() { - - // Enforce single-firing - locked = locked || options.once; - - // Execute callbacks for all pending executions, - // respecting firingIndex overrides and runtime changes - fired = firing = true; - for ( ; queue.length; firingIndex = -1 ) { - memory = queue.shift(); - while ( ++firingIndex < list.length ) { - - // Run callback and check for early termination - if ( list[ firingIndex ].apply( memory[ 0 ], memory[ 1 ] ) === false && - options.stopOnFalse ) { - - // Jump to end and forget the data so .add doesn't re-fire - firingIndex = list.length; - memory = false; - } - } - } - - // Forget the data if we're done with it - if ( !options.memory ) { - memory = false; - } - - firing = false; - - // Clean up if we're done firing for good - if ( locked ) { - - // Keep an empty list if we have data for future add calls - if ( memory ) { - list = []; - - // Otherwise, this object is spent - } else { - list = ""; - } - } - }, - - // Actual Callbacks object - self = { - - // Add a callback or a collection of callbacks to the list - add: function() { - if ( list ) { - - // If we have memory from a past run, we should fire after adding - if ( memory && !firing ) { - firingIndex = list.length - 1; - queue.push( memory ); - } - - ( function add( args ) { - jQuery.each( args, function( _, arg ) { - if ( isFunction( arg ) ) { - if ( !options.unique || !self.has( arg ) ) { - list.push( arg ); - } - } else if ( arg && arg.length && toType( arg ) !== "string" ) { - - // Inspect recursively - add( arg ); - } - } ); - } )( arguments ); - - if ( memory && !firing ) { - fire(); - } - } - return this; - }, - - // Remove a callback from the list - remove: function() { - jQuery.each( arguments, function( _, arg ) { - var index; - while ( ( index = jQuery.inArray( arg, list, index ) ) > -1 ) { - list.splice( index, 1 ); - - // Handle firing indexes - if ( index <= firingIndex ) { - firingIndex--; - } - } - } ); - return this; - }, - - // Check if a given callback is in the list. - // If no argument is given, return whether or not list has callbacks attached. - has: function( fn ) { - return fn ? - jQuery.inArray( fn, list ) > -1 : - list.length > 0; - }, - - // Remove all callbacks from the list - empty: function() { - if ( list ) { - list = []; - } - return this; - }, - - // Disable .fire and .add - // Abort any current/pending executions - // Clear all callbacks and values - disable: function() { - locked = queue = []; - list = memory = ""; - return this; - }, - disabled: function() { - return !list; - }, - - // Disable .fire - // Also disable .add unless we have memory (since it would have no effect) - // Abort any pending executions - lock: function() { - locked = queue = []; - if ( !memory && !firing ) { - list = memory = ""; - } - return this; - }, - locked: function() { - return !!locked; - }, - - // Call all callbacks with the given context and arguments - fireWith: function( context, args ) { - if ( !locked ) { - args = args || []; - args = [ context, args.slice ? 
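// [Editor's sketch -- not part of the original source] The jQuery.Callbacks option flags
// documented above ("once", "memory", "unique", "stopOnFalse"), in combination:
//
//   var cb = jQuery.Callbacks( "once memory" );
//   cb.add( function( v ) { console.log( "A", v ); } );
//   cb.fire( 1 );                                        // logs "A 1"; "once" then locks
//   cb.fire( 2 );                                        // ignored: already fired
//   cb.add( function( v ) { console.log( "B", v ); } );  // "memory" replays: logs "B 1"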
args.slice() : args ]; - queue.push( args ); - if ( !firing ) { - fire(); - } - } - return this; - }, - - // Call all the callbacks with the given arguments - fire: function() { - self.fireWith( this, arguments ); - return this; - }, - - // To know if the callbacks have already been called at least once - fired: function() { - return !!fired; - } - }; - - return self; -}; - - -function Identity( v ) { - return v; -} -function Thrower( ex ) { - throw ex; -} - -function adoptValue( value, resolve, reject, noValue ) { - var method; - - try { - - // Check for promise aspect first to privilege synchronous behavior - if ( value && isFunction( ( method = value.promise ) ) ) { - method.call( value ).done( resolve ).fail( reject ); - - // Other thenables - } else if ( value && isFunction( ( method = value.then ) ) ) { - method.call( value, resolve, reject ); - - // Other non-thenables - } else { - - // Control `resolve` arguments by letting Array#slice cast boolean `noValue` to integer: - // * false: [ value ].slice( 0 ) => resolve( value ) - // * true: [ value ].slice( 1 ) => resolve() - resolve.apply( undefined, [ value ].slice( noValue ) ); - } - - // For Promises/A+, convert exceptions into rejections - // Since jQuery.when doesn't unwrap thenables, we can skip the extra checks appearing in - // Deferred#then to conditionally suppress rejection. - } catch ( value ) { - - // Support: Android 4.0 only - // Strict mode functions invoked without .call/.apply get global-object context - reject.apply( undefined, [ value ] ); - } -} - -jQuery.extend( { - - Deferred: function( func ) { - var tuples = [ - - // action, add listener, callbacks, - // ... .then handlers, argument index, [final state] - [ "notify", "progress", jQuery.Callbacks( "memory" ), - jQuery.Callbacks( "memory" ), 2 ], - [ "resolve", "done", jQuery.Callbacks( "once memory" ), - jQuery.Callbacks( "once memory" ), 0, "resolved" ], - [ "reject", "fail", jQuery.Callbacks( "once memory" ), - jQuery.Callbacks( "once memory" ), 1, "rejected" ] - ], - state = "pending", - promise = { - state: function() { - return state; - }, - always: function() { - deferred.done( arguments ).fail( arguments ); - return this; - }, - "catch": function( fn ) { - return promise.then( null, fn ); - }, - - // Keep pipe for back-compat - pipe: function( /* fnDone, fnFail, fnProgress */ ) { - var fns = arguments; - - return jQuery.Deferred( function( newDefer ) { - jQuery.each( tuples, function( i, tuple ) { - - // Map tuples (progress, done, fail) to arguments (done, fail, progress) - var fn = isFunction( fns[ tuple[ 4 ] ] ) && fns[ tuple[ 4 ] ]; - - // deferred.progress(function() { bind to newDefer or newDefer.notify }) - // deferred.done(function() { bind to newDefer or newDefer.resolve }) - // deferred.fail(function() { bind to newDefer or newDefer.reject }) - deferred[ tuple[ 1 ] ]( function() { - var returned = fn && fn.apply( this, arguments ); - if ( returned && isFunction( returned.promise ) ) { - returned.promise() - .progress( newDefer.notify ) - .done( newDefer.resolve ) - .fail( newDefer.reject ); - } else { - newDefer[ tuple[ 0 ] + "With" ]( - this, - fn ? 
[ returned ] : arguments - ); - } - } ); - } ); - fns = null; - } ).promise(); - }, - then: function( onFulfilled, onRejected, onProgress ) { - var maxDepth = 0; - function resolve( depth, deferred, handler, special ) { - return function() { - var that = this, - args = arguments, - mightThrow = function() { - var returned, then; - - // Support: Promises/A+ section 2.3.3.3.3 - // https://promisesaplus.com/#point-59 - // Ignore double-resolution attempts - if ( depth < maxDepth ) { - return; - } - - returned = handler.apply( that, args ); - - // Support: Promises/A+ section 2.3.1 - // https://promisesaplus.com/#point-48 - if ( returned === deferred.promise() ) { - throw new TypeError( "Thenable self-resolution" ); - } - - // Support: Promises/A+ sections 2.3.3.1, 3.5 - // https://promisesaplus.com/#point-54 - // https://promisesaplus.com/#point-75 - // Retrieve `then` only once - then = returned && - - // Support: Promises/A+ section 2.3.4 - // https://promisesaplus.com/#point-64 - // Only check objects and functions for thenability - ( typeof returned === "object" || - typeof returned === "function" ) && - returned.then; - - // Handle a returned thenable - if ( isFunction( then ) ) { - - // Special processors (notify) just wait for resolution - if ( special ) { - then.call( - returned, - resolve( maxDepth, deferred, Identity, special ), - resolve( maxDepth, deferred, Thrower, special ) - ); - - // Normal processors (resolve) also hook into progress - } else { - - // ...and disregard older resolution values - maxDepth++; - - then.call( - returned, - resolve( maxDepth, deferred, Identity, special ), - resolve( maxDepth, deferred, Thrower, special ), - resolve( maxDepth, deferred, Identity, - deferred.notifyWith ) - ); - } - - // Handle all other returned values - } else { - - // Only substitute handlers pass on context - // and multiple values (non-spec behavior) - if ( handler !== Identity ) { - that = undefined; - args = [ returned ]; - } - - // Process the value(s) - // Default process is resolve - ( special || deferred.resolveWith )( that, args ); - } - }, - - // Only normal processors (resolve) catch and reject exceptions - process = special ? - mightThrow : - function() { - try { - mightThrow(); - } catch ( e ) { - - if ( jQuery.Deferred.exceptionHook ) { - jQuery.Deferred.exceptionHook( e, - process.stackTrace ); - } - - // Support: Promises/A+ section 2.3.3.3.4.1 - // https://promisesaplus.com/#point-61 - // Ignore post-resolution exceptions - if ( depth + 1 >= maxDepth ) { - - // Only substitute handlers pass on context - // and multiple values (non-spec behavior) - if ( handler !== Thrower ) { - that = undefined; - args = [ e ]; - } - - deferred.rejectWith( that, args ); - } - } - }; - - // Support: Promises/A+ section 2.3.3.3.1 - // https://promisesaplus.com/#point-57 - // Re-resolve promises immediately to dodge false rejection from - // subsequent errors - if ( depth ) { - process(); - } else { - - // Call an optional hook to record the stack, in case of exception - // since it's otherwise lost when execution goes async - if ( jQuery.Deferred.getStackHook ) { - process.stackTrace = jQuery.Deferred.getStackHook(); - } - window.setTimeout( process ); - } - }; - } - - return jQuery.Deferred( function( newDefer ) { - - // progress_handlers.add( ... ) - tuples[ 0 ][ 3 ].add( - resolve( - 0, - newDefer, - isFunction( onProgress ) ? - onProgress : - Identity, - newDefer.notifyWith - ) - ); - - // fulfilled_handlers.add( ... 
) - tuples[ 1 ][ 3 ].add( - resolve( - 0, - newDefer, - isFunction( onFulfilled ) ? - onFulfilled : - Identity - ) - ); - - // rejected_handlers.add( ... ) - tuples[ 2 ][ 3 ].add( - resolve( - 0, - newDefer, - isFunction( onRejected ) ? - onRejected : - Thrower - ) - ); - } ).promise(); - }, - - // Get a promise for this deferred - // If obj is provided, the promise aspect is added to the object - promise: function( obj ) { - return obj != null ? jQuery.extend( obj, promise ) : promise; - } - }, - deferred = {}; - - // Add list-specific methods - jQuery.each( tuples, function( i, tuple ) { - var list = tuple[ 2 ], - stateString = tuple[ 5 ]; - - // promise.progress = list.add - // promise.done = list.add - // promise.fail = list.add - promise[ tuple[ 1 ] ] = list.add; - - // Handle state - if ( stateString ) { - list.add( - function() { - - // state = "resolved" (i.e., fulfilled) - // state = "rejected" - state = stateString; - }, - - // rejected_callbacks.disable - // fulfilled_callbacks.disable - tuples[ 3 - i ][ 2 ].disable, - - // rejected_handlers.disable - // fulfilled_handlers.disable - tuples[ 3 - i ][ 3 ].disable, - - // progress_callbacks.lock - tuples[ 0 ][ 2 ].lock, - - // progress_handlers.lock - tuples[ 0 ][ 3 ].lock - ); - } - - // progress_handlers.fire - // fulfilled_handlers.fire - // rejected_handlers.fire - list.add( tuple[ 3 ].fire ); - - // deferred.notify = function() { deferred.notifyWith(...) } - // deferred.resolve = function() { deferred.resolveWith(...) } - // deferred.reject = function() { deferred.rejectWith(...) } - deferred[ tuple[ 0 ] ] = function() { - deferred[ tuple[ 0 ] + "With" ]( this === deferred ? undefined : this, arguments ); - return this; - }; - - // deferred.notifyWith = list.fireWith - // deferred.resolveWith = list.fireWith - // deferred.rejectWith = list.fireWith - deferred[ tuple[ 0 ] + "With" ] = list.fireWith; - } ); - - // Make the deferred a promise - promise.promise( deferred ); - - // Call given func if any - if ( func ) { - func.call( deferred, deferred ); - } - - // All done! - return deferred; - }, - - // Deferred helper - when: function( singleValue ) { - var - - // count of uncompleted subordinates - remaining = arguments.length, - - // count of unprocessed arguments - i = remaining, - - // subordinate fulfillment data - resolveContexts = Array( i ), - resolveValues = slice.call( arguments ), - - // the master Deferred - master = jQuery.Deferred(), - - // subordinate callback factory - updateFunc = function( i ) { - return function( value ) { - resolveContexts[ i ] = this; - resolveValues[ i ] = arguments.length > 1 ? slice.call( arguments ) : value; - if ( !( --remaining ) ) { - master.resolveWith( resolveContexts, resolveValues ); - } - }; - }; - - // Single- and empty arguments are adopted like Promise.resolve - if ( remaining <= 1 ) { - adoptValue( singleValue, master.done( updateFunc( i ) ).resolve, master.reject, - !remaining ); - - // Use .then() to unwrap secondary thenables (cf. gh-3000) - if ( master.state() === "pending" || - isFunction( resolveValues[ i ] && resolveValues[ i ].then ) ) { - - return master.then(); - } - } - - // Multiple arguments are aggregated like Promise.all array elements - while ( i-- ) { - adoptValue( resolveValues[ i ], updateFunc( i ), master.reject ); - } - - return master.promise(); - } -} ); - - -// These usually indicate a programmer mistake during development, -// warn about them ASAP rather than swallowing them by default. 
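// [Editor's sketch -- not part of the original source] jQuery.Deferred and jQuery.when as
// implemented above:
//
//   var d = jQuery.Deferred();
//   d.done( function( v ) { console.log( "ok", v ); } )
//    .fail( function( e ) { console.log( "err", e ); } );
//   d.resolve( 42 );                          // logs "ok 42"; d.state() === "resolved"
//
//   jQuery.when( d, jQuery.Deferred().resolve( "x" ) )
//     .then( function( a, b ) { console.log( a, b ); } ); // 42 "x" (one argument per input)
//
// (The rerrorNames guard that follows implements the warn-early behavior described in the
// preceding comment.)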
-var rerrorNames = /^(Eval|Internal|Range|Reference|Syntax|Type|URI)Error$/; - -jQuery.Deferred.exceptionHook = function( error, stack ) { - - // Support: IE 8 - 9 only - // Console exists when dev tools are open, which can happen at any time - if ( window.console && window.console.warn && error && rerrorNames.test( error.name ) ) { - window.console.warn( "jQuery.Deferred exception: " + error.message, error.stack, stack ); - } -}; - - - - -jQuery.readyException = function( error ) { - window.setTimeout( function() { - throw error; - } ); -}; - - - - -// The deferred used on DOM ready -var readyList = jQuery.Deferred(); - -jQuery.fn.ready = function( fn ) { - - readyList - .then( fn ) - - // Wrap jQuery.readyException in a function so that the lookup - // happens at the time of error handling instead of callback - // registration. - .catch( function( error ) { - jQuery.readyException( error ); - } ); - - return this; -}; - -jQuery.extend( { - - // Is the DOM ready to be used? Set to true once it occurs. - isReady: false, - - // A counter to track how many items to wait for before - // the ready event fires. See #6781 - readyWait: 1, - - // Handle when the DOM is ready - ready: function( wait ) { - - // Abort if there are pending holds or we're already ready - if ( wait === true ? --jQuery.readyWait : jQuery.isReady ) { - return; - } - - // Remember that the DOM is ready - jQuery.isReady = true; - - // If a normal DOM Ready event fired, decrement, and wait if need be - if ( wait !== true && --jQuery.readyWait > 0 ) { - return; - } - - // If there are functions bound, to execute - readyList.resolveWith( document, [ jQuery ] ); - } -} ); - -jQuery.ready.then = readyList.then; - -// The ready event handler and self cleanup method -function completed() { - document.removeEventListener( "DOMContentLoaded", completed ); - window.removeEventListener( "load", completed ); - jQuery.ready(); -} - -// Catch cases where $(document).ready() is called -// after the browser event has already occurred. -// Support: IE <=9 - 10 only -// Older IE sometimes signals "interactive" too soon -if ( document.readyState === "complete" || - ( document.readyState !== "loading" && !document.documentElement.doScroll ) ) { - - // Handle it asynchronously to allow scripts the opportunity to delay ready - window.setTimeout( jQuery.ready ); - -} else { - - // Use the handy event callback - document.addEventListener( "DOMContentLoaded", completed ); - - // A fallback to window.onload, that will always work - window.addEventListener( "load", completed ); -} - - - - -// Multifunctional method to get and set values of a collection -// The value/s can optionally be executed if it's a function -var access = function( elems, fn, key, value, chainable, emptyGet, raw ) { - var i = 0, - len = elems.length, - bulk = key == null; - - // Sets many values - if ( toType( key ) === "object" ) { - chainable = true; - for ( i in key ) { - access( elems, fn, i, key[ i ], true, emptyGet, raw ); - } - - // Sets one value - } else if ( value !== undefined ) { - chainable = true; - - if ( !isFunction( value ) ) { - raw = true; - } - - if ( bulk ) { - - // Bulk operations run against the entire set - if ( raw ) { - fn.call( elems, value ); - fn = null; - - // ...except when executing function values - } else { - bulk = fn; - fn = function( elem, key, value ) { - return bulk.call( jQuery( elem ), value ); - }; - } - } - - if ( fn ) { - for ( ; i < len; i++ ) { - fn( - elems[ i ], key, raw ? 
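// [Editor's sketch -- not part of the original source] The ready machinery above in use;
// handlers queue on readyList, and errors thrown inside them resurface asynchronously via
// jQuery.readyException instead of being silently swallowed:
//
//   $( document ).ready( function() { console.log( "DOM ready" ); } );
//   $( function() { console.log( "same thing" ); } ); // the $( fn ) shortcut routes here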
- value : - value.call( elems[ i ], i, fn( elems[ i ], key ) ) - ); - } - } - } - - if ( chainable ) { - return elems; - } - - // Gets - if ( bulk ) { - return fn.call( elems ); - } - - return len ? fn( elems[ 0 ], key ) : emptyGet; -}; - - -// Matches dashed string for camelizing -var rmsPrefix = /^-ms-/, - rdashAlpha = /-([a-z])/g; - -// Used by camelCase as callback to replace() -function fcamelCase( all, letter ) { - return letter.toUpperCase(); -} - -// Convert dashed to camelCase; used by the css and data modules -// Support: IE <=9 - 11, Edge 12 - 15 -// Microsoft forgot to hump their vendor prefix (#9572) -function camelCase( string ) { - return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase ); -} -var acceptData = function( owner ) { - - // Accepts only: - // - Node - // - Node.ELEMENT_NODE - // - Node.DOCUMENT_NODE - // - Object - // - Any - return owner.nodeType === 1 || owner.nodeType === 9 || !( +owner.nodeType ); -}; - - - - -function Data() { - this.expando = jQuery.expando + Data.uid++; -} - -Data.uid = 1; - -Data.prototype = { - - cache: function( owner ) { - - // Check if the owner object already has a cache - var value = owner[ this.expando ]; - - // If not, create one - if ( !value ) { - value = {}; - - // We can accept data for non-element nodes in modern browsers, - // but we should not, see #8335. - // Always return an empty object. - if ( acceptData( owner ) ) { - - // If it is a node unlikely to be stringify-ed or looped over - // use plain assignment - if ( owner.nodeType ) { - owner[ this.expando ] = value; - - // Otherwise secure it in a non-enumerable property - // configurable must be true to allow the property to be - // deleted when data is removed - } else { - Object.defineProperty( owner, this.expando, { - value: value, - configurable: true - } ); - } - } - } - - return value; - }, - set: function( owner, data, value ) { - var prop, - cache = this.cache( owner ); - - // Handle: [ owner, key, value ] args - // Always use camelCase key (gh-2257) - if ( typeof data === "string" ) { - cache[ camelCase( data ) ] = value; - - // Handle: [ owner, { properties } ] args - } else { - - // Copy the properties one-by-one to the cache object - for ( prop in data ) { - cache[ camelCase( prop ) ] = data[ prop ]; - } - } - return cache; - }, - get: function( owner, key ) { - return key === undefined ? - this.cache( owner ) : - - // Always use camelCase key (gh-2257) - owner[ this.expando ] && owner[ this.expando ][ camelCase( key ) ]; - }, - access: function( owner, key, value ) { - - // In cases where either: - // - // 1. No key was specified - // 2. A string key was specified, but no value provided - // - // Take the "read" path and allow the get method to determine - // which value to return, respectively either: - // - // 1. The entire cache object - // 2. The data stored at the key - // - if ( key === undefined || - ( ( key && typeof key === "string" ) && value === undefined ) ) { - - return this.get( owner, key ); - } - - // When the key is not a string, or both a key and value - // are specified, set or extend (existing objects) with either: - // - // 1. An object of properties - // 2. A key and value - // - this.set( owner, key, value ); - - // Since the "set" path can have two possible entry points - // return the expected data based on which path was taken[*] - return value !== undefined ? 
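// [Editor's sketch -- not part of the original source] The camelCase helper above, including
// the Microsoft-prefix quirk it works around:
//
//   camelCase( "background-color" );  // "backgroundColor"
//   camelCase( "-ms-transform" );     // "msTransform" (lowercase "ms", per the comment above)
//   camelCase( "-webkit-transform" ); // "WebkitTransform"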
value : key; - }, - remove: function( owner, key ) { - var i, - cache = owner[ this.expando ]; - - if ( cache === undefined ) { - return; - } - - if ( key !== undefined ) { - - // Support array or space separated string of keys - if ( Array.isArray( key ) ) { - - // If key is an array of keys... - // We always set camelCase keys, so remove that. - key = key.map( camelCase ); - } else { - key = camelCase( key ); - - // If a key with the spaces exists, use it. - // Otherwise, create an array by matching non-whitespace - key = key in cache ? - [ key ] : - ( key.match( rnothtmlwhite ) || [] ); - } - - i = key.length; - - while ( i-- ) { - delete cache[ key[ i ] ]; - } - } - - // Remove the expando if there's no more data - if ( key === undefined || jQuery.isEmptyObject( cache ) ) { - - // Support: Chrome <=35 - 45 - // Webkit & Blink performance suffers when deleting properties - // from DOM nodes, so set to undefined instead - // https://bugs.chromium.org/p/chromium/issues/detail?id=378607 (bug restricted) - if ( owner.nodeType ) { - owner[ this.expando ] = undefined; - } else { - delete owner[ this.expando ]; - } - } - }, - hasData: function( owner ) { - var cache = owner[ this.expando ]; - return cache !== undefined && !jQuery.isEmptyObject( cache ); - } -}; -var dataPriv = new Data(); - -var dataUser = new Data(); - - - -// Implementation Summary -// -// 1. Enforce API surface and semantic compatibility with 1.9.x branch -// 2. Improve the module's maintainability by reducing the storage -// paths to a single mechanism. -// 3. Use the same single mechanism to support "private" and "user" data. -// 4. _Never_ expose "private" data to user code (TODO: Drop _data, _removeData) -// 5. Avoid exposing implementation details on user objects (eg. expando properties) -// 6. Provide a clear path for implementation upgrade to WeakMap in 2014 - -var rbrace = /^(?:\{[\w\W]*\}|\[[\w\W]*\])$/, - rmultiDash = /[A-Z]/g; - -function getData( data ) { - if ( data === "true" ) { - return true; - } - - if ( data === "false" ) { - return false; - } - - if ( data === "null" ) { - return null; - } - - // Only convert to a number if it doesn't change the string - if ( data === +data + "" ) { - return +data; - } - - if ( rbrace.test( data ) ) { - return JSON.parse( data ); - } - - return data; -} - -function dataAttr( elem, key, data ) { - var name; - - // If nothing was found internally, try to fetch any - // data from the HTML5 data-* attribute - if ( data === undefined && elem.nodeType === 1 ) { - name = "data-" + key.replace( rmultiDash, "-$&" ).toLowerCase(); - data = elem.getAttribute( name ); - - if ( typeof data === "string" ) { - try { - data = getData( data ); - } catch ( e ) {} - - // Make sure we set the data so it isn't changed later - dataUser.set( elem, key, data ); - } else { - data = undefined; - } - } - return data; -} - -jQuery.extend( { - hasData: function( elem ) { - return dataUser.hasData( elem ) || dataPriv.hasData( elem ); - }, - - data: function( elem, name, data ) { - return dataUser.access( elem, name, data ); - }, - - removeData: function( elem, name ) { - dataUser.remove( elem, name ); - }, - - // TODO: Now that all calls to _data and _removeData have been replaced - // with direct calls to dataPriv methods, these can be deprecated. 
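// [Editor's sketch -- not part of the original source] The getData/dataAttr conversion above,
// as observed through the public .data() getter. Given
// <div id="d" data-count="7" data-flag="true" data-opts='{"a":1}'>:
//
//   $( "#d" ).data( "count" ); // 7        (string that survives the Number round-trip)
//   $( "#d" ).data( "flag" );  // true     ("true"/"false"/"null" become primitives)
//   $( "#d" ).data( "opts" );  // { a: 1 } (brace/bracket strings go through JSON.parse)
//
// (The _data/_removeData methods below are the private dataPriv path flagged for
// deprecation in the preceding comment.)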
[vendored jQuery source elided: this hunk deletes the bundled jQuery library asset from the built documentation. The removed code comprises jQuery's internal data/queue utilities, show/hide and CSS box-model helpers, DOM fragment building and manipulation methods (clone, append, html, replaceWith), and the jQuery.event / jQuery.Event system. It is unmodified third-party library code, so the deleted lines are not reproduced here.]
- boxModelAdjustment( - elem, - dimension, - extra, - isBorderBox, - styles - ) : - 0; - - // Account for unreliable border-box dimensions by comparing offset* to computed and - // faking a content-box to get border and padding (gh-3699) - if ( isBorderBox && scrollboxSizeBuggy ) { - subtract -= Math.ceil( - elem[ "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ) ] - - parseFloat( styles[ dimension ] ) - - boxModelAdjustment( elem, dimension, "border", false, styles ) - - 0.5 - ); - } - - // Convert to pixels if value adjustment is needed - if ( subtract && ( matches = rcssNum.exec( value ) ) && - ( matches[ 3 ] || "px" ) !== "px" ) { - - elem.style[ dimension ] = value; - value = jQuery.css( elem, dimension ); - } - - return setPositiveNumber( elem, value, subtract ); - } - }; -} ); - -jQuery.cssHooks.marginLeft = addGetHookIf( support.reliableMarginLeft, - function( elem, computed ) { - if ( computed ) { - return ( parseFloat( curCSS( elem, "marginLeft" ) ) || - elem.getBoundingClientRect().left - - swap( elem, { marginLeft: 0 }, function() { - return elem.getBoundingClientRect().left; - } ) - ) + "px"; - } - } -); - -// These hooks are used by animate to expand properties -jQuery.each( { - margin: "", - padding: "", - border: "Width" -}, function( prefix, suffix ) { - jQuery.cssHooks[ prefix + suffix ] = { - expand: function( value ) { - var i = 0, - expanded = {}, - - // Assumes a single number if not a string - parts = typeof value === "string" ? value.split( " " ) : [ value ]; - - for ( ; i < 4; i++ ) { - expanded[ prefix + cssExpand[ i ] + suffix ] = - parts[ i ] || parts[ i - 2 ] || parts[ 0 ]; - } - - return expanded; - } - }; - - if ( prefix !== "margin" ) { - jQuery.cssHooks[ prefix + suffix ].set = setPositiveNumber; - } -} ); - -jQuery.fn.extend( { - css: function( name, value ) { - return access( this, function( elem, name, value ) { - var styles, len, - map = {}, - i = 0; - - if ( Array.isArray( name ) ) { - styles = getStyles( elem ); - len = name.length; - - for ( ; i < len; i++ ) { - map[ name[ i ] ] = jQuery.css( elem, name[ i ], false, styles ); - } - - return map; - } - - return value !== undefined ? - jQuery.style( elem, name, value ) : - jQuery.css( elem, name ); - }, name, value, arguments.length > 1 ); - } -} ); - - -function Tween( elem, options, prop, end, easing ) { - return new Tween.prototype.init( elem, options, prop, end, easing ); -} -jQuery.Tween = Tween; - -Tween.prototype = { - constructor: Tween, - init: function( elem, options, prop, end, easing, unit ) { - this.elem = elem; - this.prop = prop; - this.easing = easing || jQuery.easing._default; - this.options = options; - this.start = this.now = this.cur(); - this.end = end; - this.unit = unit || ( jQuery.cssNumber[ prop ] ? "" : "px" ); - }, - cur: function() { - var hooks = Tween.propHooks[ this.prop ]; - - return hooks && hooks.get ? 
- hooks.get( this ) : - Tween.propHooks._default.get( this ); - }, - run: function( percent ) { - var eased, - hooks = Tween.propHooks[ this.prop ]; - - if ( this.options.duration ) { - this.pos = eased = jQuery.easing[ this.easing ]( - percent, this.options.duration * percent, 0, 1, this.options.duration - ); - } else { - this.pos = eased = percent; - } - this.now = ( this.end - this.start ) * eased + this.start; - - if ( this.options.step ) { - this.options.step.call( this.elem, this.now, this ); - } - - if ( hooks && hooks.set ) { - hooks.set( this ); - } else { - Tween.propHooks._default.set( this ); - } - return this; - } -}; - -Tween.prototype.init.prototype = Tween.prototype; - -Tween.propHooks = { - _default: { - get: function( tween ) { - var result; - - // Use a property on the element directly when it is not a DOM element, - // or when there is no matching style property that exists. - if ( tween.elem.nodeType !== 1 || - tween.elem[ tween.prop ] != null && tween.elem.style[ tween.prop ] == null ) { - return tween.elem[ tween.prop ]; - } - - // Passing an empty string as a 3rd parameter to .css will automatically - // attempt a parseFloat and fallback to a string if the parse fails. - // Simple values such as "10px" are parsed to Float; - // complex values such as "rotate(1rad)" are returned as-is. - result = jQuery.css( tween.elem, tween.prop, "" ); - - // Empty strings, null, undefined and "auto" are converted to 0. - return !result || result === "auto" ? 0 : result; - }, - set: function( tween ) { - - // Use step hook for back compat. - // Use cssHook if its there. - // Use .style if available and use plain properties where available. - if ( jQuery.fx.step[ tween.prop ] ) { - jQuery.fx.step[ tween.prop ]( tween ); - } else if ( tween.elem.nodeType === 1 && ( - jQuery.cssHooks[ tween.prop ] || - tween.elem.style[ finalPropName( tween.prop ) ] != null ) ) { - jQuery.style( tween.elem, tween.prop, tween.now + tween.unit ); - } else { - tween.elem[ tween.prop ] = tween.now; - } - } - } -}; - -// Support: IE <=9 only -// Panic based approach to setting things on disconnected nodes -Tween.propHooks.scrollTop = Tween.propHooks.scrollLeft = { - set: function( tween ) { - if ( tween.elem.nodeType && tween.elem.parentNode ) { - tween.elem[ tween.prop ] = tween.now; - } - } -}; - -jQuery.easing = { - linear: function( p ) { - return p; - }, - swing: function( p ) { - return 0.5 - Math.cos( p * Math.PI ) / 2; - }, - _default: "swing" -}; - -jQuery.fx = Tween.prototype.init; - -// Back compat <1.8 extension point -jQuery.fx.step = {}; - - - - -var - fxNow, inProgress, - rfxtypes = /^(?:toggle|show|hide)$/, - rrun = /queueHooks$/; - -function schedule() { - if ( inProgress ) { - if ( document.hidden === false && window.requestAnimationFrame ) { - window.requestAnimationFrame( schedule ); - } else { - window.setTimeout( schedule, jQuery.fx.interval ); - } - - jQuery.fx.tick(); - } -} - -// Animations created synchronously will run synchronously -function createFxNow() { - window.setTimeout( function() { - fxNow = undefined; - } ); - return ( fxNow = Date.now() ); -} - -// Generate parameters to create a standard animation -function genFx( type, includeWidth ) { - var which, - i = 0, - attrs = { height: type }; - - // If we include width, step value is 1 to do all cssExpand values, - // otherwise step value is 2 to skip over Left and Right - includeWidth = includeWidth ? 
1 : 0; - for ( ; i < 4; i += 2 - includeWidth ) { - which = cssExpand[ i ]; - attrs[ "margin" + which ] = attrs[ "padding" + which ] = type; - } - - if ( includeWidth ) { - attrs.opacity = attrs.width = type; - } - - return attrs; -} - -function createTween( value, prop, animation ) { - var tween, - collection = ( Animation.tweeners[ prop ] || [] ).concat( Animation.tweeners[ "*" ] ), - index = 0, - length = collection.length; - for ( ; index < length; index++ ) { - if ( ( tween = collection[ index ].call( animation, prop, value ) ) ) { - - // We're done with this property - return tween; - } - } -} - -function defaultPrefilter( elem, props, opts ) { - var prop, value, toggle, hooks, oldfire, propTween, restoreDisplay, display, - isBox = "width" in props || "height" in props, - anim = this, - orig = {}, - style = elem.style, - hidden = elem.nodeType && isHiddenWithinTree( elem ), - dataShow = dataPriv.get( elem, "fxshow" ); - - // Queue-skipping animations hijack the fx hooks - if ( !opts.queue ) { - hooks = jQuery._queueHooks( elem, "fx" ); - if ( hooks.unqueued == null ) { - hooks.unqueued = 0; - oldfire = hooks.empty.fire; - hooks.empty.fire = function() { - if ( !hooks.unqueued ) { - oldfire(); - } - }; - } - hooks.unqueued++; - - anim.always( function() { - - // Ensure the complete handler is called before this completes - anim.always( function() { - hooks.unqueued--; - if ( !jQuery.queue( elem, "fx" ).length ) { - hooks.empty.fire(); - } - } ); - } ); - } - - // Detect show/hide animations - for ( prop in props ) { - value = props[ prop ]; - if ( rfxtypes.test( value ) ) { - delete props[ prop ]; - toggle = toggle || value === "toggle"; - if ( value === ( hidden ? "hide" : "show" ) ) { - - // Pretend to be hidden if this is a "show" and - // there is still data from a stopped show/hide - if ( value === "show" && dataShow && dataShow[ prop ] !== undefined ) { - hidden = true; - - // Ignore all other no-op show/hide data - } else { - continue; - } - } - orig[ prop ] = dataShow && dataShow[ prop ] || jQuery.style( elem, prop ); - } - } - - // Bail out if this is a no-op like .hide().hide() - propTween = !jQuery.isEmptyObject( props ); - if ( !propTween && jQuery.isEmptyObject( orig ) ) { - return; - } - - // Restrict "overflow" and "display" styles during box animations - if ( isBox && elem.nodeType === 1 ) { - - // Support: IE <=9 - 11, Edge 12 - 15 - // Record all 3 overflow attributes because IE does not infer the shorthand - // from identically-valued overflowX and overflowY and Edge just mirrors - // the overflowX value there. 
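-		// (Editor's note, illustrative usage only: a box animation such as
-		//   $( "#panel" ).animate( { height: 200 }, 400 );
-		// passes through this prefilter; overflow is forced to "hidden" further
-		// down so content cannot spill while the box resizes, and the three
-		// values recorded on the next line are restored when the animation ends.)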
- opts.overflow = [ style.overflow, style.overflowX, style.overflowY ]; - - // Identify a display type, preferring old show/hide data over the CSS cascade - restoreDisplay = dataShow && dataShow.display; - if ( restoreDisplay == null ) { - restoreDisplay = dataPriv.get( elem, "display" ); - } - display = jQuery.css( elem, "display" ); - if ( display === "none" ) { - if ( restoreDisplay ) { - display = restoreDisplay; - } else { - - // Get nonempty value(s) by temporarily forcing visibility - showHide( [ elem ], true ); - restoreDisplay = elem.style.display || restoreDisplay; - display = jQuery.css( elem, "display" ); - showHide( [ elem ] ); - } - } - - // Animate inline elements as inline-block - if ( display === "inline" || display === "inline-block" && restoreDisplay != null ) { - if ( jQuery.css( elem, "float" ) === "none" ) { - - // Restore the original display value at the end of pure show/hide animations - if ( !propTween ) { - anim.done( function() { - style.display = restoreDisplay; - } ); - if ( restoreDisplay == null ) { - display = style.display; - restoreDisplay = display === "none" ? "" : display; - } - } - style.display = "inline-block"; - } - } - } - - if ( opts.overflow ) { - style.overflow = "hidden"; - anim.always( function() { - style.overflow = opts.overflow[ 0 ]; - style.overflowX = opts.overflow[ 1 ]; - style.overflowY = opts.overflow[ 2 ]; - } ); - } - - // Implement show/hide animations - propTween = false; - for ( prop in orig ) { - - // General show/hide setup for this element animation - if ( !propTween ) { - if ( dataShow ) { - if ( "hidden" in dataShow ) { - hidden = dataShow.hidden; - } - } else { - dataShow = dataPriv.access( elem, "fxshow", { display: restoreDisplay } ); - } - - // Store hidden/visible for toggle so `.stop().toggle()` "reverses" - if ( toggle ) { - dataShow.hidden = !hidden; - } - - // Show elements before animating them - if ( hidden ) { - showHide( [ elem ], true ); - } - - /* eslint-disable no-loop-func */ - - anim.done( function() { - - /* eslint-enable no-loop-func */ - - // The final step of a "hide" animation is actually hiding the element - if ( !hidden ) { - showHide( [ elem ] ); - } - dataPriv.remove( elem, "fxshow" ); - for ( prop in orig ) { - jQuery.style( elem, prop, orig[ prop ] ); - } - } ); - } - - // Per-property setup - propTween = createTween( hidden ? dataShow[ prop ] : 0, prop, anim ); - if ( !( prop in dataShow ) ) { - dataShow[ prop ] = propTween.start; - if ( hidden ) { - propTween.end = propTween.start; - propTween.start = 0; - } - } - } -} - -function propFilter( props, specialEasing ) { - var index, name, easing, value, hooks; - - // camelCase, specialEasing and expand cssHook pass - for ( index in props ) { - name = camelCase( index ); - easing = specialEasing[ name ]; - value = props[ index ]; - if ( Array.isArray( value ) ) { - easing = value[ 1 ]; - value = props[ index ] = value[ 0 ]; - } - - if ( index !== name ) { - props[ name ] = value; - delete props[ index ]; - } - - hooks = jQuery.cssHooks[ name ]; - if ( hooks && "expand" in hooks ) { - value = hooks.expand( value ); - delete props[ name ]; - - // Not quite $.extend, this won't overwrite existing keys. 
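-			// (Editor's note, an illustrative example that is not part of the
-			// original source: with the "padding" expand hook installed above,
-			//   $( el ).animate( { padding: "10px 20px" } )
-			// is rewritten here into paddingTop/paddingRight/paddingBottom/paddingLeft
-			// entries of "10px"/"20px"/"10px"/"20px", each inheriting the easing.)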
- // Reusing 'index' because we have the correct "name" - for ( index in value ) { - if ( !( index in props ) ) { - props[ index ] = value[ index ]; - specialEasing[ index ] = easing; - } - } - } else { - specialEasing[ name ] = easing; - } - } -} - -function Animation( elem, properties, options ) { - var result, - stopped, - index = 0, - length = Animation.prefilters.length, - deferred = jQuery.Deferred().always( function() { - - // Don't match elem in the :animated selector - delete tick.elem; - } ), - tick = function() { - if ( stopped ) { - return false; - } - var currentTime = fxNow || createFxNow(), - remaining = Math.max( 0, animation.startTime + animation.duration - currentTime ), - - // Support: Android 2.3 only - // Archaic crash bug won't allow us to use `1 - ( 0.5 || 0 )` (#12497) - temp = remaining / animation.duration || 0, - percent = 1 - temp, - index = 0, - length = animation.tweens.length; - - for ( ; index < length; index++ ) { - animation.tweens[ index ].run( percent ); - } - - deferred.notifyWith( elem, [ animation, percent, remaining ] ); - - // If there's more to do, yield - if ( percent < 1 && length ) { - return remaining; - } - - // If this was an empty animation, synthesize a final progress notification - if ( !length ) { - deferred.notifyWith( elem, [ animation, 1, 0 ] ); - } - - // Resolve the animation and report its conclusion - deferred.resolveWith( elem, [ animation ] ); - return false; - }, - animation = deferred.promise( { - elem: elem, - props: jQuery.extend( {}, properties ), - opts: jQuery.extend( true, { - specialEasing: {}, - easing: jQuery.easing._default - }, options ), - originalProperties: properties, - originalOptions: options, - startTime: fxNow || createFxNow(), - duration: options.duration, - tweens: [], - createTween: function( prop, end ) { - var tween = jQuery.Tween( elem, animation.opts, prop, end, - animation.opts.specialEasing[ prop ] || animation.opts.easing ); - animation.tweens.push( tween ); - return tween; - }, - stop: function( gotoEnd ) { - var index = 0, - - // If we are going to the end, we want to run all the tweens - // otherwise we skip this part - length = gotoEnd ? 
animation.tweens.length : 0; - if ( stopped ) { - return this; - } - stopped = true; - for ( ; index < length; index++ ) { - animation.tweens[ index ].run( 1 ); - } - - // Resolve when we played the last frame; otherwise, reject - if ( gotoEnd ) { - deferred.notifyWith( elem, [ animation, 1, 0 ] ); - deferred.resolveWith( elem, [ animation, gotoEnd ] ); - } else { - deferred.rejectWith( elem, [ animation, gotoEnd ] ); - } - return this; - } - } ), - props = animation.props; - - propFilter( props, animation.opts.specialEasing ); - - for ( ; index < length; index++ ) { - result = Animation.prefilters[ index ].call( animation, elem, props, animation.opts ); - if ( result ) { - if ( isFunction( result.stop ) ) { - jQuery._queueHooks( animation.elem, animation.opts.queue ).stop = - result.stop.bind( result ); - } - return result; - } - } - - jQuery.map( props, createTween, animation ); - - if ( isFunction( animation.opts.start ) ) { - animation.opts.start.call( elem, animation ); - } - - // Attach callbacks from options - animation - .progress( animation.opts.progress ) - .done( animation.opts.done, animation.opts.complete ) - .fail( animation.opts.fail ) - .always( animation.opts.always ); - - jQuery.fx.timer( - jQuery.extend( tick, { - elem: elem, - anim: animation, - queue: animation.opts.queue - } ) - ); - - return animation; -} - -jQuery.Animation = jQuery.extend( Animation, { - - tweeners: { - "*": [ function( prop, value ) { - var tween = this.createTween( prop, value ); - adjustCSS( tween.elem, prop, rcssNum.exec( value ), tween ); - return tween; - } ] - }, - - tweener: function( props, callback ) { - if ( isFunction( props ) ) { - callback = props; - props = [ "*" ]; - } else { - props = props.match( rnothtmlwhite ); - } - - var prop, - index = 0, - length = props.length; - - for ( ; index < length; index++ ) { - prop = props[ index ]; - Animation.tweeners[ prop ] = Animation.tweeners[ prop ] || []; - Animation.tweeners[ prop ].unshift( callback ); - } - }, - - prefilters: [ defaultPrefilter ], - - prefilter: function( callback, prepend ) { - if ( prepend ) { - Animation.prefilters.unshift( callback ); - } else { - Animation.prefilters.push( callback ); - } - } -} ); - -jQuery.speed = function( speed, easing, fn ) { - var opt = speed && typeof speed === "object" ? 
jQuery.extend( {}, speed ) : { - complete: fn || !fn && easing || - isFunction( speed ) && speed, - duration: speed, - easing: fn && easing || easing && !isFunction( easing ) && easing - }; - - // Go to the end state if fx are off - if ( jQuery.fx.off ) { - opt.duration = 0; - - } else { - if ( typeof opt.duration !== "number" ) { - if ( opt.duration in jQuery.fx.speeds ) { - opt.duration = jQuery.fx.speeds[ opt.duration ]; - - } else { - opt.duration = jQuery.fx.speeds._default; - } - } - } - - // Normalize opt.queue - true/undefined/null -> "fx" - if ( opt.queue == null || opt.queue === true ) { - opt.queue = "fx"; - } - - // Queueing - opt.old = opt.complete; - - opt.complete = function() { - if ( isFunction( opt.old ) ) { - opt.old.call( this ); - } - - if ( opt.queue ) { - jQuery.dequeue( this, opt.queue ); - } - }; - - return opt; -}; - -jQuery.fn.extend( { - fadeTo: function( speed, to, easing, callback ) { - - // Show any hidden elements after setting opacity to 0 - return this.filter( isHiddenWithinTree ).css( "opacity", 0 ).show() - - // Animate to the value specified - .end().animate( { opacity: to }, speed, easing, callback ); - }, - animate: function( prop, speed, easing, callback ) { - var empty = jQuery.isEmptyObject( prop ), - optall = jQuery.speed( speed, easing, callback ), - doAnimation = function() { - - // Operate on a copy of prop so per-property easing won't be lost - var anim = Animation( this, jQuery.extend( {}, prop ), optall ); - - // Empty animations, or finishing resolves immediately - if ( empty || dataPriv.get( this, "finish" ) ) { - anim.stop( true ); - } - }; - doAnimation.finish = doAnimation; - - return empty || optall.queue === false ? - this.each( doAnimation ) : - this.queue( optall.queue, doAnimation ); - }, - stop: function( type, clearQueue, gotoEnd ) { - var stopQueue = function( hooks ) { - var stop = hooks.stop; - delete hooks.stop; - stop( gotoEnd ); - }; - - if ( typeof type !== "string" ) { - gotoEnd = clearQueue; - clearQueue = type; - type = undefined; - } - if ( clearQueue && type !== false ) { - this.queue( type || "fx", [] ); - } - - return this.each( function() { - var dequeue = true, - index = type != null && type + "queueHooks", - timers = jQuery.timers, - data = dataPriv.get( this ); - - if ( index ) { - if ( data[ index ] && data[ index ].stop ) { - stopQueue( data[ index ] ); - } - } else { - for ( index in data ) { - if ( data[ index ] && data[ index ].stop && rrun.test( index ) ) { - stopQueue( data[ index ] ); - } - } - } - - for ( index = timers.length; index--; ) { - if ( timers[ index ].elem === this && - ( type == null || timers[ index ].queue === type ) ) { - - timers[ index ].anim.stop( gotoEnd ); - dequeue = false; - timers.splice( index, 1 ); - } - } - - // Start the next in the queue if the last step wasn't forced. - // Timers currently will call their complete callbacks, which - // will dequeue but only if they were gotoEnd. - if ( dequeue || !gotoEnd ) { - jQuery.dequeue( this, type ); - } - } ); - }, - finish: function( type ) { - if ( type !== false ) { - type = type || "fx"; - } - return this.each( function() { - var index, - data = dataPriv.get( this ), - queue = data[ type + "queue" ], - hooks = data[ type + "queueHooks" ], - timers = jQuery.timers, - length = queue ? 
queue.length : 0; - - // Enable finishing flag on private data - data.finish = true; - - // Empty the queue first - jQuery.queue( this, type, [] ); - - if ( hooks && hooks.stop ) { - hooks.stop.call( this, true ); - } - - // Look for any active animations, and finish them - for ( index = timers.length; index--; ) { - if ( timers[ index ].elem === this && timers[ index ].queue === type ) { - timers[ index ].anim.stop( true ); - timers.splice( index, 1 ); - } - } - - // Look for any animations in the old queue and finish them - for ( index = 0; index < length; index++ ) { - if ( queue[ index ] && queue[ index ].finish ) { - queue[ index ].finish.call( this ); - } - } - - // Turn off finishing flag - delete data.finish; - } ); - } -} ); - -jQuery.each( [ "toggle", "show", "hide" ], function( i, name ) { - var cssFn = jQuery.fn[ name ]; - jQuery.fn[ name ] = function( speed, easing, callback ) { - return speed == null || typeof speed === "boolean" ? - cssFn.apply( this, arguments ) : - this.animate( genFx( name, true ), speed, easing, callback ); - }; -} ); - -// Generate shortcuts for custom animations -jQuery.each( { - slideDown: genFx( "show" ), - slideUp: genFx( "hide" ), - slideToggle: genFx( "toggle" ), - fadeIn: { opacity: "show" }, - fadeOut: { opacity: "hide" }, - fadeToggle: { opacity: "toggle" } -}, function( name, props ) { - jQuery.fn[ name ] = function( speed, easing, callback ) { - return this.animate( props, speed, easing, callback ); - }; -} ); - -jQuery.timers = []; -jQuery.fx.tick = function() { - var timer, - i = 0, - timers = jQuery.timers; - - fxNow = Date.now(); - - for ( ; i < timers.length; i++ ) { - timer = timers[ i ]; - - // Run the timer and safely remove it when done (allowing for external removal) - if ( !timer() && timers[ i ] === timer ) { - timers.splice( i--, 1 ); - } - } - - if ( !timers.length ) { - jQuery.fx.stop(); - } - fxNow = undefined; -}; - -jQuery.fx.timer = function( timer ) { - jQuery.timers.push( timer ); - jQuery.fx.start(); -}; - -jQuery.fx.interval = 13; -jQuery.fx.start = function() { - if ( inProgress ) { - return; - } - - inProgress = true; - schedule(); -}; - -jQuery.fx.stop = function() { - inProgress = null; -}; - -jQuery.fx.speeds = { - slow: 600, - fast: 200, - - // Default speed - _default: 400 -}; - - -// Based off of the plugin by Clint Helfers, with permission. -// https://web.archive.org/web/20100324014747/http://blindsignals.com/index.php/2009/07/jquery-delay/ -jQuery.fn.delay = function( time, type ) { - time = jQuery.fx ? 
jQuery.fx.speeds[ time ] || time : time; - type = type || "fx"; - - return this.queue( type, function( next, hooks ) { - var timeout = window.setTimeout( next, time ); - hooks.stop = function() { - window.clearTimeout( timeout ); - }; - } ); -}; - - -( function() { - var input = document.createElement( "input" ), - select = document.createElement( "select" ), - opt = select.appendChild( document.createElement( "option" ) ); - - input.type = "checkbox"; - - // Support: Android <=4.3 only - // Default value for a checkbox should be "on" - support.checkOn = input.value !== ""; - - // Support: IE <=11 only - // Must access selectedIndex to make default options select - support.optSelected = opt.selected; - - // Support: IE <=11 only - // An input loses its value after becoming a radio - input = document.createElement( "input" ); - input.value = "t"; - input.type = "radio"; - support.radioValue = input.value === "t"; -} )(); - - -var boolHook, - attrHandle = jQuery.expr.attrHandle; - -jQuery.fn.extend( { - attr: function( name, value ) { - return access( this, jQuery.attr, name, value, arguments.length > 1 ); - }, - - removeAttr: function( name ) { - return this.each( function() { - jQuery.removeAttr( this, name ); - } ); - } -} ); - -jQuery.extend( { - attr: function( elem, name, value ) { - var ret, hooks, - nType = elem.nodeType; - - // Don't get/set attributes on text, comment and attribute nodes - if ( nType === 3 || nType === 8 || nType === 2 ) { - return; - } - - // Fallback to prop when attributes are not supported - if ( typeof elem.getAttribute === "undefined" ) { - return jQuery.prop( elem, name, value ); - } - - // Attribute hooks are determined by the lowercase version - // Grab necessary hook if one is defined - if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { - hooks = jQuery.attrHooks[ name.toLowerCase() ] || - ( jQuery.expr.match.bool.test( name ) ? boolHook : undefined ); - } - - if ( value !== undefined ) { - if ( value === null ) { - jQuery.removeAttr( elem, name ); - return; - } - - if ( hooks && "set" in hooks && - ( ret = hooks.set( elem, value, name ) ) !== undefined ) { - return ret; - } - - elem.setAttribute( name, value + "" ); - return value; - } - - if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { - return ret; - } - - ret = jQuery.find.attr( elem, name ); - - // Non-existent attributes return null, we normalize to undefined - return ret == null ? 
undefined : ret; - }, - - attrHooks: { - type: { - set: function( elem, value ) { - if ( !support.radioValue && value === "radio" && - nodeName( elem, "input" ) ) { - var val = elem.value; - elem.setAttribute( "type", value ); - if ( val ) { - elem.value = val; - } - return value; - } - } - } - }, - - removeAttr: function( elem, value ) { - var name, - i = 0, - - // Attribute names can contain non-HTML whitespace characters - // https://html.spec.whatwg.org/multipage/syntax.html#attributes-2 - attrNames = value && value.match( rnothtmlwhite ); - - if ( attrNames && elem.nodeType === 1 ) { - while ( ( name = attrNames[ i++ ] ) ) { - elem.removeAttribute( name ); - } - } - } -} ); - -// Hooks for boolean attributes -boolHook = { - set: function( elem, value, name ) { - if ( value === false ) { - - // Remove boolean attributes when set to false - jQuery.removeAttr( elem, name ); - } else { - elem.setAttribute( name, name ); - } - return name; - } -}; - -jQuery.each( jQuery.expr.match.bool.source.match( /\w+/g ), function( i, name ) { - var getter = attrHandle[ name ] || jQuery.find.attr; - - attrHandle[ name ] = function( elem, name, isXML ) { - var ret, handle, - lowercaseName = name.toLowerCase(); - - if ( !isXML ) { - - // Avoid an infinite loop by temporarily removing this function from the getter - handle = attrHandle[ lowercaseName ]; - attrHandle[ lowercaseName ] = ret; - ret = getter( elem, name, isXML ) != null ? - lowercaseName : - null; - attrHandle[ lowercaseName ] = handle; - } - return ret; - }; -} ); - - - - -var rfocusable = /^(?:input|select|textarea|button)$/i, - rclickable = /^(?:a|area)$/i; - -jQuery.fn.extend( { - prop: function( name, value ) { - return access( this, jQuery.prop, name, value, arguments.length > 1 ); - }, - - removeProp: function( name ) { - return this.each( function() { - delete this[ jQuery.propFix[ name ] || name ]; - } ); - } -} ); - -jQuery.extend( { - prop: function( elem, name, value ) { - var ret, hooks, - nType = elem.nodeType; - - // Don't get/set properties on text, comment and attribute nodes - if ( nType === 3 || nType === 8 || nType === 2 ) { - return; - } - - if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { - - // Fix name and attach hooks - name = jQuery.propFix[ name ] || name; - hooks = jQuery.propHooks[ name ]; - } - - if ( value !== undefined ) { - if ( hooks && "set" in hooks && - ( ret = hooks.set( elem, value, name ) ) !== undefined ) { - return ret; - } - - return ( elem[ name ] = value ); - } - - if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { - return ret; - } - - return elem[ name ]; - }, - - propHooks: { - tabIndex: { - get: function( elem ) { - - // Support: IE <=9 - 11 only - // elem.tabIndex doesn't always return the - // correct value when it hasn't been explicitly set - // https://web.archive.org/web/20141116233347/http://fluidproject.org/blog/2008/01/09/getting-setting-and-removing-tabindex-values-with-javascript/ - // Use proper attribute retrieval(#12072) - var tabindex = jQuery.find.attr( elem, "tabindex" ); - - if ( tabindex ) { - return parseInt( tabindex, 10 ); - } - - if ( - rfocusable.test( elem.nodeName ) || - rclickable.test( elem.nodeName ) && - elem.href - ) { - return 0; - } - - return -1; - } - } - }, - - propFix: { - "for": "htmlFor", - "class": "className" - } -} ); - -// Support: IE <=11 only -// Accessing the selectedIndex property -// forces the browser to respect setting selected -// on the option -// The getter ensures a default option is selected -// when in an 
optgroup -// eslint rule "no-unused-expressions" is disabled for this code -// since it considers such accessions noop -if ( !support.optSelected ) { - jQuery.propHooks.selected = { - get: function( elem ) { - - /* eslint no-unused-expressions: "off" */ - - var parent = elem.parentNode; - if ( parent && parent.parentNode ) { - parent.parentNode.selectedIndex; - } - return null; - }, - set: function( elem ) { - - /* eslint no-unused-expressions: "off" */ - - var parent = elem.parentNode; - if ( parent ) { - parent.selectedIndex; - - if ( parent.parentNode ) { - parent.parentNode.selectedIndex; - } - } - } - }; -} - -jQuery.each( [ - "tabIndex", - "readOnly", - "maxLength", - "cellSpacing", - "cellPadding", - "rowSpan", - "colSpan", - "useMap", - "frameBorder", - "contentEditable" -], function() { - jQuery.propFix[ this.toLowerCase() ] = this; -} ); - - - - - // Strip and collapse whitespace according to HTML spec - // https://infra.spec.whatwg.org/#strip-and-collapse-ascii-whitespace - function stripAndCollapse( value ) { - var tokens = value.match( rnothtmlwhite ) || []; - return tokens.join( " " ); - } - - -function getClass( elem ) { - return elem.getAttribute && elem.getAttribute( "class" ) || ""; -} - -function classesToArray( value ) { - if ( Array.isArray( value ) ) { - return value; - } - if ( typeof value === "string" ) { - return value.match( rnothtmlwhite ) || []; - } - return []; -} - -jQuery.fn.extend( { - addClass: function( value ) { - var classes, elem, cur, curValue, clazz, j, finalValue, - i = 0; - - if ( isFunction( value ) ) { - return this.each( function( j ) { - jQuery( this ).addClass( value.call( this, j, getClass( this ) ) ); - } ); - } - - classes = classesToArray( value ); - - if ( classes.length ) { - while ( ( elem = this[ i++ ] ) ) { - curValue = getClass( elem ); - cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); - - if ( cur ) { - j = 0; - while ( ( clazz = classes[ j++ ] ) ) { - if ( cur.indexOf( " " + clazz + " " ) < 0 ) { - cur += clazz + " "; - } - } - - // Only assign if different to avoid unneeded rendering. - finalValue = stripAndCollapse( cur ); - if ( curValue !== finalValue ) { - elem.setAttribute( "class", finalValue ); - } - } - } - } - - return this; - }, - - removeClass: function( value ) { - var classes, elem, cur, curValue, clazz, j, finalValue, - i = 0; - - if ( isFunction( value ) ) { - return this.each( function( j ) { - jQuery( this ).removeClass( value.call( this, j, getClass( this ) ) ); - } ); - } - - if ( !arguments.length ) { - return this.attr( "class", "" ); - } - - classes = classesToArray( value ); - - if ( classes.length ) { - while ( ( elem = this[ i++ ] ) ) { - curValue = getClass( elem ); - - // This expression is here for better compressibility (see addClass) - cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); - - if ( cur ) { - j = 0; - while ( ( clazz = classes[ j++ ] ) ) { - - // Remove *all* instances - while ( cur.indexOf( " " + clazz + " " ) > -1 ) { - cur = cur.replace( " " + clazz + " ", " " ); - } - } - - // Only assign if different to avoid unneeded rendering. - finalValue = stripAndCollapse( cur ); - if ( curValue !== finalValue ) { - elem.setAttribute( "class", finalValue ); - } - } - } - } - - return this; - }, - - toggleClass: function( value, stateVal ) { - var type = typeof value, - isValidValue = type === "string" || Array.isArray( value ); - - if ( typeof stateVal === "boolean" && isValidValue ) { - return stateVal ? 
this.addClass( value ) : this.removeClass( value ); - } - - if ( isFunction( value ) ) { - return this.each( function( i ) { - jQuery( this ).toggleClass( - value.call( this, i, getClass( this ), stateVal ), - stateVal - ); - } ); - } - - return this.each( function() { - var className, i, self, classNames; - - if ( isValidValue ) { - - // Toggle individual class names - i = 0; - self = jQuery( this ); - classNames = classesToArray( value ); - - while ( ( className = classNames[ i++ ] ) ) { - - // Check each className given, space separated list - if ( self.hasClass( className ) ) { - self.removeClass( className ); - } else { - self.addClass( className ); - } - } - - // Toggle whole class name - } else if ( value === undefined || type === "boolean" ) { - className = getClass( this ); - if ( className ) { - - // Store className if set - dataPriv.set( this, "__className__", className ); - } - - // If the element has a class name or if we're passed `false`, - // then remove the whole classname (if there was one, the above saved it). - // Otherwise bring back whatever was previously saved (if anything), - // falling back to the empty string if nothing was stored. - if ( this.setAttribute ) { - this.setAttribute( "class", - className || value === false ? - "" : - dataPriv.get( this, "__className__" ) || "" - ); - } - } - } ); - }, - - hasClass: function( selector ) { - var className, elem, - i = 0; - - className = " " + selector + " "; - while ( ( elem = this[ i++ ] ) ) { - if ( elem.nodeType === 1 && - ( " " + stripAndCollapse( getClass( elem ) ) + " " ).indexOf( className ) > -1 ) { - return true; - } - } - - return false; - } -} ); - - - - -var rreturn = /\r/g; - -jQuery.fn.extend( { - val: function( value ) { - var hooks, ret, valueIsFunction, - elem = this[ 0 ]; - - if ( !arguments.length ) { - if ( elem ) { - hooks = jQuery.valHooks[ elem.type ] || - jQuery.valHooks[ elem.nodeName.toLowerCase() ]; - - if ( hooks && - "get" in hooks && - ( ret = hooks.get( elem, "value" ) ) !== undefined - ) { - return ret; - } - - ret = elem.value; - - // Handle most common string cases - if ( typeof ret === "string" ) { - return ret.replace( rreturn, "" ); - } - - // Handle cases where value is null/undef or number - return ret == null ? "" : ret; - } - - return; - } - - valueIsFunction = isFunction( value ); - - return this.each( function( i ) { - var val; - - if ( this.nodeType !== 1 ) { - return; - } - - if ( valueIsFunction ) { - val = value.call( this, i, jQuery( this ).val() ); - } else { - val = value; - } - - // Treat null/undefined as ""; convert numbers to string - if ( val == null ) { - val = ""; - - } else if ( typeof val === "number" ) { - val += ""; - - } else if ( Array.isArray( val ) ) { - val = jQuery.map( val, function( value ) { - return value == null ? "" : value + ""; - } ); - } - - hooks = jQuery.valHooks[ this.type ] || jQuery.valHooks[ this.nodeName.toLowerCase() ]; - - // If set returns undefined, fall back to normal setting - if ( !hooks || !( "set" in hooks ) || hooks.set( this, val, "value" ) === undefined ) { - this.value = val; - } - } ); - } -} ); - -jQuery.extend( { - valHooks: { - option: { - get: function( elem ) { - - var val = jQuery.find.attr( elem, "value" ); - return val != null ? 
- val : - - // Support: IE <=10 - 11 only - // option.text throws exceptions (#14686, #14858) - // Strip and collapse whitespace - // https://html.spec.whatwg.org/#strip-and-collapse-whitespace - stripAndCollapse( jQuery.text( elem ) ); - } - }, - select: { - get: function( elem ) { - var value, option, i, - options = elem.options, - index = elem.selectedIndex, - one = elem.type === "select-one", - values = one ? null : [], - max = one ? index + 1 : options.length; - - if ( index < 0 ) { - i = max; - - } else { - i = one ? index : 0; - } - - // Loop through all the selected options - for ( ; i < max; i++ ) { - option = options[ i ]; - - // Support: IE <=9 only - // IE8-9 doesn't update selected after form reset (#2551) - if ( ( option.selected || i === index ) && - - // Don't return options that are disabled or in a disabled optgroup - !option.disabled && - ( !option.parentNode.disabled || - !nodeName( option.parentNode, "optgroup" ) ) ) { - - // Get the specific value for the option - value = jQuery( option ).val(); - - // We don't need an array for one selects - if ( one ) { - return value; - } - - // Multi-Selects return an array - values.push( value ); - } - } - - return values; - }, - - set: function( elem, value ) { - var optionSet, option, - options = elem.options, - values = jQuery.makeArray( value ), - i = options.length; - - while ( i-- ) { - option = options[ i ]; - - /* eslint-disable no-cond-assign */ - - if ( option.selected = - jQuery.inArray( jQuery.valHooks.option.get( option ), values ) > -1 - ) { - optionSet = true; - } - - /* eslint-enable no-cond-assign */ - } - - // Force browsers to behave consistently when non-matching value is set - if ( !optionSet ) { - elem.selectedIndex = -1; - } - return values; - } - } - } -} ); - -// Radios and checkboxes getter/setter -jQuery.each( [ "radio", "checkbox" ], function() { - jQuery.valHooks[ this ] = { - set: function( elem, value ) { - if ( Array.isArray( value ) ) { - return ( elem.checked = jQuery.inArray( jQuery( elem ).val(), value ) > -1 ); - } - } - }; - if ( !support.checkOn ) { - jQuery.valHooks[ this ].get = function( elem ) { - return elem.getAttribute( "value" ) === null ? "on" : elem.value; - }; - } -} ); - - - - -// Return jQuery for attributes-only inclusion - - -support.focusin = "onfocusin" in window; - - -var rfocusMorph = /^(?:focusinfocus|focusoutblur)$/, - stopPropagationCallback = function( e ) { - e.stopPropagation(); - }; - -jQuery.extend( jQuery.event, { - - trigger: function( event, data, elem, onlyHandlers ) { - - var i, cur, tmp, bubbleType, ontype, handle, special, lastElement, - eventPath = [ elem || document ], - type = hasOwn.call( event, "type" ) ? event.type : event, - namespaces = hasOwn.call( event, "namespace" ) ? event.namespace.split( "." ) : []; - - cur = lastElement = tmp = elem = elem || document; - - // Don't do events on text and comment nodes - if ( elem.nodeType === 3 || elem.nodeType === 8 ) { - return; - } - - // focus/blur morphs to focusin/out; ensure we're not firing them right now - if ( rfocusMorph.test( type + jQuery.event.triggered ) ) { - return; - } - - if ( type.indexOf( "." ) > -1 ) { - - // Namespaced trigger; create a regexp to match event type in handle() - namespaces = type.split( "." ); - type = namespaces.shift(); - namespaces.sort(); - } - ontype = type.indexOf( ":" ) < 0 && "on" + type; - - // Caller can pass in a jQuery.Event object, Object, or just an event type string - event = event[ jQuery.expando ] ? 
- event : - new jQuery.Event( type, typeof event === "object" && event ); - - // Trigger bitmask: & 1 for native handlers; & 2 for jQuery (always true) - event.isTrigger = onlyHandlers ? 2 : 3; - event.namespace = namespaces.join( "." ); - event.rnamespace = event.namespace ? - new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ) : - null; - - // Clean up the event in case it is being reused - event.result = undefined; - if ( !event.target ) { - event.target = elem; - } - - // Clone any incoming data and prepend the event, creating the handler arg list - data = data == null ? - [ event ] : - jQuery.makeArray( data, [ event ] ); - - // Allow special events to draw outside the lines - special = jQuery.event.special[ type ] || {}; - if ( !onlyHandlers && special.trigger && special.trigger.apply( elem, data ) === false ) { - return; - } - - // Determine event propagation path in advance, per W3C events spec (#9951) - // Bubble up to document, then to window; watch for a global ownerDocument var (#9724) - if ( !onlyHandlers && !special.noBubble && !isWindow( elem ) ) { - - bubbleType = special.delegateType || type; - if ( !rfocusMorph.test( bubbleType + type ) ) { - cur = cur.parentNode; - } - for ( ; cur; cur = cur.parentNode ) { - eventPath.push( cur ); - tmp = cur; - } - - // Only add window if we got to document (e.g., not plain obj or detached DOM) - if ( tmp === ( elem.ownerDocument || document ) ) { - eventPath.push( tmp.defaultView || tmp.parentWindow || window ); - } - } - - // Fire handlers on the event path - i = 0; - while ( ( cur = eventPath[ i++ ] ) && !event.isPropagationStopped() ) { - lastElement = cur; - event.type = i > 1 ? - bubbleType : - special.bindType || type; - - // jQuery handler - handle = ( dataPriv.get( cur, "events" ) || {} )[ event.type ] && - dataPriv.get( cur, "handle" ); - if ( handle ) { - handle.apply( cur, data ); - } - - // Native handler - handle = ontype && cur[ ontype ]; - if ( handle && handle.apply && acceptData( cur ) ) { - event.result = handle.apply( cur, data ); - if ( event.result === false ) { - event.preventDefault(); - } - } - } - event.type = type; - - // If nobody prevented the default action, do it now - if ( !onlyHandlers && !event.isDefaultPrevented() ) { - - if ( ( !special._default || - special._default.apply( eventPath.pop(), data ) === false ) && - acceptData( elem ) ) { - - // Call a native DOM method on the target with the same name as the event. 
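-				// (Editor's note, illustrative example: $( "form" ).trigger( "submit" )
-				// lands in this branch and invokes the native elem.submit(); the inline
-				// elem.onsubmit handler is cleared first so it does not fire twice.)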
- // Don't do default actions on window, that's where global variables be (#6170) - if ( ontype && isFunction( elem[ type ] ) && !isWindow( elem ) ) { - - // Don't re-trigger an onFOO event when we call its FOO() method - tmp = elem[ ontype ]; - - if ( tmp ) { - elem[ ontype ] = null; - } - - // Prevent re-triggering of the same event, since we already bubbled it above - jQuery.event.triggered = type; - - if ( event.isPropagationStopped() ) { - lastElement.addEventListener( type, stopPropagationCallback ); - } - - elem[ type ](); - - if ( event.isPropagationStopped() ) { - lastElement.removeEventListener( type, stopPropagationCallback ); - } - - jQuery.event.triggered = undefined; - - if ( tmp ) { - elem[ ontype ] = tmp; - } - } - } - } - - return event.result; - }, - - // Piggyback on a donor event to simulate a different one - // Used only for `focus(in | out)` events - simulate: function( type, elem, event ) { - var e = jQuery.extend( - new jQuery.Event(), - event, - { - type: type, - isSimulated: true - } - ); - - jQuery.event.trigger( e, null, elem ); - } - -} ); - -jQuery.fn.extend( { - - trigger: function( type, data ) { - return this.each( function() { - jQuery.event.trigger( type, data, this ); - } ); - }, - triggerHandler: function( type, data ) { - var elem = this[ 0 ]; - if ( elem ) { - return jQuery.event.trigger( type, data, elem, true ); - } - } -} ); - - -// Support: Firefox <=44 -// Firefox doesn't have focus(in | out) events -// Related ticket - https://bugzilla.mozilla.org/show_bug.cgi?id=687787 -// -// Support: Chrome <=48 - 49, Safari <=9.0 - 9.1 -// focus(in | out) events fire after focus & blur events, -// which is spec violation - http://www.w3.org/TR/DOM-Level-3-Events/#events-focusevent-event-order -// Related ticket - https://bugs.chromium.org/p/chromium/issues/detail?id=449857 -if ( !support.focusin ) { - jQuery.each( { focus: "focusin", blur: "focusout" }, function( orig, fix ) { - - // Attach a single capturing handler on the document while someone wants focusin/focusout - var handler = function( event ) { - jQuery.event.simulate( fix, event.target, jQuery.event.fix( event ) ); - }; - - jQuery.event.special[ fix ] = { - setup: function() { - var doc = this.ownerDocument || this, - attaches = dataPriv.access( doc, fix ); - - if ( !attaches ) { - doc.addEventListener( orig, handler, true ); - } - dataPriv.access( doc, fix, ( attaches || 0 ) + 1 ); - }, - teardown: function() { - var doc = this.ownerDocument || this, - attaches = dataPriv.access( doc, fix ) - 1; - - if ( !attaches ) { - doc.removeEventListener( orig, handler, true ); - dataPriv.remove( doc, fix ); - - } else { - dataPriv.access( doc, fix, attaches ); - } - } - }; - } ); -} -var location = window.location; - -var nonce = Date.now(); - -var rquery = ( /\?/ ); - - - -// Cross-browser xml parsing -jQuery.parseXML = function( data ) { - var xml; - if ( !data || typeof data !== "string" ) { - return null; - } - - // Support: IE 9 - 11 only - // IE throws on parseFromString with invalid input. 
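-	// (Editor's note, illustrative usage that is not part of the original
-	// source:
-	//   var doc = jQuery.parseXML( "<root><item/></root>" );
-	//   jQuery( doc ).find( "item" ).length; // 1
-	// Malformed input falls through to the jQuery.error call below.)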
- try { - xml = ( new window.DOMParser() ).parseFromString( data, "text/xml" ); - } catch ( e ) { - xml = undefined; - } - - if ( !xml || xml.getElementsByTagName( "parsererror" ).length ) { - jQuery.error( "Invalid XML: " + data ); - } - return xml; -}; - - -var - rbracket = /\[\]$/, - rCRLF = /\r?\n/g, - rsubmitterTypes = /^(?:submit|button|image|reset|file)$/i, - rsubmittable = /^(?:input|select|textarea|keygen)/i; - -function buildParams( prefix, obj, traditional, add ) { - var name; - - if ( Array.isArray( obj ) ) { - - // Serialize array item. - jQuery.each( obj, function( i, v ) { - if ( traditional || rbracket.test( prefix ) ) { - - // Treat each array item as a scalar. - add( prefix, v ); - - } else { - - // Item is non-scalar (array or object), encode its numeric index. - buildParams( - prefix + "[" + ( typeof v === "object" && v != null ? i : "" ) + "]", - v, - traditional, - add - ); - } - } ); - - } else if ( !traditional && toType( obj ) === "object" ) { - - // Serialize object item. - for ( name in obj ) { - buildParams( prefix + "[" + name + "]", obj[ name ], traditional, add ); - } - - } else { - - // Serialize scalar item. - add( prefix, obj ); - } -} - -// Serialize an array of form elements or a set of -// key/values into a query string -jQuery.param = function( a, traditional ) { - var prefix, - s = [], - add = function( key, valueOrFunction ) { - - // If value is a function, invoke it and use its return value - var value = isFunction( valueOrFunction ) ? - valueOrFunction() : - valueOrFunction; - - s[ s.length ] = encodeURIComponent( key ) + "=" + - encodeURIComponent( value == null ? "" : value ); - }; - - if ( a == null ) { - return ""; - } - - // If an array was passed in, assume that it is an array of form elements. - if ( Array.isArray( a ) || ( a.jquery && !jQuery.isPlainObject( a ) ) ) { - - // Serialize the form elements - jQuery.each( a, function() { - add( this.name, this.value ); - } ); - - } else { - - // If traditional, encode the "old" way (the way 1.3.2 or older - // did it), otherwise encode params recursively. - for ( prefix in a ) { - buildParams( prefix, a[ prefix ], traditional, add ); - } - } - - // Return the resulting serialization - return s.join( "&" ); -}; - -jQuery.fn.extend( { - serialize: function() { - return jQuery.param( this.serializeArray() ); - }, - serializeArray: function() { - return this.map( function() { - - // Can add propHook for "elements" to filter or add form elements - var elements = jQuery.prop( this, "elements" ); - return elements ? 
jQuery.makeArray( elements ) : this; - } ) - .filter( function() { - var type = this.type; - - // Use .is( ":disabled" ) so that fieldset[disabled] works - return this.name && !jQuery( this ).is( ":disabled" ) && - rsubmittable.test( this.nodeName ) && !rsubmitterTypes.test( type ) && - ( this.checked || !rcheckableType.test( type ) ); - } ) - .map( function( i, elem ) { - var val = jQuery( this ).val(); - - if ( val == null ) { - return null; - } - - if ( Array.isArray( val ) ) { - return jQuery.map( val, function( val ) { - return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; - } ); - } - - return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; - } ).get(); - } -} ); - - -var - r20 = /%20/g, - rhash = /#.*$/, - rantiCache = /([?&])_=[^&]*/, - rheaders = /^(.*?):[ \t]*([^\r\n]*)$/mg, - - // #7653, #8125, #8152: local protocol detection - rlocalProtocol = /^(?:about|app|app-storage|.+-extension|file|res|widget):$/, - rnoContent = /^(?:GET|HEAD)$/, - rprotocol = /^\/\//, - - /* Prefilters - * 1) They are useful to introduce custom dataTypes (see ajax/jsonp.js for an example) - * 2) These are called: - * - BEFORE asking for a transport - * - AFTER param serialization (s.data is a string if s.processData is true) - * 3) key is the dataType - * 4) the catchall symbol "*" can be used - * 5) execution will start with transport dataType and THEN continue down to "*" if needed - */ - prefilters = {}, - - /* Transports bindings - * 1) key is the dataType - * 2) the catchall symbol "*" can be used - * 3) selection will start with transport dataType and THEN go to "*" if needed - */ - transports = {}, - - // Avoid comment-prolog char sequence (#10098); must appease lint and evade compression - allTypes = "*/".concat( "*" ), - - // Anchor tag for parsing the document origin - originAnchor = document.createElement( "a" ); - originAnchor.href = location.href; - -// Base "constructor" for jQuery.ajaxPrefilter and jQuery.ajaxTransport -function addToPrefiltersOrTransports( structure ) { - - // dataTypeExpression is optional and defaults to "*" - return function( dataTypeExpression, func ) { - - if ( typeof dataTypeExpression !== "string" ) { - func = dataTypeExpression; - dataTypeExpression = "*"; - } - - var dataType, - i = 0, - dataTypes = dataTypeExpression.toLowerCase().match( rnothtmlwhite ) || []; - - if ( isFunction( func ) ) { - - // For each dataType in the dataTypeExpression - while ( ( dataType = dataTypes[ i++ ] ) ) { - - // Prepend if requested - if ( dataType[ 0 ] === "+" ) { - dataType = dataType.slice( 1 ) || "*"; - ( structure[ dataType ] = structure[ dataType ] || [] ).unshift( func ); - - // Otherwise append - } else { - ( structure[ dataType ] = structure[ dataType ] || [] ).push( func ); - } - } - } - }; -} - -// Base inspection function for prefilters and transports -function inspectPrefiltersOrTransports( structure, options, originalOptions, jqXHR ) { - - var inspected = {}, - seekingTransport = ( structure === transports ); - - function inspect( dataType ) { - var selected; - inspected[ dataType ] = true; - jQuery.each( structure[ dataType ] || [], function( _, prefilterOrFactory ) { - var dataTypeOrTransport = prefilterOrFactory( options, originalOptions, jqXHR ); - if ( typeof dataTypeOrTransport === "string" && - !seekingTransport && !inspected[ dataTypeOrTransport ] ) { - - options.dataTypes.unshift( dataTypeOrTransport ); - inspect( dataTypeOrTransport ); - return false; - } else if ( seekingTransport ) { - return !( selected = dataTypeOrTransport ); - } 
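-			// (Editor's note, an illustrative trace under the assumption that the
-			// bundled jsonp module is loaded: a request with dataType "jsonp" runs
-			// the "jsonp" prefilters first; one of them returns the string "script",
-			// which is unshifted onto options.dataTypes and inspected in turn, and
-			// the catchall "*" bucket is tried only if nothing more specific matched.)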
- } ); - return selected; - } - - return inspect( options.dataTypes[ 0 ] ) || !inspected[ "*" ] && inspect( "*" ); -} - -// A special extend for ajax options -// that takes "flat" options (not to be deep extended) -// Fixes #9887 -function ajaxExtend( target, src ) { - var key, deep, - flatOptions = jQuery.ajaxSettings.flatOptions || {}; - - for ( key in src ) { - if ( src[ key ] !== undefined ) { - ( flatOptions[ key ] ? target : ( deep || ( deep = {} ) ) )[ key ] = src[ key ]; - } - } - if ( deep ) { - jQuery.extend( true, target, deep ); - } - - return target; -} - -/* Handles responses to an ajax request: - * - finds the right dataType (mediates between content-type and expected dataType) - * - returns the corresponding response - */ -function ajaxHandleResponses( s, jqXHR, responses ) { - - var ct, type, finalDataType, firstDataType, - contents = s.contents, - dataTypes = s.dataTypes; - - // Remove auto dataType and get content-type in the process - while ( dataTypes[ 0 ] === "*" ) { - dataTypes.shift(); - if ( ct === undefined ) { - ct = s.mimeType || jqXHR.getResponseHeader( "Content-Type" ); - } - } - - // Check if we're dealing with a known content-type - if ( ct ) { - for ( type in contents ) { - if ( contents[ type ] && contents[ type ].test( ct ) ) { - dataTypes.unshift( type ); - break; - } - } - } - - // Check to see if we have a response for the expected dataType - if ( dataTypes[ 0 ] in responses ) { - finalDataType = dataTypes[ 0 ]; - } else { - - // Try convertible dataTypes - for ( type in responses ) { - if ( !dataTypes[ 0 ] || s.converters[ type + " " + dataTypes[ 0 ] ] ) { - finalDataType = type; - break; - } - if ( !firstDataType ) { - firstDataType = type; - } - } - - // Or just use first one - finalDataType = finalDataType || firstDataType; - } - - // If we found a dataType - // We add the dataType to the list if needed - // and return the corresponding response - if ( finalDataType ) { - if ( finalDataType !== dataTypes[ 0 ] ) { - dataTypes.unshift( finalDataType ); - } - return responses[ finalDataType ]; - } -} - -/* Chain conversions given the request and the original response - * Also sets the responseXXX fields on the jqXHR instance - */ -function ajaxConvert( s, response, jqXHR, isSuccess ) { - var conv2, current, conv, tmp, prev, - converters = {}, - - // Work with a copy of dataTypes in case we need to modify it for conversion - dataTypes = s.dataTypes.slice(); - - // Create converters map with lowercased keys - if ( dataTypes[ 1 ] ) { - for ( conv in s.converters ) { - converters[ conv.toLowerCase() ] = s.converters[ conv ]; - } - } - - current = dataTypes.shift(); - - // Convert to each sequential dataType - while ( current ) { - - if ( s.responseFields[ current ] ) { - jqXHR[ s.responseFields[ current ] ] = response; - } - - // Apply the dataFilter if provided - if ( !prev && isSuccess && s.dataFilter ) { - response = s.dataFilter( response, s.dataType ); - } - - prev = current; - current = dataTypes.shift(); - - if ( current ) { - - // There's only work to do if current dataType is non-auto - if ( current === "*" ) { - - current = prev; - - // Convert response if prev dataType is non-auto and differs from current - } else if ( prev !== "*" && prev !== current ) { - - // Seek a direct converter - conv = converters[ prev + " " + current ] || converters[ "* " + current ]; - - // If none found, seek a pair - if ( !conv ) { - for ( conv2 in converters ) { - - // If conv2 outputs current - tmp = conv2.split( " " ); - if ( tmp[ 1 ] === current ) { - - // If 
prev can be converted to accepted input - conv = converters[ prev + " " + tmp[ 0 ] ] || - converters[ "* " + tmp[ 0 ] ]; - if ( conv ) { - - // Condense equivalence converters - if ( conv === true ) { - conv = converters[ conv2 ]; - - // Otherwise, insert the intermediate dataType - } else if ( converters[ conv2 ] !== true ) { - current = tmp[ 0 ]; - dataTypes.unshift( tmp[ 1 ] ); - } - break; - } - } - } - } - - // Apply converter (if not an equivalence) - if ( conv !== true ) { - - // Unless errors are allowed to bubble, catch and return them - if ( conv && s.throws ) { - response = conv( response ); - } else { - try { - response = conv( response ); - } catch ( e ) { - return { - state: "parsererror", - error: conv ? e : "No conversion from " + prev + " to " + current - }; - } - } - } - } - } - } - - return { state: "success", data: response }; -} - -jQuery.extend( { - - // Counter for holding the number of active queries - active: 0, - - // Last-Modified header cache for next request - lastModified: {}, - etag: {}, - - ajaxSettings: { - url: location.href, - type: "GET", - isLocal: rlocalProtocol.test( location.protocol ), - global: true, - processData: true, - async: true, - contentType: "application/x-www-form-urlencoded; charset=UTF-8", - - /* - timeout: 0, - data: null, - dataType: null, - username: null, - password: null, - cache: null, - throws: false, - traditional: false, - headers: {}, - */ - - accepts: { - "*": allTypes, - text: "text/plain", - html: "text/html", - xml: "application/xml, text/xml", - json: "application/json, text/javascript" - }, - - contents: { - xml: /\bxml\b/, - html: /\bhtml/, - json: /\bjson\b/ - }, - - responseFields: { - xml: "responseXML", - text: "responseText", - json: "responseJSON" - }, - - // Data converters - // Keys separate source (or catchall "*") and destination types with a single space - converters: { - - // Convert anything to text - "* text": String, - - // Text to html (true = no transformation) - "text html": true, - - // Evaluate text as a json expression - "text json": JSON.parse, - - // Parse text as xml - "text xml": jQuery.parseXML - }, - - // For options that shouldn't be deep extended: - // you can add your own custom options here if - // and when you create one that shouldn't be - // deep extended (see ajaxExtend) - flatOptions: { - url: true, - context: true - } - }, - - // Creates a full fledged settings object into target - // with both ajaxSettings and settings fields. - // If target is omitted, writes into ajaxSettings. - ajaxSetup: function( target, settings ) { - return settings ? 
- - // Building a settings object - ajaxExtend( ajaxExtend( target, jQuery.ajaxSettings ), settings ) : - - // Extending ajaxSettings - ajaxExtend( jQuery.ajaxSettings, target ); - }, - - ajaxPrefilter: addToPrefiltersOrTransports( prefilters ), - ajaxTransport: addToPrefiltersOrTransports( transports ), - - // Main method - ajax: function( url, options ) { - - // If url is an object, simulate pre-1.5 signature - if ( typeof url === "object" ) { - options = url; - url = undefined; - } - - // Force options to be an object - options = options || {}; - - var transport, - - // URL without anti-cache param - cacheURL, - - // Response headers - responseHeadersString, - responseHeaders, - - // timeout handle - timeoutTimer, - - // Url cleanup var - urlAnchor, - - // Request state (becomes false upon send and true upon completion) - completed, - - // To know if global events are to be dispatched - fireGlobals, - - // Loop variable - i, - - // uncached part of the url - uncached, - - // Create the final options object - s = jQuery.ajaxSetup( {}, options ), - - // Callbacks context - callbackContext = s.context || s, - - // Context for global events is callbackContext if it is a DOM node or jQuery collection - globalEventContext = s.context && - ( callbackContext.nodeType || callbackContext.jquery ) ? - jQuery( callbackContext ) : - jQuery.event, - - // Deferreds - deferred = jQuery.Deferred(), - completeDeferred = jQuery.Callbacks( "once memory" ), - - // Status-dependent callbacks - statusCode = s.statusCode || {}, - - // Headers (they are sent all at once) - requestHeaders = {}, - requestHeadersNames = {}, - - // Default abort message - strAbort = "canceled", - - // Fake xhr - jqXHR = { - readyState: 0, - - // Builds headers hashtable if needed - getResponseHeader: function( key ) { - var match; - if ( completed ) { - if ( !responseHeaders ) { - responseHeaders = {}; - while ( ( match = rheaders.exec( responseHeadersString ) ) ) { - responseHeaders[ match[ 1 ].toLowerCase() + " " ] = - ( responseHeaders[ match[ 1 ].toLowerCase() + " " ] || [] ) - .concat( match[ 2 ] ); - } - } - match = responseHeaders[ key.toLowerCase() + " " ]; - } - return match == null ? null : match.join( ", " ); - }, - - // Raw string - getAllResponseHeaders: function() { - return completed ? 
responseHeadersString : null; - }, - - // Caches the header - setRequestHeader: function( name, value ) { - if ( completed == null ) { - name = requestHeadersNames[ name.toLowerCase() ] = - requestHeadersNames[ name.toLowerCase() ] || name; - requestHeaders[ name ] = value; - } - return this; - }, - - // Overrides response content-type header - overrideMimeType: function( type ) { - if ( completed == null ) { - s.mimeType = type; - } - return this; - }, - - // Status-dependent callbacks - statusCode: function( map ) { - var code; - if ( map ) { - if ( completed ) { - - // Execute the appropriate callbacks - jqXHR.always( map[ jqXHR.status ] ); - } else { - - // Lazy-add the new callbacks in a way that preserves old ones - for ( code in map ) { - statusCode[ code ] = [ statusCode[ code ], map[ code ] ]; - } - } - } - return this; - }, - - // Cancel the request - abort: function( statusText ) { - var finalText = statusText || strAbort; - if ( transport ) { - transport.abort( finalText ); - } - done( 0, finalText ); - return this; - } - }; - - // Attach deferreds - deferred.promise( jqXHR ); - - // Add protocol if not provided (prefilters might expect it) - // Handle falsy url in the settings object (#10093: consistency with old signature) - // We also use the url parameter if available - s.url = ( ( url || s.url || location.href ) + "" ) - .replace( rprotocol, location.protocol + "//" ); - - // Alias method option to type as per ticket #12004 - s.type = options.method || options.type || s.method || s.type; - - // Extract dataTypes list - s.dataTypes = ( s.dataType || "*" ).toLowerCase().match( rnothtmlwhite ) || [ "" ]; - - // A cross-domain request is in order when the origin doesn't match the current origin. - if ( s.crossDomain == null ) { - urlAnchor = document.createElement( "a" ); - - // Support: IE <=8 - 11, Edge 12 - 15 - // IE throws exception on accessing the href property if url is malformed, - // e.g. 
http://example.com:80x/ - try { - urlAnchor.href = s.url; - - // Support: IE <=8 - 11 only - // Anchor's host property isn't correctly set when s.url is relative - urlAnchor.href = urlAnchor.href; - s.crossDomain = originAnchor.protocol + "//" + originAnchor.host !== - urlAnchor.protocol + "//" + urlAnchor.host; - } catch ( e ) { - - // If there is an error parsing the URL, assume it is crossDomain, - // it can be rejected by the transport if it is invalid - s.crossDomain = true; - } - } - - // Convert data if not already a string - if ( s.data && s.processData && typeof s.data !== "string" ) { - s.data = jQuery.param( s.data, s.traditional ); - } - - // Apply prefilters - inspectPrefiltersOrTransports( prefilters, s, options, jqXHR ); - - // If request was aborted inside a prefilter, stop there - if ( completed ) { - return jqXHR; - } - - // We can fire global events as of now if asked to - // Don't fire events if jQuery.event is undefined in an AMD-usage scenario (#15118) - fireGlobals = jQuery.event && s.global; - - // Watch for a new set of requests - if ( fireGlobals && jQuery.active++ === 0 ) { - jQuery.event.trigger( "ajaxStart" ); - } - - // Uppercase the type - s.type = s.type.toUpperCase(); - - // Determine if request has content - s.hasContent = !rnoContent.test( s.type ); - - // Save the URL in case we're toying with the If-Modified-Since - // and/or If-None-Match header later on - // Remove hash to simplify url manipulation - cacheURL = s.url.replace( rhash, "" ); - - // More options handling for requests with no content - if ( !s.hasContent ) { - - // Remember the hash so we can put it back - uncached = s.url.slice( cacheURL.length ); - - // If data is available and should be processed, append data to url - if ( s.data && ( s.processData || typeof s.data === "string" ) ) { - cacheURL += ( rquery.test( cacheURL ) ? "&" : "?" ) + s.data; - - // #9682: remove data so that it's not used in an eventual retry - delete s.data; - } - - // Add or update anti-cache param if needed - if ( s.cache === false ) { - cacheURL = cacheURL.replace( rantiCache, "$1" ); - uncached = ( rquery.test( cacheURL ) ? "&" : "?" ) + "_=" + ( nonce++ ) + uncached; - } - - // Put hash and anti-cache on the URL that will be requested (gh-1732) - s.url = cacheURL + uncached; - - // Change '%20' to '+' if this is encoded form body content (gh-2658) - } else if ( s.data && s.processData && - ( s.contentType || "" ).indexOf( "application/x-www-form-urlencoded" ) === 0 ) { - s.data = s.data.replace( r20, "+" ); - } - - // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. - if ( s.ifModified ) { - if ( jQuery.lastModified[ cacheURL ] ) { - jqXHR.setRequestHeader( "If-Modified-Since", jQuery.lastModified[ cacheURL ] ); - } - if ( jQuery.etag[ cacheURL ] ) { - jqXHR.setRequestHeader( "If-None-Match", jQuery.etag[ cacheURL ] ); - } - } - - // Set the correct header, if data is being sent - if ( s.data && s.hasContent && s.contentType !== false || options.contentType ) { - jqXHR.setRequestHeader( "Content-Type", s.contentType ); - } - - // Set the Accepts header for the server, depending on the dataType - jqXHR.setRequestHeader( - "Accept", - s.dataTypes[ 0 ] && s.accepts[ s.dataTypes[ 0 ] ] ? - s.accepts[ s.dataTypes[ 0 ] ] + - ( s.dataTypes[ 0 ] !== "*" ? 
", " + allTypes + "; q=0.01" : "" ) : - s.accepts[ "*" ] - ); - - // Check for headers option - for ( i in s.headers ) { - jqXHR.setRequestHeader( i, s.headers[ i ] ); - } - - // Allow custom headers/mimetypes and early abort - if ( s.beforeSend && - ( s.beforeSend.call( callbackContext, jqXHR, s ) === false || completed ) ) { - - // Abort if not done already and return - return jqXHR.abort(); - } - - // Aborting is no longer a cancellation - strAbort = "abort"; - - // Install callbacks on deferreds - completeDeferred.add( s.complete ); - jqXHR.done( s.success ); - jqXHR.fail( s.error ); - - // Get transport - transport = inspectPrefiltersOrTransports( transports, s, options, jqXHR ); - - // If no transport, we auto-abort - if ( !transport ) { - done( -1, "No Transport" ); - } else { - jqXHR.readyState = 1; - - // Send global event - if ( fireGlobals ) { - globalEventContext.trigger( "ajaxSend", [ jqXHR, s ] ); - } - - // If request was aborted inside ajaxSend, stop there - if ( completed ) { - return jqXHR; - } - - // Timeout - if ( s.async && s.timeout > 0 ) { - timeoutTimer = window.setTimeout( function() { - jqXHR.abort( "timeout" ); - }, s.timeout ); - } - - try { - completed = false; - transport.send( requestHeaders, done ); - } catch ( e ) { - - // Rethrow post-completion exceptions - if ( completed ) { - throw e; - } - - // Propagate others as results - done( -1, e ); - } - } - - // Callback for when everything is done - function done( status, nativeStatusText, responses, headers ) { - var isSuccess, success, error, response, modified, - statusText = nativeStatusText; - - // Ignore repeat invocations - if ( completed ) { - return; - } - - completed = true; - - // Clear timeout if it exists - if ( timeoutTimer ) { - window.clearTimeout( timeoutTimer ); - } - - // Dereference transport for early garbage collection - // (no matter how long the jqXHR object will be used) - transport = undefined; - - // Cache response headers - responseHeadersString = headers || ""; - - // Set readyState - jqXHR.readyState = status > 0 ? 4 : 0; - - // Determine if successful - isSuccess = status >= 200 && status < 300 || status === 304; - - // Get response data - if ( responses ) { - response = ajaxHandleResponses( s, jqXHR, responses ); - } - - // Convert no matter what (that way responseXXX fields are always set) - response = ajaxConvert( s, response, jqXHR, isSuccess ); - - // If successful, handle type chaining - if ( isSuccess ) { - - // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. 
- if ( s.ifModified ) { - modified = jqXHR.getResponseHeader( "Last-Modified" ); - if ( modified ) { - jQuery.lastModified[ cacheURL ] = modified; - } - modified = jqXHR.getResponseHeader( "etag" ); - if ( modified ) { - jQuery.etag[ cacheURL ] = modified; - } - } - - // if no content - if ( status === 204 || s.type === "HEAD" ) { - statusText = "nocontent"; - - // if not modified - } else if ( status === 304 ) { - statusText = "notmodified"; - - // If we have data, let's convert it - } else { - statusText = response.state; - success = response.data; - error = response.error; - isSuccess = !error; - } - } else { - - // Extract error from statusText and normalize for non-aborts - error = statusText; - if ( status || !statusText ) { - statusText = "error"; - if ( status < 0 ) { - status = 0; - } - } - } - - // Set data for the fake xhr object - jqXHR.status = status; - jqXHR.statusText = ( nativeStatusText || statusText ) + ""; - - // Success/Error - if ( isSuccess ) { - deferred.resolveWith( callbackContext, [ success, statusText, jqXHR ] ); - } else { - deferred.rejectWith( callbackContext, [ jqXHR, statusText, error ] ); - } - - // Status-dependent callbacks - jqXHR.statusCode( statusCode ); - statusCode = undefined; - - if ( fireGlobals ) { - globalEventContext.trigger( isSuccess ? "ajaxSuccess" : "ajaxError", - [ jqXHR, s, isSuccess ? success : error ] ); - } - - // Complete - completeDeferred.fireWith( callbackContext, [ jqXHR, statusText ] ); - - if ( fireGlobals ) { - globalEventContext.trigger( "ajaxComplete", [ jqXHR, s ] ); - - // Handle the global AJAX counter - if ( !( --jQuery.active ) ) { - jQuery.event.trigger( "ajaxStop" ); - } - } - } - - return jqXHR; - }, - - getJSON: function( url, data, callback ) { - return jQuery.get( url, data, callback, "json" ); - }, - - getScript: function( url, callback ) { - return jQuery.get( url, undefined, callback, "script" ); - } -} ); - -jQuery.each( [ "get", "post" ], function( i, method ) { - jQuery[ method ] = function( url, data, callback, type ) { - - // Shift arguments if data argument was omitted - if ( isFunction( data ) ) { - type = type || callback; - callback = data; - data = undefined; - } - - // The url can be an options object (which then must have .url) - return jQuery.ajax( jQuery.extend( { - url: url, - type: method, - dataType: type, - data: data, - success: callback - }, jQuery.isPlainObject( url ) && url ) ); - }; -} ); - - -jQuery._evalUrl = function( url, options ) { - return jQuery.ajax( { - url: url, - - // Make this explicit, since user can override this through ajaxSetup (#11264) - type: "GET", - dataType: "script", - cache: true, - async: false, - global: false, - - // Only evaluate the response if it is successful (gh-4126) - // dataFilter is not invoked for failure responses, so using it instead - // of the default converter is kludgy but it works. 
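-
- // For illustration: the converter table consulted above is open to callers
- // too. Keys pair a source and a destination dataType separated by a single
- // space, so registering one via jQuery.ajaxSetup makes a custom dataType
- // requestable. The "csv" dataType and its parser here are hypothetical:
- //
- //     jQuery.ajaxSetup( {
- //         converters: {
- //             "text csv": function( raw ) {
- //                 return raw.split( "\n" ).map( function( line ) {
- //                     return line.split( "," );
- //                 } );
- //             }
- //         }
- //     } );
- //
- //     jQuery.ajax( { url: "/data.csv", dataType: "csv" } );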
- converters: { - "text script": function() {} - }, - dataFilter: function( response ) { - jQuery.globalEval( response, options ); - } - } ); -}; - - -jQuery.fn.extend( { - wrapAll: function( html ) { - var wrap; - - if ( this[ 0 ] ) { - if ( isFunction( html ) ) { - html = html.call( this[ 0 ] ); - } - - // The elements to wrap the target around - wrap = jQuery( html, this[ 0 ].ownerDocument ).eq( 0 ).clone( true ); - - if ( this[ 0 ].parentNode ) { - wrap.insertBefore( this[ 0 ] ); - } - - wrap.map( function() { - var elem = this; - - while ( elem.firstElementChild ) { - elem = elem.firstElementChild; - } - - return elem; - } ).append( this ); - } - - return this; - }, - - wrapInner: function( html ) { - if ( isFunction( html ) ) { - return this.each( function( i ) { - jQuery( this ).wrapInner( html.call( this, i ) ); - } ); - } - - return this.each( function() { - var self = jQuery( this ), - contents = self.contents(); - - if ( contents.length ) { - contents.wrapAll( html ); - - } else { - self.append( html ); - } - } ); - }, - - wrap: function( html ) { - var htmlIsFunction = isFunction( html ); - - return this.each( function( i ) { - jQuery( this ).wrapAll( htmlIsFunction ? html.call( this, i ) : html ); - } ); - }, - - unwrap: function( selector ) { - this.parent( selector ).not( "body" ).each( function() { - jQuery( this ).replaceWith( this.childNodes ); - } ); - return this; - } -} ); - - -jQuery.expr.pseudos.hidden = function( elem ) { - return !jQuery.expr.pseudos.visible( elem ); -}; -jQuery.expr.pseudos.visible = function( elem ) { - return !!( elem.offsetWidth || elem.offsetHeight || elem.getClientRects().length ); -}; - - - - -jQuery.ajaxSettings.xhr = function() { - try { - return new window.XMLHttpRequest(); - } catch ( e ) {} -}; - -var xhrSuccessStatus = { - - // File protocol always yields status code 0, assume 200 - 0: 200, - - // Support: IE <=9 only - // #1450: sometimes IE returns 1223 when it should be 204 - 1223: 204 - }, - xhrSupported = jQuery.ajaxSettings.xhr(); - -support.cors = !!xhrSupported && ( "withCredentials" in xhrSupported ); -support.ajax = xhrSupported = !!xhrSupported; - -jQuery.ajaxTransport( function( options ) { - var callback, errorCallback; - - // Cross domain only allowed if supported through XMLHttpRequest - if ( support.cors || xhrSupported && !options.crossDomain ) { - return { - send: function( headers, complete ) { - var i, - xhr = options.xhr(); - - xhr.open( - options.type, - options.url, - options.async, - options.username, - options.password - ); - - // Apply custom fields if provided - if ( options.xhrFields ) { - for ( i in options.xhrFields ) { - xhr[ i ] = options.xhrFields[ i ]; - } - } - - // Override mime type if needed - if ( options.mimeType && xhr.overrideMimeType ) { - xhr.overrideMimeType( options.mimeType ); - } - - // X-Requested-With header - // For cross-domain requests, seeing as conditions for a preflight are - // akin to a jigsaw puzzle, we simply never set it to be sure. - // (it can always be set on a per-request basis or even using ajaxSetup) - // For same-domain requests, won't change header if already provided. 
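-
- // For illustration: as the note above explains, the header is deliberately
- // omitted for cross-domain requests so that jQuery itself never forces a CORS
- // preflight, but a caller whose server accepts the preflight can still send
- // it per request. The URL in this sketch is hypothetical:
- //
- //     jQuery.ajax( {
- //         url: "https://api.example.com/items",
- //         headers: { "X-Requested-With": "XMLHttpRequest" }
- //     } );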
- if ( !options.crossDomain && !headers[ "X-Requested-With" ] ) { - headers[ "X-Requested-With" ] = "XMLHttpRequest"; - } - - // Set headers - for ( i in headers ) { - xhr.setRequestHeader( i, headers[ i ] ); - } - - // Callback - callback = function( type ) { - return function() { - if ( callback ) { - callback = errorCallback = xhr.onload = - xhr.onerror = xhr.onabort = xhr.ontimeout = - xhr.onreadystatechange = null; - - if ( type === "abort" ) { - xhr.abort(); - } else if ( type === "error" ) { - - // Support: IE <=9 only - // On a manual native abort, IE9 throws - // errors on any property access that is not readyState - if ( typeof xhr.status !== "number" ) { - complete( 0, "error" ); - } else { - complete( - - // File: protocol always yields status 0; see #8605, #14207 - xhr.status, - xhr.statusText - ); - } - } else { - complete( - xhrSuccessStatus[ xhr.status ] || xhr.status, - xhr.statusText, - - // Support: IE <=9 only - // IE9 has no XHR2 but throws on binary (trac-11426) - // For XHR2 non-text, let the caller handle it (gh-2498) - ( xhr.responseType || "text" ) !== "text" || - typeof xhr.responseText !== "string" ? - { binary: xhr.response } : - { text: xhr.responseText }, - xhr.getAllResponseHeaders() - ); - } - } - }; - }; - - // Listen to events - xhr.onload = callback(); - errorCallback = xhr.onerror = xhr.ontimeout = callback( "error" ); - - // Support: IE 9 only - // Use onreadystatechange to replace onabort - // to handle uncaught aborts - if ( xhr.onabort !== undefined ) { - xhr.onabort = errorCallback; - } else { - xhr.onreadystatechange = function() { - - // Check readyState before timeout as it changes - if ( xhr.readyState === 4 ) { - - // Allow onerror to be called first, - // but that will not handle a native abort - // Also, save errorCallback to a variable - // as xhr.onerror cannot be accessed - window.setTimeout( function() { - if ( callback ) { - errorCallback(); - } - } ); - } - }; - } - - // Create the abort callback - callback = callback( "abort" ); - - try { - - // Do send the request (this may raise an exception) - xhr.send( options.hasContent && options.data || null ); - } catch ( e ) { - - // #14683: Only rethrow if this hasn't been notified as an error yet - if ( callback ) { - throw e; - } - } - }, - - abort: function() { - if ( callback ) { - callback(); - } - } - }; - } -} ); - - - - -// Prevent auto-execution of scripts when no explicit dataType was provided (See gh-2432) -jQuery.ajaxPrefilter( function( s ) { - if ( s.crossDomain ) { - s.contents.script = false; - } -} ); - -// Install script dataType -jQuery.ajaxSetup( { - accepts: { - script: "text/javascript, application/javascript, " + - "application/ecmascript, application/x-ecmascript" - }, - contents: { - script: /\b(?:java|ecma)script\b/ - }, - converters: { - "text script": function( text ) { - jQuery.globalEval( text ); - return text; - } - } -} ); - -// Handle cache's special case and crossDomain -jQuery.ajaxPrefilter( "script", function( s ) { - if ( s.cache === undefined ) { - s.cache = false; - } - if ( s.crossDomain ) { - s.type = "GET"; - } -} ); - -// Bind script tag hack transport -jQuery.ajaxTransport( "script", function( s ) { - - // This transport only deals with cross domain or forced-by-attrs requests - if ( s.crossDomain || s.scriptAttrs ) { - var script, callback; - return { - send: function( _, complete ) { - script = jQuery( " +{% endmacro %} + +{% macro body_post() %} + + + +{% endmacro %} \ No newline at end of file diff --git a/_team.html b/_team.html index 
d7685f7c..990f77ad 100644 --- a/_team.html +++ b/_team.html @@ -1,99 +1,473 @@ + - - - - - PINA Team — PINA 0.1.1.post2407 documentation - - - - - - - - - - - - - - + + + + + + + + + PINA Team — PINA 0.1.2 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + + + + + + + + + + + + +
_images/foudings.png -
+

We also acknowledge the contribution of Maria Strazzullo in the early development of the package. A special thanks goes to all the students and researchers from different universities who contributed to the package. Finally, we warmly thank all the contributors!

-
+
_images/university_dev_pina.png -
+ \ No newline at end of file diff --git a/genindex.html b/genindex.html index f00550f6..7ca4fde1 100644 --- a/genindex.html +++ b/genindex.html @@ -1,93 +1,430 @@ - - - - - Index — PINA 0.1.1.post2407 documentation - - + + + + + + + Index — PINA 0.1.2 documentation + + + + + + + + + + + + + + + + + + + + - - - - - - - - - - + + + + + + + + + + + + - - + + + + + + + + + + + + + +

Index

@@ -113,8 +450,6 @@

Index

| U | V | W - | X - | Z

A

@@ -140,6 +475,8 @@

A

  • AdaptiveSiLU (class in pina.adaptive_functions.adaptive_func)
  • + + - @@ -246,92 +503,18 @@

    A

    B

    @@ -340,1392 +523,651 @@

    C

    - - - -

    D

    - - -
    - -

    E

    - - - -
    - -

    F

    - - - -
    - -

    G

    - - - -
    - -

    H

    - - - -
    - -

    I

    - - - -
    - -

    K

    - - -
    - -

    L

    - - - -
    - -

    M

    - - - -
    - -

    N

    - - - + +
    + +

    D

    + + +
    -
  • Network (class in pina.model.network) +
  • Difference (class in pina.geometry.difference_domain)
  • -
  • neural_net() (CompetitivePINN property) - -
  • -
  • not_sampled_points() (AbstractProblem property) +
  • domain (AbstractProblem property)
  • -

    O

    +

    E

    + +
    -
  • on_train_epoch_start() (SwitchOptimizer method) +
  • Exclusion (class in pina.geometry.exclusion_domain)
  • -
  • on_train_start() (R3Refinement method) - -
  • + +

    F

    + -
    - -

    P

    - - - +
    +
    -

    R

    +

    G

    + +
    -
    + + + + + + +
    + + + +
    -
    - -
    - -
    -

© Copyright 2021-2024, PINA Contributors. Last updated on Jul 01, 2024.

    +
    + +
    + + +
- Built with Sphinx using a theme provided by Read the Docs.
    +
    - - - - - + + + + + + + + + + + + - + + \ No newline at end of file diff --git a/search.html b/search.html index 412d3ba4..8d876dc8 100644 --- a/search.html +++ b/search.html @@ -1,142 +1,524 @@ + - - - - - Search — PINA 0.1.1.post2407 documentation - - - - - - - - - - - - - - - - - + + + + + + + Search - PINA 0.1.2 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + + + + + + + - -

    Search

    + \ No newline at end of file diff --git a/searchindex.js b/searchindex.js index e93ae4a9..0af4f4c7 100644 --- a/searchindex.js +++ b/searchindex.js @@ -1 +1 @@ -Search.setIndex({docnames:["_LICENSE","_cite","_rst/_code","_rst/_contributing","_rst/_installation","_rst/_tutorial","_rst/adaptive_functions/AdaptiveCELU","_rst/adaptive_functions/AdaptiveELU","_rst/adaptive_functions/AdaptiveExp","_rst/adaptive_functions/AdaptiveFunctionInterface","_rst/adaptive_functions/AdaptiveGELU","_rst/adaptive_functions/AdaptiveMish","_rst/adaptive_functions/AdaptiveReLU","_rst/adaptive_functions/AdaptiveSIREN","_rst/adaptive_functions/AdaptiveSiLU","_rst/adaptive_functions/AdaptiveSigmoid","_rst/adaptive_functions/AdaptiveSoftmax","_rst/adaptive_functions/AdaptiveSoftmin","_rst/adaptive_functions/AdaptiveTanh","_rst/callbacks/adaptive_refinment_callbacks","_rst/callbacks/optimizer_callbacks","_rst/callbacks/processing_callbacks","_rst/condition","_rst/equations","_rst/geometry/cartesian","_rst/geometry/difference_domain","_rst/geometry/ellipsoid","_rst/geometry/exclusion_domain","_rst/geometry/intersection_domain","_rst/geometry/location","_rst/geometry/operation_interface","_rst/geometry/simplex","_rst/geometry/union_domain","_rst/label_tensor","_rst/layers/avno_layer","_rst/layers/convolution","_rst/layers/enhanced_linear","_rst/layers/fourier","_rst/layers/fourier_embedding","_rst/layers/lowrank_layer","_rst/layers/pbc_embedding","_rst/layers/pod","_rst/layers/residual","_rst/layers/spectral","_rst/loss/loss_interface","_rst/loss/lploss","_rst/loss/powerloss","_rst/models/avno","_rst/models/base_no","_rst/models/deeponet","_rst/models/fnn","_rst/models/fnn_residual","_rst/models/fno","_rst/models/fourier_kernel","_rst/models/lno","_rst/models/mionet","_rst/models/multifeedforward","_rst/models/network","_rst/operators","_rst/plotter","_rst/problem/abstractproblem","_rst/problem/parametricproblem","_rst/problem/spatialproblem","_rst/problem/timedepproblem","_rst/solvers/basepinn","_rst/solvers/causalpinn","_rst/solvers/competitivepinn","_rst/solvers/garom","_rst/solvers/gpinn","_rst/solvers/pinn","_rst/solvers/rba_pinn","_rst/solvers/rom","_rst/solvers/sapinn","_rst/solvers/solver_interface","_rst/solvers/supervised","_rst/trainer","_rst/tutorials/tutorial1/tutorial","_rst/tutorials/tutorial10/tutorial","_rst/tutorials/tutorial11/tutorial","_rst/tutorials/tutorial12/tutorial","_rst/tutorials/tutorial13/tutorial","_rst/tutorials/tutorial2/tutorial","_rst/tutorials/tutorial3/tutorial","_rst/tutorials/tutorial4/tutorial","_rst/tutorials/tutorial5/tutorial","_rst/tutorials/tutorial6/tutorial","_rst/tutorials/tutorial7/tutorial","_rst/tutorials/tutorial8/tutorial","_rst/tutorials/tutorial9/tutorial","_team","index"],envversion:{"sphinx.domains.c":1,"sphinx.domains.changeset":1,"sphinx.domains.citation":1,"sphinx.domains.cpp":1,"sphinx.domains.index":1,"sphinx.domains.javascript":1,"sphinx.domains.math":2,"sphinx.domains.python":1,"sphinx.domains.rst":1,"sphinx.domains.std":1,"sphinx.ext.intersphinx":1,"sphinx.ext.todo":2,"sphinx.ext.viewcode":1,sphinx:56},filenames:["_LICENSE.rst","_cite.rst","_rst/_code.rst","_rst/_contributing.rst","_rst/_installation.rst","_rst/_tutorial.rst","_rst/adaptive_functions/AdaptiveCELU.rst","_rst/adaptive_functions/AdaptiveELU.rst","_rst/adaptive_functions/AdaptiveExp.rst","_rst/adaptive_functions/AdaptiveFunctionInterface.rst","_rst/adaptive_functions/AdaptiveGELU.rst","_rst/adaptive_functions/AdaptiveMish.rst","_rst/adaptive_functions/AdaptiveReLU.rst","_rst/adaptive_fun
ctions/AdaptiveSIREN.rst","_rst/adaptive_functions/AdaptiveSiLU.rst","_rst/adaptive_functions/AdaptiveSigmoid.rst","_rst/adaptive_functions/AdaptiveSoftmax.rst","_rst/adaptive_functions/AdaptiveSoftmin.rst","_rst/adaptive_functions/AdaptiveTanh.rst","_rst/callbacks/adaptive_refinment_callbacks.rst","_rst/callbacks/optimizer_callbacks.rst","_rst/callbacks/processing_callbacks.rst","_rst/condition.rst","_rst/equations.rst","_rst/geometry/cartesian.rst","_rst/geometry/difference_domain.rst","_rst/geometry/ellipsoid.rst","_rst/geometry/exclusion_domain.rst","_rst/geometry/intersection_domain.rst","_rst/geometry/location.rst","_rst/geometry/operation_interface.rst","_rst/geometry/simplex.rst","_rst/geometry/union_domain.rst","_rst/label_tensor.rst","_rst/layers/avno_layer.rst","_rst/layers/convolution.rst","_rst/layers/enhanced_linear.rst","_rst/layers/fourier.rst","_rst/layers/fourier_embedding.rst","_rst/layers/lowrank_layer.rst","_rst/layers/pbc_embedding.rst","_rst/layers/pod.rst","_rst/layers/residual.rst","_rst/layers/spectral.rst","_rst/loss/loss_interface.rst","_rst/loss/lploss.rst","_rst/loss/powerloss.rst","_rst/models/avno.rst","_rst/models/base_no.rst","_rst/models/deeponet.rst","_rst/models/fnn.rst","_rst/models/fnn_residual.rst","_rst/models/fno.rst","_rst/models/fourier_kernel.rst","_rst/models/lno.rst","_rst/models/mionet.rst","_rst/models/multifeedforward.rst","_rst/models/network.rst","_rst/operators.rst","_rst/plotter.rst","_rst/problem/abstractproblem.rst","_rst/problem/parametricproblem.rst","_rst/problem/spatialproblem.rst","_rst/problem/timedepproblem.rst","_rst/solvers/basepinn.rst","_rst/solvers/causalpinn.rst","_rst/solvers/competitivepinn.rst","_rst/solvers/garom.rst","_rst/solvers/gpinn.rst","_rst/solvers/pinn.rst","_rst/solvers/rba_pinn.rst","_rst/solvers/rom.rst","_rst/solvers/sapinn.rst","_rst/solvers/solver_interface.rst","_rst/solvers/supervised.rst","_rst/trainer.rst","_rst/tutorials/tutorial1/tutorial.rst","_rst/tutorials/tutorial10/tutorial.rst","_rst/tutorials/tutorial11/tutorial.rst","_rst/tutorials/tutorial12/tutorial.rst","_rst/tutorials/tutorial13/tutorial.rst","_rst/tutorials/tutorial2/tutorial.rst","_rst/tutorials/tutorial3/tutorial.rst","_rst/tutorials/tutorial4/tutorial.rst","_rst/tutorials/tutorial5/tutorial.rst","_rst/tutorials/tutorial6/tutorial.rst","_rst/tutorials/tutorial7/tutorial.rst","_rst/tutorials/tutorial8/tutorial.rst","_rst/tutorials/tutorial9/tutorial.rst","_team.rst","index.rst"],objects:{"pina.adaptive_functions":{adaptive_func_interface:[9,2,0,"-"]},"pina.adaptive_functions.adaptive_func":{AdaptiveCELU:[6,0,1,""],AdaptiveELU:[7,0,1,""],AdaptiveExp:[8,0,1,""],AdaptiveGELU:[10,0,1,""],AdaptiveMish:[11,0,1,""],AdaptiveReLU:[12,0,1,""],AdaptiveSIREN:[13,0,1,""],AdaptiveSiLU:[14,0,1,""],AdaptiveSigmoid:[15,0,1,""],AdaptiveSoftmax:[16,0,1,""],AdaptiveSoftmin:[17,0,1,""],AdaptiveTanh:[18,0,1,""]},"pina.adaptive_functions.adaptive_func.AdaptiveCELU":{"double":[6,1,1,""],"float":[6,1,1,""],add_module:[6,1,1,""],alpha:[6,1,1,""],apply:[6,1,1,""],beta:[6,1,1,""],bfloat16:[6,1,1,""],buffers:[6,1,1,""],children:[6,1,1,""],compile:[6,1,1,""],cpu:[6,1,1,""],cuda:[6,1,1,""],eval:[6,1,1,""],extra_repr:[6,1,1,""],forward:[6,1,1,""],func:[6,1,1,""],gamma:[6,1,1,""],get_buffer:[6,1,1,""],get_extra_state:[6,1,1,""],get_parameter:[6,1,1,""],get_submodule:[6,1,1,""],half:[6,1,1,""],ipu:[6,1,1,""],load_state_dict:[6,1,1,""],modules:[6,1,1,""],named_buffers:[6,1,1,""],named_children:[6,1,1,""],named_modules:[6,1,1,""],named_parameters:[6,1,1,""],parameters:[
6,1,1,""],register_backward_hook:[6,1,1,""],register_buffer:[6,1,1,""],register_forward_hook:[6,1,1,""],register_forward_pre_hook:[6,1,1,""],register_full_backward_hook:[6,1,1,""],register_full_backward_pre_hook:[6,1,1,""],register_load_state_dict_post_hook:[6,1,1,""],register_module:[6,1,1,""],register_parameter:[6,1,1,""],register_state_dict_pre_hook:[6,1,1,""],requires_grad_:[6,1,1,""],set_extra_state:[6,1,1,""],share_memory:[6,1,1,""],state_dict:[6,1,1,""],to:[6,1,1,""],to_empty:[6,1,1,""],train:[6,1,1,""],type:[6,1,1,""],xpu:[6,1,1,""],zero_grad:[6,1,1,""]},"pina.adaptive_functions.adaptive_func.AdaptiveELU":{"double":[7,1,1,""],"float":[7,1,1,""],add_module:[7,1,1,""],alpha:[7,1,1,""],apply:[7,1,1,""],beta:[7,1,1,""],bfloat16:[7,1,1,""],buffers:[7,1,1,""],children:[7,1,1,""],compile:[7,1,1,""],cpu:[7,1,1,""],cuda:[7,1,1,""],eval:[7,1,1,""],extra_repr:[7,1,1,""],forward:[7,1,1,""],func:[7,1,1,""],gamma:[7,1,1,""],get_buffer:[7,1,1,""],get_extra_state:[7,1,1,""],get_parameter:[7,1,1,""],get_submodule:[7,1,1,""],half:[7,1,1,""],ipu:[7,1,1,""],load_state_dict:[7,1,1,""],modules:[7,1,1,""],named_buffers:[7,1,1,""],named_children:[7,1,1,""],named_modules:[7,1,1,""],named_parameters:[7,1,1,""],parameters:[7,1,1,""],register_backward_hook:[7,1,1,""],register_buffer:[7,1,1,""],register_forward_hook:[7,1,1,""],register_forward_pre_hook:[7,1,1,""],register_full_backward_hook:[7,1,1,""],register_full_backward_pre_hook:[7,1,1,""],register_load_state_dict_post_hook:[7,1,1,""],register_module:[7,1,1,""],register_parameter:[7,1,1,""],register_state_dict_pre_hook:[7,1,1,""],requires_grad_:[7,1,1,""],set_extra_state:[7,1,1,""],share_memory:[7,1,1,""],state_dict:[7,1,1,""],to:[7,1,1,""],to_empty:[7,1,1,""],train:[7,1,1,""],type:[7,1,1,""],xpu:[7,1,1,""],zero_grad:[7,1,1,""]},"pina.adaptive_functions.adaptive_func.AdaptiveExp":{"double":[8,1,1,""],"float":[8,1,1,""],add_module:[8,1,1,""],alpha:[8,1,1,""],apply:[8,1,1,""],beta:[8,1,1,""],bfloat16:[8,1,1,""],buffers:[8,1,1,""],children:[8,1,1,""],compile:[8,1,1,""],cpu:[8,1,1,""],cuda:[8,1,1,""],eval:[8,1,1,""],extra_repr:[8,1,1,""],forward:[8,1,1,""],func:[8,1,1,""],gamma:[8,1,1,""],get_buffer:[8,1,1,""],get_extra_state:[8,1,1,""],get_parameter:[8,1,1,""],get_submodule:[8,1,1,""],half:[8,1,1,""],ipu:[8,1,1,""],load_state_dict:[8,1,1,""],modules:[8,1,1,""],named_buffers:[8,1,1,""],named_children:[8,1,1,""],named_modules:[8,1,1,""],named_parameters:[8,1,1,""],parameters:[8,1,1,""],register_backward_hook:[8,1,1,""],register_buffer:[8,1,1,""],register_forward_hook:[8,1,1,""],register_forward_pre_hook:[8,1,1,""],register_full_backward_hook:[8,1,1,""],register_full_backward_pre_hook:[8,1,1,""],register_load_state_dict_post_hook:[8,1,1,""],register_module:[8,1,1,""],register_parameter:[8,1,1,""],register_state_dict_pre_hook:[8,1,1,""],requires_grad_:[8,1,1,""],set_extra_state:[8,1,1,""],share_memory:[8,1,1,""],state_dict:[8,1,1,""],to:[8,1,1,""],to_empty:[8,1,1,""],train:[8,1,1,""],type:[8,1,1,""],xpu:[8,1,1,""],zero_grad:[8,1,1,""]},"pina.adaptive_functions.adaptive_func.AdaptiveGELU":{"double":[10,1,1,""],"float":[10,1,1,""],add_module:[10,1,1,""],alpha:[10,1,1,""],apply:[10,1,1,""],beta:[10,1,1,""],bfloat16:[10,1,1,""],buffers:[10,1,1,""],children:[10,1,1,""],compile:[10,1,1,""],cpu:[10,1,1,""],cuda:[10,1,1,""],eval:[10,1,1,""],extra_repr:[10,1,1,""],forward:[10,1,1,""],func:[10,1,1,""],gamma:[10,1,1,""],get_buffer:[10,1,1,""],get_extra_state:[10,1,1,""],get_parameter:[10,1,1,""],get_submodule:[10,1,1,""],half:[10,1,1,""],ipu:[10,1,1,""],load_state_dict:[10,1
,1,""],modules:[10,1,1,""],named_buffers:[10,1,1,""],named_children:[10,1,1,""],named_modules:[10,1,1,""],named_parameters:[10,1,1,""],parameters:[10,1,1,""],register_backward_hook:[10,1,1,""],register_buffer:[10,1,1,""],register_forward_hook:[10,1,1,""],register_forward_pre_hook:[10,1,1,""],register_full_backward_hook:[10,1,1,""],register_full_backward_pre_hook:[10,1,1,""],register_load_state_dict_post_hook:[10,1,1,""],register_module:[10,1,1,""],register_parameter:[10,1,1,""],register_state_dict_pre_hook:[10,1,1,""],requires_grad_:[10,1,1,""],set_extra_state:[10,1,1,""],share_memory:[10,1,1,""],state_dict:[10,1,1,""],to:[10,1,1,""],to_empty:[10,1,1,""],train:[10,1,1,""],type:[10,1,1,""],xpu:[10,1,1,""],zero_grad:[10,1,1,""]},"pina.adaptive_functions.adaptive_func.AdaptiveMish":{"double":[11,1,1,""],"float":[11,1,1,""],add_module:[11,1,1,""],alpha:[11,1,1,""],apply:[11,1,1,""],beta:[11,1,1,""],bfloat16:[11,1,1,""],buffers:[11,1,1,""],children:[11,1,1,""],compile:[11,1,1,""],cpu:[11,1,1,""],cuda:[11,1,1,""],eval:[11,1,1,""],extra_repr:[11,1,1,""],forward:[11,1,1,""],func:[11,1,1,""],gamma:[11,1,1,""],get_buffer:[11,1,1,""],get_extra_state:[11,1,1,""],get_parameter:[11,1,1,""],get_submodule:[11,1,1,""],half:[11,1,1,""],ipu:[11,1,1,""],load_state_dict:[11,1,1,""],modules:[11,1,1,""],named_buffers:[11,1,1,""],named_children:[11,1,1,""],named_modules:[11,1,1,""],named_parameters:[11,1,1,""],parameters:[11,1,1,""],register_backward_hook:[11,1,1,""],register_buffer:[11,1,1,""],register_forward_hook:[11,1,1,""],register_forward_pre_hook:[11,1,1,""],register_full_backward_hook:[11,1,1,""],register_full_backward_pre_hook:[11,1,1,""],register_load_state_dict_post_hook:[11,1,1,""],register_module:[11,1,1,""],register_parameter:[11,1,1,""],register_state_dict_pre_hook:[11,1,1,""],requires_grad_:[11,1,1,""],set_extra_state:[11,1,1,""],share_memory:[11,1,1,""],state_dict:[11,1,1,""],to:[11,1,1,""],to_empty:[11,1,1,""],train:[11,1,1,""],type:[11,1,1,""],xpu:[11,1,1,""],zero_grad:[11,1,1,""]},"pina.adaptive_functions.adaptive_func.AdaptiveReLU":{"double":[12,1,1,""],"float":[12,1,1,""],add_module:[12,1,1,""],alpha:[12,1,1,""],apply:[12,1,1,""],beta:[12,1,1,""],bfloat16:[12,1,1,""],buffers:[12,1,1,""],children:[12,1,1,""],compile:[12,1,1,""],cpu:[12,1,1,""],cuda:[12,1,1,""],eval:[12,1,1,""],extra_repr:[12,1,1,""],forward:[12,1,1,""],func:[12,1,1,""],gamma:[12,1,1,""],get_buffer:[12,1,1,""],get_extra_state:[12,1,1,""],get_parameter:[12,1,1,""],get_submodule:[12,1,1,""],half:[12,1,1,""],ipu:[12,1,1,""],load_state_dict:[12,1,1,""],modules:[12,1,1,""],named_buffers:[12,1,1,""],named_children:[12,1,1,""],named_modules:[12,1,1,""],named_parameters:[12,1,1,""],parameters:[12,1,1,""],register_backward_hook:[12,1,1,""],register_buffer:[12,1,1,""],register_forward_hook:[12,1,1,""],register_forward_pre_hook:[12,1,1,""],register_full_backward_hook:[12,1,1,""],register_full_backward_pre_hook:[12,1,1,""],register_load_state_dict_post_hook:[12,1,1,""],register_module:[12,1,1,""],register_parameter:[12,1,1,""],register_state_dict_pre_hook:[12,1,1,""],requires_grad_:[12,1,1,""],set_extra_state:[12,1,1,""],share_memory:[12,1,1,""],state_dict:[12,1,1,""],to:[12,1,1,""],to_empty:[12,1,1,""],train:[12,1,1,""],type:[12,1,1,""],xpu:[12,1,1,""],zero_grad:[12,1,1,""]},"pina.adaptive_functions.adaptive_func.AdaptiveSIREN":{"double":[13,1,1,""],"float":[13,1,1,""],add_module:[13,1,1,""],alpha:[13,1,1,""],apply:[13,1,1,""],beta:[13,1,1,""],bfloat16:[13,1,1,""],buffers:[13,1,1,""],children:[13,1,1,""],compile:[13,1,1,""],cpu:[13,1,1,""
],cuda:[13,1,1,""],eval:[13,1,1,""],extra_repr:[13,1,1,""],forward:[13,1,1,""],func:[13,1,1,""],gamma:[13,1,1,""],get_buffer:[13,1,1,""],get_extra_state:[13,1,1,""],get_parameter:[13,1,1,""],get_submodule:[13,1,1,""],half:[13,1,1,""],ipu:[13,1,1,""],load_state_dict:[13,1,1,""],modules:[13,1,1,""],named_buffers:[13,1,1,""],named_children:[13,1,1,""],named_modules:[13,1,1,""],named_parameters:[13,1,1,""],parameters:[13,1,1,""],register_backward_hook:[13,1,1,""],register_buffer:[13,1,1,""],register_forward_hook:[13,1,1,""],register_forward_pre_hook:[13,1,1,""],register_full_backward_hook:[13,1,1,""],register_full_backward_pre_hook:[13,1,1,""],register_load_state_dict_post_hook:[13,1,1,""],register_module:[13,1,1,""],register_parameter:[13,1,1,""],register_state_dict_pre_hook:[13,1,1,""],requires_grad_:[13,1,1,""],set_extra_state:[13,1,1,""],share_memory:[13,1,1,""],state_dict:[13,1,1,""],to:[13,1,1,""],to_empty:[13,1,1,""],train:[13,1,1,""],type:[13,1,1,""],xpu:[13,1,1,""],zero_grad:[13,1,1,""]},"pina.adaptive_functions.adaptive_func.AdaptiveSiLU":{"double":[14,1,1,""],"float":[14,1,1,""],add_module:[14,1,1,""],alpha:[14,1,1,""],apply:[14,1,1,""],beta:[14,1,1,""],bfloat16:[14,1,1,""],buffers:[14,1,1,""],children:[14,1,1,""],compile:[14,1,1,""],cpu:[14,1,1,""],cuda:[14,1,1,""],eval:[14,1,1,""],extra_repr:[14,1,1,""],forward:[14,1,1,""],func:[14,1,1,""],gamma:[14,1,1,""],get_buffer:[14,1,1,""],get_extra_state:[14,1,1,""],get_parameter:[14,1,1,""],get_submodule:[14,1,1,""],half:[14,1,1,""],ipu:[14,1,1,""],load_state_dict:[14,1,1,""],modules:[14,1,1,""],named_buffers:[14,1,1,""],named_children:[14,1,1,""],named_modules:[14,1,1,""],named_parameters:[14,1,1,""],parameters:[14,1,1,""],register_backward_hook:[14,1,1,""],register_buffer:[14,1,1,""],register_forward_hook:[14,1,1,""],register_forward_pre_hook:[14,1,1,""],register_full_backward_hook:[14,1,1,""],register_full_backward_pre_hook:[14,1,1,""],register_load_state_dict_post_hook:[14,1,1,""],register_module:[14,1,1,""],register_parameter:[14,1,1,""],register_state_dict_pre_hook:[14,1,1,""],requires_grad_:[14,1,1,""],set_extra_state:[14,1,1,""],share_memory:[14,1,1,""],state_dict:[14,1,1,""],to:[14,1,1,""],to_empty:[14,1,1,""],train:[14,1,1,""],type:[14,1,1,""],xpu:[14,1,1,""],zero_grad:[14,1,1,""]},"pina.adaptive_functions.adaptive_func.AdaptiveSigmoid":{"double":[15,1,1,""],"float":[15,1,1,""],add_module:[15,1,1,""],alpha:[15,1,1,""],apply:[15,1,1,""],beta:[15,1,1,""],bfloat16:[15,1,1,""],buffers:[15,1,1,""],children:[15,1,1,""],compile:[15,1,1,""],cpu:[15,1,1,""],cuda:[15,1,1,""],eval:[15,1,1,""],extra_repr:[15,1,1,""],forward:[15,1,1,""],func:[15,1,1,""],gamma:[15,1,1,""],get_buffer:[15,1,1,""],get_extra_state:[15,1,1,""],get_parameter:[15,1,1,""],get_submodule:[15,1,1,""],half:[15,1,1,""],ipu:[15,1,1,""],load_state_dict:[15,1,1,""],modules:[15,1,1,""],named_buffers:[15,1,1,""],named_children:[15,1,1,""],named_modules:[15,1,1,""],named_parameters:[15,1,1,""],parameters:[15,1,1,""],register_backward_hook:[15,1,1,""],register_buffer:[15,1,1,""],register_forward_hook:[15,1,1,""],register_forward_pre_hook:[15,1,1,""],register_full_backward_hook:[15,1,1,""],register_full_backward_pre_hook:[15,1,1,""],register_load_state_dict_post_hook:[15,1,1,""],register_module:[15,1,1,""],register_parameter:[15,1,1,""],register_state_dict_pre_hook:[15,1,1,""],requires_grad_:[15,1,1,""],set_extra_state:[15,1,1,""],share_memory:[15,1,1,""],state_dict:[15,1,1,""],to:[15,1,1,""],to_empty:[15,1,1,""],train:[15,1,1,""],type:[15,1,1,""],xpu:[15,1,1,""],zero_grad:[15,1,1
,""]},"pina.adaptive_functions.adaptive_func.AdaptiveSoftmax":{"double":[16,1,1,""],"float":[16,1,1,""],add_module:[16,1,1,""],alpha:[16,1,1,""],apply:[16,1,1,""],beta:[16,1,1,""],bfloat16:[16,1,1,""],buffers:[16,1,1,""],children:[16,1,1,""],compile:[16,1,1,""],cpu:[16,1,1,""],cuda:[16,1,1,""],eval:[16,1,1,""],extra_repr:[16,1,1,""],forward:[16,1,1,""],func:[16,1,1,""],gamma:[16,1,1,""],get_buffer:[16,1,1,""],get_extra_state:[16,1,1,""],get_parameter:[16,1,1,""],get_submodule:[16,1,1,""],half:[16,1,1,""],ipu:[16,1,1,""],load_state_dict:[16,1,1,""],modules:[16,1,1,""],named_buffers:[16,1,1,""],named_children:[16,1,1,""],named_modules:[16,1,1,""],named_parameters:[16,1,1,""],parameters:[16,1,1,""],register_backward_hook:[16,1,1,""],register_buffer:[16,1,1,""],register_forward_hook:[16,1,1,""],register_forward_pre_hook:[16,1,1,""],register_full_backward_hook:[16,1,1,""],register_full_backward_pre_hook:[16,1,1,""],register_load_state_dict_post_hook:[16,1,1,""],register_module:[16,1,1,""],register_parameter:[16,1,1,""],register_state_dict_pre_hook:[16,1,1,""],requires_grad_:[16,1,1,""],set_extra_state:[16,1,1,""],share_memory:[16,1,1,""],state_dict:[16,1,1,""],to:[16,1,1,""],to_empty:[16,1,1,""],train:[16,1,1,""],type:[16,1,1,""],xpu:[16,1,1,""],zero_grad:[16,1,1,""]},"pina.adaptive_functions.adaptive_func.AdaptiveSoftmin":{"double":[17,1,1,""],"float":[17,1,1,""],add_module:[17,1,1,""],alpha:[17,1,1,""],apply:[17,1,1,""],beta:[17,1,1,""],bfloat16:[17,1,1,""],buffers:[17,1,1,""],children:[17,1,1,""],compile:[17,1,1,""],cpu:[17,1,1,""],cuda:[17,1,1,""],eval:[17,1,1,""],extra_repr:[17,1,1,""],forward:[17,1,1,""],func:[17,1,1,""],gamma:[17,1,1,""],get_buffer:[17,1,1,""],get_extra_state:[17,1,1,""],get_parameter:[17,1,1,""],get_submodule:[17,1,1,""],half:[17,1,1,""],ipu:[17,1,1,""],load_state_dict:[17,1,1,""],modules:[17,1,1,""],named_buffers:[17,1,1,""],named_children:[17,1,1,""],named_modules:[17,1,1,""],named_parameters:[17,1,1,""],parameters:[17,1,1,""],register_backward_hook:[17,1,1,""],register_buffer:[17,1,1,""],register_forward_hook:[17,1,1,""],register_forward_pre_hook:[17,1,1,""],register_full_backward_hook:[17,1,1,""],register_full_backward_pre_hook:[17,1,1,""],register_load_state_dict_post_hook:[17,1,1,""],register_module:[17,1,1,""],register_parameter:[17,1,1,""],register_state_dict_pre_hook:[17,1,1,""],requires_grad_:[17,1,1,""],set_extra_state:[17,1,1,""],share_memory:[17,1,1,""],state_dict:[17,1,1,""],to:[17,1,1,""],to_empty:[17,1,1,""],train:[17,1,1,""],type:[17,1,1,""],xpu:[17,1,1,""],zero_grad:[17,1,1,""]},"pina.adaptive_functions.adaptive_func.AdaptiveTanh":{"double":[18,1,1,""],"float":[18,1,1,""],add_module:[18,1,1,""],alpha:[18,1,1,""],apply:[18,1,1,""],beta:[18,1,1,""],bfloat16:[18,1,1,""],buffers:[18,1,1,""],children:[18,1,1,""],compile:[18,1,1,""],cpu:[18,1,1,""],cuda:[18,1,1,""],eval:[18,1,1,""],extra_repr:[18,1,1,""],forward:[18,1,1,""],func:[18,1,1,""],gamma:[18,1,1,""],get_buffer:[18,1,1,""],get_extra_state:[18,1,1,""],get_parameter:[18,1,1,""],get_submodule:[18,1,1,""],half:[18,1,1,""],ipu:[18,1,1,""],load_state_dict:[18,1,1,""],modules:[18,1,1,""],named_buffers:[18,1,1,""],named_children:[18,1,1,""],named_modules:[18,1,1,""],named_parameters:[18,1,1,""],parameters:[18,1,1,""],register_backward_hook:[18,1,1,""],register_buffer:[18,1,1,""],register_forward_hook:[18,1,1,""],register_forward_pre_hook:[18,1,1,""],register_full_backward_hook:[18,1,1,""],register_full_backward_pre_hook:[18,1,1,""],register_load_state_dict_post_hook:[18,1,1,""],register_module:[18,1,1,""],reg
ister_parameter:[18,1,1,""],register_state_dict_pre_hook:[18,1,1,""],requires_grad_:[18,1,1,""],set_extra_state:[18,1,1,""],share_memory:[18,1,1,""],state_dict:[18,1,1,""],to:[18,1,1,""],to_empty:[18,1,1,""],train:[18,1,1,""],type:[18,1,1,""],xpu:[18,1,1,""],zero_grad:[18,1,1,""]},"pina.adaptive_functions.adaptive_func_interface":{AdaptiveActivationFunctionInterface:[9,0,1,""]},"pina.adaptive_functions.adaptive_func_interface.AdaptiveActivationFunctionInterface":{alpha:[9,1,1,""],beta:[9,1,1,""],forward:[9,1,1,""],func:[9,1,1,""],gamma:[9,1,1,""]},"pina.callbacks.adaptive_refinment_callbacks":{R3Refinement:[19,0,1,""]},"pina.callbacks.adaptive_refinment_callbacks.R3Refinement":{on_train_epoch_end:[19,1,1,""],on_train_start:[19,1,1,""]},"pina.callbacks.optimizer_callbacks":{SwitchOptimizer:[20,0,1,""]},"pina.callbacks.optimizer_callbacks.SwitchOptimizer":{on_train_epoch_start:[20,1,1,""]},"pina.callbacks.processing_callbacks":{MetricTracker:[21,0,1,""]},"pina.callbacks.processing_callbacks.MetricTracker":{metrics:[21,1,1,""],on_train_epoch_end:[21,1,1,""]},"pina.condition":{Condition:[22,0,1,""]},"pina.equation.equation":{Equation:[23,0,1,""]},"pina.equation.equation.Equation":{residual:[23,1,1,""]},"pina.equation.equation_factory":{FixedFlux:[23,0,1,""],FixedGradient:[23,0,1,""],FixedValue:[23,0,1,""],Laplace:[23,0,1,""]},"pina.equation.equation_interface":{EquationInterface:[23,0,1,""]},"pina.equation.equation_interface.EquationInterface":{residual:[23,1,1,""]},"pina.equation.system_equation":{SystemEquation:[23,0,1,""]},"pina.equation.system_equation.SystemEquation":{residual:[23,1,1,""]},"pina.geometry":{cartesian:[24,2,0,"-"],difference_domain:[25,2,0,"-"],ellipsoid:[26,2,0,"-"],exclusion_domain:[27,2,0,"-"],intersection_domain:[28,2,0,"-"],location:[29,2,0,"-"],operation_interface:[30,2,0,"-"],simplex:[31,2,0,"-"],union_domain:[32,2,0,"-"]},"pina.geometry.difference_domain":{Difference:[25,0,1,""]},"pina.geometry.difference_domain.Difference":{is_inside:[25,1,1,""],sample:[25,1,1,""]},"pina.geometry.exclusion_domain":{Exclusion:[27,0,1,""]},"pina.geometry.exclusion_domain.Exclusion":{is_inside:[27,1,1,""],sample:[27,1,1,""]},"pina.geometry.intersection_domain":{Intersection:[28,0,1,""]},"pina.geometry.intersection_domain.Intersection":{is_inside:[28,1,1,""],sample:[28,1,1,""]},"pina.geometry.location":{Location:[29,0,1,""]},"pina.geometry.location.Location":{is_inside:[29,1,1,""],sample:[29,1,1,""]},"pina.geometry.operation_interface":{OperationInterface:[30,0,1,""]},"pina.geometry.operation_interface.OperationInterface":{geometries:[30,1,1,""],is_inside:[30,1,1,""],variables:[30,1,1,""]},"pina.geometry.union_domain":{Union:[32,0,1,""]},"pina.geometry.union_domain.Union":{is_inside:[32,1,1,""],sample:[32,1,1,""]},"pina.label_tensor":{LabelTensor:[33,0,1,""]},"pina.label_tensor.LabelTensor":{append:[33,1,1,""],clone:[33,1,1,""],cpu:[33,1,1,""],cuda:[33,1,1,""],detach:[33,1,1,""],extract:[33,1,1,""],labels:[33,1,1,""],requires_grad_:[33,1,1,""],select:[33,1,1,""],to:[33,1,1,""],vstack:[33,1,1,""]},"pina.loss":{LossInterface:[44,0,1,""],LpLoss:[45,0,1,""],PowerLoss:[46,0,1,""]},"pina.loss.LossInterface":{forward:[44,1,1,""]},"pina.loss.LpLoss":{forward:[45,1,1,""]},"pina.loss.PowerLoss":{forward:[46,1,1,""]},"pina.model":{network:[57,2,0,"-"]},"pina.model.avno":{AveragingNeuralOperator:[47,0,1,""]},"pina.model.avno.AveragingNeuralOperator":{forward:[47,1,1,""]},"pina.model.base_no":{KernelNeuralOperator:[48,0,1,""]},"pina.model.base_no.KernelNeuralOperator":{forward:[48,1,1,""],integral_k
ernels:[48,1,1,""],lifting_operator:[48,1,1,""],projection_operator:[48,1,1,""]},"pina.model.deeponet":{DeepONet:[49,0,1,""],MIONet:[55,0,1,""]},"pina.model.deeponet.DeepONet":{branch_net:[49,1,1,""],forward:[49,1,1,""],trunk_net:[49,1,1,""]},"pina.model.deeponet.MIONet":{aggregator:[55,1,1,""],forward:[55,1,1,""],indeces_variables_extracted:[55,1,1,""],model:[55,1,1,""],reduction:[55,1,1,""],scale:[55,1,1,""],translation:[55,1,1,""]},"pina.model.feed_forward":{FeedForward:[50,0,1,""],ResidualFeedForward:[51,0,1,""]},"pina.model.feed_forward.FeedForward":{forward:[50,1,1,""]},"pina.model.feed_forward.ResidualFeedForward":{forward:[51,1,1,""]},"pina.model.fno":{FNO:[52,0,1,""],FourierIntegralKernel:[53,0,1,""]},"pina.model.fno.FNO":{forward:[52,1,1,""]},"pina.model.fno.FourierIntegralKernel":{forward:[53,1,1,""]},"pina.model.layers.embedding":{FourierFeatureEmbedding:[38,0,1,""],PeriodicBoundaryEmbedding:[40,0,1,""]},"pina.model.layers.embedding.FourierFeatureEmbedding":{forward:[38,1,1,""],sigma:[38,1,1,""]},"pina.model.layers.embedding.PeriodicBoundaryEmbedding":{forward:[40,1,1,""],period:[40,1,1,""]},"pina.model.layers.fourier":{FourierBlock1D:[37,0,1,""],FourierBlock2D:[37,0,1,""],FourierBlock3D:[37,0,1,""]},"pina.model.layers.fourier.FourierBlock1D":{forward:[37,1,1,""]},"pina.model.layers.fourier.FourierBlock2D":{forward:[37,1,1,""]},"pina.model.layers.fourier.FourierBlock3D":{forward:[37,1,1,""]},"pina.model.layers.pod":{PODBlock:[41,0,1,""]},"pina.model.layers.pod.PODBlock":{basis:[41,1,1,""],expand:[41,1,1,""],fit:[41,1,1,""],forward:[41,1,1,""],rank:[41,1,1,""],reduce:[41,1,1,""],scale_coefficients:[41,1,1,""],scaler:[41,1,1,""]},"pina.model.layers.residual":{ResidualBlock:[42,0,1,""]},"pina.model.layers.residual.ResidualBlock":{forward:[42,1,1,""]},"pina.model.layers.spectral":{SpectralConvBlock1D:[43,0,1,""],SpectralConvBlock2D:[43,0,1,""],SpectralConvBlock3D:[43,0,1,""]},"pina.model.layers.spectral.SpectralConvBlock1D":{forward:[43,1,1,""]},"pina.model.layers.spectral.SpectralConvBlock2D":{forward:[43,1,1,""]},"pina.model.layers.spectral.SpectralConvBlock3D":{forward:[43,1,1,""]},"pina.model.lno":{LowRankNeuralOperator:[54,0,1,""]},"pina.model.lno.LowRankNeuralOperator":{forward:[54,1,1,""]},"pina.model.multi_feed_forward":{MultiFeedForward:[56,0,1,""]},"pina.model.network":{Network:[57,0,1,""]},"pina.model.network.Network":{forward:[57,1,1,""],forward_map:[57,1,1,""]},"pina.operators":{advection:[58,3,1,""],div:[58,3,1,""],grad:[58,3,1,""],laplacian:[58,3,1,""]},"pina.problem":{abstract_problem:[60,2,0,"-"],parametric_problem:[61,2,0,"-"],spatial_problem:[62,2,0,"-"],timedep_problem:[63,2,0,"-"]},"pina.problem.abstract_problem":{AbstractProblem:[60,0,1,""]},"pina.problem.abstract_problem.AbstractProblem":{add_points:[60,1,1,""],conditions:[60,1,1,""],discretise_domain:[60,1,1,""],domain:[60,1,1,""],have_sampled_points:[60,1,1,""],input_variables:[60,1,1,""],not_sampled_points:[60,1,1,""],output_variables:[60,1,1,""]},"pina.problem.parametric_problem":{ParametricProblem:[61,0,1,""]},"pina.problem.parametric_problem.ParametricProblem":{parameter_domain:[61,1,1,""],parameters:[61,1,1,""]},"pina.problem.spatial_problem":{SpatialProblem:[62,0,1,""]},"pina.problem.spatial_problem.SpatialProblem":{spatial_domain:[62,1,1,""],spatial_variables:[62,1,1,""]},"pina.problem.timedep_problem":{TimeDependentProblem:[63,0,1,""]},"pina.problem.timedep_problem.TimeDependentProblem":{temporal_domain:[63,1,1,""],temporal_variable:[63,1,1,""]},"pina.solvers.garom":{GAROM:[67,0,1,""]},"pina.solvers.
garom.GAROM":{configure_optimizers:[67,1,1,""],forward:[67,1,1,""],training_step:[67,1,1,""]},"pina.solvers.pinns.basepinn":{PINNInterface:[64,0,1,""]},"pina.solvers.pinns.basepinn.PINNInterface":{compute_residual:[64,1,1,""],current_condition_name:[64,1,1,""],loss:[64,1,1,""],loss_data:[64,1,1,""],loss_phys:[64,1,1,""],on_train_epoch_end:[64,1,1,""],store_log:[64,1,1,""],training_step:[64,1,1,""]},"pina.solvers.pinns.causalpinn":{CausalPINN:[65,0,1,""]},"pina.solvers.pinns.causalpinn.CausalPINN":{eps:[65,1,1,""],loss_phys:[65,1,1,""]},"pina.solvers.pinns.competitive_pinn":{CompetitivePINN:[66,0,1,""]},"pina.solvers.pinns.competitive_pinn.CompetitivePINN":{configure_optimizers:[66,1,1,""],discriminator:[66,1,1,""],forward:[66,1,1,""],loss_data:[66,1,1,""],loss_phys:[66,1,1,""],neural_net:[66,1,1,""],on_train_batch_end:[66,1,1,""],optimizer_discriminator:[66,1,1,""],optimizer_model:[66,1,1,""],scheduler_discriminator:[66,1,1,""],scheduler_model:[66,1,1,""]},"pina.solvers.pinns.gpinn":{GPINN:[68,0,1,""]},"pina.solvers.pinns.gpinn.GPINN":{loss_phys:[68,1,1,""]},"pina.solvers.pinns.pinn":{PINN:[69,0,1,""]},"pina.solvers.pinns.pinn.PINN":{configure_optimizers:[69,1,1,""],forward:[69,1,1,""],loss_phys:[69,1,1,""],neural_net:[69,1,1,""],scheduler:[69,1,1,""]},"pina.solvers.pinns.rbapinn":{RBAPINN:[70,0,1,""]},"pina.solvers.pinns.rbapinn.RBAPINN":{loss_phys:[70,1,1,""]},"pina.solvers.pinns.sapinn":{SAPINN:[72,0,1,""]},"pina.solvers.pinns.sapinn.SAPINN":{configure_optimizers:[72,1,1,""],forward:[72,1,1,""],loss_data:[72,1,1,""],loss_phys:[72,1,1,""],neural_net:[72,1,1,""],on_load_checkpoint:[72,1,1,""],on_train_batch_end:[72,1,1,""],on_train_start:[72,1,1,""],optimizer_model:[72,1,1,""],optimizer_weights:[72,1,1,""],scheduler_model:[72,1,1,""],scheduler_weights:[72,1,1,""],weights_dict:[72,1,1,""]},"pina.solvers.rom":{ReducedOrderModelSolver:[71,0,1,""]},"pina.solvers.rom.ReducedOrderModelSolver":{forward:[71,1,1,""],loss_data:[71,1,1,""],neural_net:[71,1,1,""]},"pina.solvers.solver":{SolverInterface:[73,0,1,""]},"pina.solvers.solver.SolverInterface":{configure_optimizers:[73,1,1,""],forward:[73,1,1,""],models:[73,1,1,""],on_train_start:[73,1,1,""],optimizers:[73,1,1,""],problem:[73,1,1,""],training_step:[73,1,1,""]},"pina.solvers.supervised":{SupervisedSolver:[74,0,1,""]},"pina.solvers.supervised.SupervisedSolver":{configure_optimizers:[74,1,1,""],forward:[74,1,1,""],loss:[74,1,1,""],loss_data:[74,1,1,""],neural_net:[74,1,1,""],scheduler:[74,1,1,""],training_step:[74,1,1,""]},"pina.trainer":{Trainer:[75,0,1,""]},"pina.trainer.Trainer":{solver:[75,1,1,""],train:[75,1,1,""]},pina:{loss:[44,2,0,"-"],operators:[58,2,0,"-"],trainer:[75,2,0,"-"]}},objnames:{"0":["py","class","Python class"],"1":["py","method","Python method"],"2":["py","module","Python module"],"3":["py","function","Python 
function"]},objtypes:{"0":"py:class","1":"py:method","2":"py:module","3":"py:function"},terms:{"0246e":33,"02e":81,"03e":81,"04it":78,"0656e":33,"0674e":76,"08e":78,"09e":[81,82],"0it":84,"100it":84,"12e":78,"13e":[80,81],"14e":82,"17e":[78,81],"1841e":33,"18e":81,"1986e":33,"19e":78,"1it":[81,82],"2065e":33,"20l":[6,7,8,10,11,12,13,14,15,16,17,18],"20m1318043":51,"21e":78,"21it":80,"22m1477751":55,"2392e":33,"25e":82,"26e":81,"27e":78,"27it":78,"28it":84,"29e":81,"2_2":[65,66,68,69,71,72,74],"33it":81,"3500e":33,"36it":87,"3824e":33,"38e":80,"39e":78,"4268e":33,"44e":78,"46it":78,"47it":88,"48e":78,"49e":78,"49it":78,"5179e":33,"5446e":33,"54e":81,"55e":81,"56e":78,"56it":77,"58it":78,"593161e":87,"59it":77,"5_0":86,"60it":78,"6150e":33,"61e":80,"62it":81,"640x480":76,"65it":84,"66e":81,"66it":80,"69e":81,"69it":82,"7043e":33,"7116e":33,"73e":81,"74e":81,"75e":78,"76e":81,"76it":84,"77e":78,"77it":80,"78it":82,"7th":[6,7,8,9,10,11,12,13,14,15,16,17,18],"80it":78,"8194e":33,"81it":[81,84],"82e":81,"84e":81,"84it":78,"85it":78,"865598e":87,"8892e":33,"88e":80,"91e":80,"9266e":33,"93e":78,"9452e":33,"94e":81,"94it":81,"97e":[78,82],"99it":78,"abstract":[23,29,30,44,60,61,62,63,64,73],"boolean":29,"break":[6,7,8,10,11,12,13,14,15,16,17,18],"case":[7,22,24,33,35,38,45,46,58,65,66,68,69,70,71,72,73,76,77,79,80,81,82,83,84,85,86,88],"class":[5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,77,78,80,81,82,83,84,86,87,88],"default":[6,7,8,9,10,11,12,13,14,15,16,17,18,23,24,25,26,27,28,29,31,32,33,34,35,39,40,41,42,44,45,46,47,49,50,51,52,53,54,55,57,58,59,60,64,65,66,67,68,69,70,71,72,73,74,75,76,78,79,82,83,85],"final":[3,24,37,47,48,49,52,53,54,55,76,78,80,81,84,89],"float":[6,7,8,9,10,11,12,13,14,15,16,17,18,23,36,38,40,65,67,70,71,74,77,83,84,87],"function":[6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,23,33,34,35,36,37,39,40,42,44,45,46,47,48,49,50,51,52,53,54,55,57,58,59,64,65,66,67,68,69,70,71,72,73,74,76,77,78,79,80,84,85,88],"import":[3,33,61,62,63,76,77,78,79,80,81,82,83,84,85,86,87,88],"int":[6,7,8,10,11,12,13,14,15,16,17,18,19,20,24,25,26,27,28,31,32,34,35,37,38,39,40,41,42,43,45,46,47,49,50,51,52,53,54,55,59,60,64,65,66,67,70,72,74,75,78,86,87],"new":[3,6,7,8,10,11,12,13,14,15,16,17,18,19,20,24,33,64,77,80,81,82,85,87],"public":1,"return":[6,7,8,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,57,58,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,85,86,87,88],"short":76,"static":[33,78,81],"super":[35,73,77,79,80,81,82,83,85,87],"switch":[20,73,78],"throw":[6,7,8,10,11,12,13,14,15,16,17,18],"true":[6,7,8,10,11,12,13,14,15,16,17,18,24,25,26,27,28,30,31,32,33,35,39,41,45,46,49,50,51,54,55,59,64,66,67,71,72,73,74,76,77,78,80,81,83,84,85,86,88],"try":[77,78,80,83,85],"var":[6,7,8,10,11,12,13,14,15,16,17,18,86],"while":[4,6,7,8,10,11,12,13,14,15,16,17,18,23,40,65,78,83,85,90],AND:0,Adding:[24,60],And:[6,7,8,10,11,12,13,14,15,16,17,18],Axes:76,BUT:0,But:73,FOR:0,For:[6,7,8,10,11,12,13,14,15,16,17,18,26,31,33,34,35,39,48,71,73,76,78,80,81,82,83,85,88],Its:[6,7,8,10,11,12,13,14,15,16,17,18],MPS:[58,78],NOT:[0,78],NOs:90,ODE:[61,62,76],One:86,Such:[33,87],THE:0,That:83,The:[0,2,3,4,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,28,30,33,34,35,36,37,38,39,40,41,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,
65,66,67,68,69,70,71,72,73,74,75,76,77,78,80,83,84,85,86,87,90],Their:81,Then:[47,48,52,53,54,86,87],There:[3,76,77,78,80,81,82,83,88,89],These:[6,7,8,10,11,12,13,14,15,16,17,18,58,76,81,86],USE:0,Use:83,Uses:31,Using:35,WITH:0,__call__:[6,7,8,10,11,12,13,14,15,16,17,18],__init:[26,31],__init__:[23,24,26,30,35,73,78,79,80,81,82,83,85,87],__version__:87,_collect:21,_loss:44,_lrschedul:[66,72],_net:35,_rst:80,_sample_everi:19,a3055:51,a3081:51,a3490:55,a351:55,a_k:77,abil:83,abl:[3,77,78,81,82,83,90],about:[3,85],abov:[0,6,7,8,10,11,12,13,14,15,16,17,18,77,79,80,83,86],abovement:81,abs:[37,77,87,88],absolut:[77,88],abstract_problem:[22,61,62,63],abstractproblem:[2,22,23,57,59,61,62,63,64,65,66,67,68,69,70,71,72,73,74,76,77,83,84],academ:1,acceler:[6,7,8,9,10,11,12,13,14,15,16,17,18,76,77,80,81,82,83,84,86,87,88],acceller:58,accept:[3,6,7,8,10,11,12,13,14,15,16,17,18,83],access:[6,7,8,10,11,12,13,14,15,16,17,18,76,78,85],accompani:3,accomplish:87,accumul:[19,78,90],accumulate_grad_batch:73,accuraci:[76,77,80,81,82,83,87,88],achiev:[6,7,8,10,11,12,13,14,15,16,17,18,86],acknowledg:[1,89],aco:83,across:[2,24,78,86,90],act:51,action:[0,79,88],activ:[6,7,8,9,10,11,12,13,14,15,16,17,18,34,35,36,37,39,42,47,50,51,52,53,54,77,78],actual:[76,87],adam:[65,66,67,68,69,70,71,72,73,74,83,87],adap:72,adapt:[6,7,8,9,10,11,12,13,14,15,16,17,18,72,80],adaptive_func_interfac:[6,7,8,10,11,12,13,14,15,16,17,18],adaptive_funct:[6,7,8,10,11,12,13,14,15,16,17,18],adaptiveactivationfunctioninterfac:[6,7,8,10,11,12,13,14,15,16,17,18],add:[6,7,8,10,11,12,13,14,15,16,17,18,57,59,60,78,85,90],add_modul:[6,7,8,10,11,12,13,14,15,16,17,18],add_point:60,added:[6,7,8,10,11,12,13,14,15,16,17,18,37,81],adding:78,addit:[6,7,8,10,11,12,13,14,15,16,17,18,61,64,65,68,69,70,71,72,73,74,75,79,81,83,90],address:81,adjust:88,adp:9,advanc:[1,76,78,87,90],advect:58,adversari:67,affect:[6,7,8,10,11,12,13,14,15,16,17,18,33,81],affin:[34,39],after:[6,7,8,10,11,12,13,14,15,16,17,18,21,30,36,39,40,41,42,50,51,54,73,76,78,81,82,86],again:[76,80],against:[6,7,8,10,11,12,13,14,15,16,17,18,64,66,71,72,74],aggreg:[21,23,49,55],agre:77,aim:[2,65,66,68,69,70,71,72,74,76,78,79,80,87,90],aka:[59,87],algorithm:[19,26,31,35,67],alia:[6,7,8,10,11,12,13,14,15,16,17,18],all:[0,6,7,8,10,11,12,13,14,15,16,17,18,19,23,24,25,26,27,28,30,31,32,33,40,41,44,45,46,48,57,58,59,60,64,72,73,75,76,77,78,80,81,82,83,84,85,86,88,89],allow:[6,7,8,10,11,12,13,14,15,16,17,18,20,33,56,76,78,80,85],almost:[3,81],along:87,alongsid:[6,7,8,10,11,12,13,14,15,16,17,18],alpha:[6,7,8,9,10,11,12,13,14,15,16,17,18,61,62,81,85],alreadi:[3,6,7,8,10,11,12,13,14,15,16,17,18,23,60,77,78,80,82,83],also:[3,6,7,8,10,11,12,13,14,15,16,17,18,24,26,31,32,33,40,50,51,67,73,76,78,79,80,83,85,86,87,88,89],alwai:[3,6,7,8,10,11,12,13,14,15,16,17,18,24,35,78,80,81,83],always_cal:[6,7,8,10,11,12,13,14,15,16,17,18],amaz:78,ambient:89,ameya:[6,7,8,9,10,11,12,13,14,15,16,17,18],among:[6,7,8,9,10,11,12,13,14,15,16,17,18,61,85],anagnostopoulo:70,analog:79,analysi:81,analyt:[76,88],analyz:81,anandkumar:[37,39,48,52,53,54],ani:[0,3,6,7,8,10,11,12,13,14,15,16,17,18,23,29,30,33,39,50,51,54,57,58,66,72,73,76,77,78,87,88,90],anna:[1,89],anneal:65,anoth:81,anymor:4,anytim:64,api:[2,75,76,90],append:[33,78,85],appli:[6,7,8,10,11,12,13,14,15,16,17,18,23,30,36,37,38,40,42,44,45,46,47,48,50,51,52,53,54,57,60,65,68,70,76,78,80,83,87,88],applic:[39,48,54,66,72,78,87],appreciabili:81,approach:[71,78,80,81,84,87,90],appropri:72,approx:[48,88],approxim:[34,39,40,47,48,49,54,67,74,77,80,82,83,87,88,90],arang:83,arbitrari:[
45,46,78,88,90],architectur:[47,49,52,54,55,66,77,80,83,87],archiutectur:87,aren:3,arg:[6,7,8,10,11,12,13,14,15,16,17,18,22,33,44,64,65,66,67,68,69,70,71,72,73,74],argument:[6,7,8,10,11,12,13,14,15,16,17,18,19,20,21,73,75,78,81,85],aris:0,arka:19,aroma:89,around:[78,80,85],arounf:23,arrai:83,articl:1,arxiv:[6,7,8,9,10,11,12,13,14,15,16,17,18,19,34,37,40,42,47,52,53,67],ask:88,aspect:[75,77],assert:[76,77,80,81,82,88],assign:[3,6,7,8,10,11,12,13,14,15,16,17,18,79],associ:[0,6,7,8,10,11,12,13,14,15,16,17,18,66,72,73],assum:[40,71],asynchron:[6,7,8,10,11,12,13,14,15,16,17,18],attent:70,attr:87,attribut:[6,7,8,10,11,12,13,14,15,16,17,18,33,76,87],attributeerror:[6,7,8,10,11,12,13,14,15,16,17,18],augment:[38,40,57,64,65,68,69,70,71,72,73,74,88],author:[0,1,83,84],auto:77,autoencod:[5,71],autograd:[6,7,8,10,11,12,13,14,15,16,17,18,33,81],automat:[23,35,64,73,78,81,87],automatic_optim:73,avail:[2,24,25,26,27,28,31,32,49,55,59,60,73,76,77,78,79,80,83,84,85,88],avalu:[47,54],averag:[2,5,6,7,8,10,11,12,13,14,15,16,17,18,39,54,64,71,74,78],averagingneuraloper:[2,47,77],avnoblock:34,avoid:[6,7,8,10,11,12,13,14,15,16,17,18,41,45,46,76,77,78,80,81,82,83,84,88],axes:[83,88],axi:[59,86],axs:[77,85,87],azizzadenesh:[37,39,48,52,53,54],back:[37,43,77,87],backhand:90,backward:[6,7,8,10,11,12,13,14,15,16,17,18,33,73,83],backward_pr:[6,7,8,10,11,12,13,14,15,16,17,18],ballarin:87,banach:55,bar:[26,73,78],bare:76,barycentr:31,barycentric_coordinate_system:31,base:[6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,85,90],base_no:[47,52,54],basecontinuousconv:35,basepinn:[66,69,72],bash:3,basi:[39,41,54],basic:[77,80,83,87],batch:[37,43,45,46,52,53,64,66,67,72,73,74,75,83],batch_idx:[64,66,67,72,73,74],batch_siz:[34,35,39,47,48,54,75,77,83,84,87],batchnorm:[6,7,8,10,11,12,13,14,15,16,17,18],beatriz:71,becaus:[33,76,78,85],becom:77,been:[1,80,89],befor:[6,7,8,10,11,12,13,14,15,16,17,18,41,49,55,76,78,80,81,85,86],begin:[7,33,45,46,65,66,68,69,70,71,72,76,77,79,80,81,82,83,84,86,88],behavior:[6,7,8,10,11,12,13,14,15,16,17,18,78],behaviour:80,being:[3,6,7,8,10,11,12,13,14,15,16,17,18,41,44,85],belong:[85,86],below:[6,7,8,10,11,12,13,14,15,16,17,18,24,60,73,76,78,80,81],benchmark:[76,78],best:[3,78],beta:[6,7,8,9,10,11,12,13,14,15,16,17,18,81],better:[78,81,82,84,86],between:[6,7,8,10,11,12,13,14,15,16,17,18,20,28,39,45,46,48,54,64,66,71,72,73,74,76,77,81,83],bfloat16:[6,7,8,10,11,12,13,14,15,16,17,18],bhattacharya:[37,39,48,52,53,54],bia:[6,7,8,10,11,12,13,14,15,16,17,18,34,35,38,39,49,50,51,54,55,77,80,81],bibtex:1,big:80,binari:4,bit:73,block:[34,37,39,42,43,47,52,53,54,83],blue:[85,88],bool:[6,7,8,10,11,12,13,14,15,16,17,18,24,25,26,27,28,29,30,31,32,33,35,39,41,42,45,46,49,50,51,54,55,59,67],border:[25,27,28,29,30,77,85],both:[6,7,8,10,11,12,13,14,15,16,17,18,35,37,39,49,55,56,57,65,66,68,69,70,72,76,83,85,86,90],bottleneck:70,bound1:60,bound:[6,7,8,10,11,12,13,14,15,16,17,18,85],boundari:[2,5,22,23,77,79,81,82,84,85,86],braga:72,branch:[3,49,55],branch_net1:55,branch_net2:55,branch_net:49,buf:[6,7,8,10,11,12,13,14,15,16,17,18],buffer:[6,7,8,10,11,12,13,14,15,16,17,18],bug:3,build:[2,5,6,7,8,10,11,12,13,14,15,16,17,18,41,77,78,79,80,82],built:[2,4,77,78,83],burger_equ:79,burgers1d:79,burgers1dequ:79,c_e_b_u_point:85,c_e_nb_d_point:85,c_e_nb_u_point:85,calcul:[19,23,45,46,58,64,65,66,68,69,70,72,73,76,83,84],call:[6,7,8,9,10,11,12,13,14,15,16,17,18,19,35,49,5
0,51,55,64,66,71,72,73,76,78,79,80,87],callaback:78,callabl:[6,7,8,9,10,11,12,13,14,15,16,17,18,23,49,55],callback:[65,73,76,81,86,90],callbacks_:2,caller:[6,7,8,10,11,12,13,14,15,16,17,18],cam:81,can:[2,3,4,6,7,8,10,11,12,13,14,15,16,17,18,20,22,23,35,38,40,45,46,47,48,49,52,54,55,64,65,66,68,69,70,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,90],cannot:80,cap:28,captur:[78,80],care:64,cart_ellipse_b_union:85,cart_ellipse_nb_differ:85,cart_ellipse_nb_union:85,cartesian1:[25,27,28,32],cartesian2:[25,27,28,32],cartesian:[25,27,28,85],cartesian_dict:24,cartesian_sampl:85,cartesiandomain:[2,25,27,28,30,32,60,61,62,63,76,78,79,80,81,82,85,86,87,88],cartesiandomain_pt:81,cartesianproblem:76,cast:[6,7,8,10,11,12,13,14,15,16,17,18,57,87],cat:[80,83],causal:65,causalpinn:2,cconv:83,cctivat:42,cdot:[40,48,82,83,84,88],cdoubl:[6,7,8,10,11,12,13,14,15,16,17,18],cell:81,celu:[2,6],center:[81,83],centeri:83,centerx:83,centroid:83,certain:[6,7,8,10,11,12,13,14,15,16,17,18,78],cfd:89,challeng:[77,80],chang:[3,6,7,8,10,11,12,13,14,15,16,17,18,33,41,65,77,78,81,83],channel:[37,43,52,53,83],channels_last:[6,7,8,10,11,12,13,14,15,16,17,18],chaotic:77,character:80,characterist:79,charg:0,chebyshev:[24,60],check:[6,7,8,10,11,12,13,14,15,16,17,18,23,24,25,26,27,28,29,30,31,32,33,40,49,55,60,73,76,87],check_bord:[24,25,26,27,28,29,30,31,32],check_val_every_n_epoch:73,checkpoint:[66,72,87],child:[6,7,8,10,11,12,13,14,15,16,17,18,29],children:[6,7,8,10,11,12,13,14,15,16,17,18],choos:[2,26,31,56,64,73,76,78,83,88],choosen:75,chosen:[24,47,54],christian:26,circ:48,circl:83,circle_grid:83,circleproblem:83,cite:90,claim:0,clamp:64,classic:88,classif:83,clean:3,clear:[6,7,8,10,11,12,13,14,15,16,17,18,80,83,88],clearli:[80,82,83,84],clip:78,clippl:78,clone:[4,33,83],close:[81,86],closur:73,cma:[38,70],cmap:77,code:[3,4,23,40,78,80,81,82,83,88,90],code_formatt:3,codit:[67,71],codomain:[34,39],coeff:41,coeffic:87,coeffici:[39,41,61,67,71,77,79,87],cog:89,coincid:81,collect:[5,21,87],colloc:[19,76,80,88],color:[85,87,88],colorbar:[77,83,86,87],column:[33,35,41,49,55,83],com:4,combin:[37,56,83],come:[76,77],commit:3,common:[3,83],commun:90,compar:[64,66,71,72,74,76,77,78,81,82,87,88],comparison:[6,7,8,10,11,12,13,14,15,16,17,18,81],compat:[4,6,7,8,10,11,12,13,14,15,16,17,18,35,83],competit:66,competitivepinn:[2,80],compil:[6,7,8,10,11,12,13,14,15,16,17,18],complet:[3,76,79,80,81,82,88],complex128:[6,7,8,10,11,12,13,14,15,16,17,18],complex:[6,7,8,9,10,11,12,13,14,15,16,17,18,59,79,80,81,84,85,88],compon:[23,39,49,50,51,55,58,59,61,62,63,72,76,78,79,80,81,82,86,88],componet:77,compos:[6,7,8,10,11,12,13,14,15,16,17,18,48,77,81,83,85,87,88],composit:[60,88],compress:90,comput:[6,7,8,9,10,11,12,13,14,15,16,17,18,23,34,35,37,38,39,40,42,43,45,46,47,48,49,50,51,52,53,54,55,58,64,65,66,68,69,70,71,72,73,74,76,77,78,79,80,83,87,88],compute_residu:64,concaten:[38,80],concatenet:38,conclud:81,condit:[0,2,5,19,23,60,61,62,63,64,73,76,77,78,79,80,81,82,83,84,86,87],conditon:[40,82],conduct:84,confer:[6,7,8,9,10,11,12,13,14,15,16,17,18,42,66],configur:[66,67,69,72,73,74,82],configure_optim:[66,67,69,72,73,74],confus:[6,7,8,10,11,12,13,14,15,16,17,18],congratul:[76,79,80],connect:[0,51,70,80],consid:[6,7,8,10,11,12,13,14,15,16,17,18,23,25,27,28,29,30,39,50,51,54,58,76,77,83,88],consist:[3,6,7,8,10,11,12,13,14,15,16,17,18,55,87],constant:[40,52,53,79],constantlr:[65,66,67,68,69,70,71,72,74,78],constrain:86,constraint:[5,22,40],construct:[6,7,8,10,11,12,13,14,15,16,17,18,33,77,82,83,88],constructor:[22,64,65,66,67,68,69,70,71,72,73,74]
,consumpt:78,contain:[6,7,8,10,11,12,13,14,15,16,17,18,21,35,50,51,58,67,71,73,78,83,87,88,90],context:[6,7,8,10,11,12,13,14,15,16,17,18,74],continu:[2,5,77,90],continuousclassifi:83,continuousconv2d:35,continuousconv:83,continuousconvblock:[35,83],continuousconvolut:83,continuum:[6,7,8,9,10,11,12,13,14,15,16,17,18],contourf:59,contract:0,contrast:83,contribuit:89,contribut:[89,90],contributor:[0,89],contro:[66,69],control:[66,67,69,71,72,73,81],conv2d:[6,7,8,10,11,12,13,14,15,16,17,18],conv4:[6,7,8,10,11,12,13,14,15,16,17,18],conv5:[6,7,8,10,11,12,13,14,15,16,17,18],conv:[6,7,8,10,11,12,13,14,15,16,17,18,35],convent:80,converg:[6,7,8,9,10,11,12,13,14,15,16,17,18,78],convers:33,convert:[6,7,8,10,11,12,13,14,15,16,17,18,57,77],convolut:[2,5,34,37,39,47,52,53,54,71,77],convolv:83,coord:[39,77],coordin:[31,35,39,40,47,54,58,64,71,74,80,81,82,83,86,87,88],coordinates_indic:[47,54,77],coordinates_mesh:83,copi:[0,6,7,8,10,11,12,13,14,15,16,17,18,33,83],copyright:0,core:[64,73,77,78,80,84,88],cornerston:78,correct:[33,80,83],correctli:[6,7,8,10,11,12,13,14,15,16,17,18,40,57,82],correspond:[6,7,8,10,11,12,13,14,15,16,17,18,20,33,34,39,49,55,59,73,81,82,83,86,87,88],cos:[38,40,80,82,83,88],coscia2023phys:1,coscia:[1,35,67,89],cosin:38,cost:78,costum:[75,82],could:[26,31,73,76,80,81,88],coupl:78,cours:[81,85,87,88],cover:85,coverag:3,coveral:3,cpu:[6,7,8,10,11,12,13,14,15,16,17,18,33,58,76,77,78,80,81,82,83,84,86,87,88],creat:[6,7,8,9,10,11,12,13,14,15,16,17,18,25,27,28,32,38,45,46,56,76,77,78,82,83,84,87],crete:82,criteria:31,criterion:[45,46,83],cross:[24,33],crossentropyloss:83,csvlogger:76,cuda:[6,7,8,10,11,12,13,14,15,16,17,18,33],cup:[32,81,82,86],current:[0,6,7,8,10,11,12,13,14,15,16,17,18,19,33,60,66,71,72,83,84,89],current_condition_nam:64,current_epoch:86,custom:[5,6,7,8,10,11,12,13,14,15,16,17,18,73],d_loss:[76,78,80,81,82,88],damag:0,darci:5,dario:[1,89],dat:77,data:[2,33,35,38,40,41,44,45,46,64,66,67,71,72,73,74,75,78,80,82,83,86,87,90],data_darci:84,data_input:86,data_k:77,data_ks2:77,data_output:86,dataload:[33,64,67,73,74,83],dataloader_idx:73,datapoint:35,dataset:[77,83,84,87],datatyp:[6,7,8,10,11,12,13,14,15,16,17,18],daw:19,ddu:79,ddudxdx:79,deal:[0,82],decai:[65,70],decid:73,declar:81,decod:[71,73,83],decomposit:[2,41,87],decor:33,decreas:[80,83],deep:[6,7,8,9,10,11,12,13,14,15,16,17,18,40,42,83,88,90],deeper:78,deeponet:[2,55,77],deepspe:73,def:[6,7,8,10,11,12,13,14,15,16,17,18,22,35,61,62,63,73,76,77,78,79,80,81,82,83,85,86,87,88],defalut:78,default_root_dir:86,defin:[2,6,7,8,9,10,11,12,13,14,15,16,17,18,23,24,25,26,27,28,30,32,44,48,49,50,51,55,56,60,64,76,78,81,83,84,85,86,87],definit:[23,60,61,62,63,64,67,71,73,76,90],defualt:[67,78],degrad:[26,31],degre:[6,7,8,10,11,12,13,14,15,16,17,18,45,46,78],delta:[77,80,81,82,86],delta_u:[63,86],delv:78,demo:[1,35,67,89],demonstr:[76,78,88],denot:[70,72],depend:[5,24,47,52,53,54,57,60,63,76,77,78,80,82,88],depict:[2,90],deprec:[6,7,8,10,11,12,13,14,15,16,17,18,44],deriv:[40,58,61,76,77,88],descend:[6,7,8,10,11,12,13,14,15,16,17,18],describ:[3,31,45,46,79,80,90],design:[6,7,8,10,11,12,13,14,15,16,17,18,47,49,52,54,55,78],desir:[6,7,8,10,11,12,13,14,15,16,17,18,40,49,55,81],despit:[31,81],destin:[6,7,8,10,11,12,13,14,15,16,17,18],detach:[6,7,8,10,11,12,13,14,15,16,17,18,33,77,83,86,87],detail:[6,7,8,10,11,12,13,14,15,16,17,18,33,48,76,83],determin:29,determinist:[6,7,8,10,11,12,13,14,15,16,17,18,78],develop:[78,89],deviat:[38,41],devic:[6,7,8,10,11,12,13,14,15,16,17,18,33,78,90],dezert:26,diagram:[6,7,8,10,11,12,13,14,15,16,17,18],dict:[
6,7,8,10,11,12,13,14,15,16,17,18,20,21,23,24,26,35,40,41,55,56,59,60,64,65,66,67,68,69,70,71,72,73,74,76],dictionari:[6,7,8,10,11,12,13,14,15,16,17,18,20,21,23,24,26,41,56,59,60,73,83,88],diff:88,differ:[2,6,7,8,10,11,12,13,14,15,16,17,18,20,27,28,31,32,35,47,48,49,52,53,54,55,56,58,60,73,76,77,78,80,81,82,85,87,88,89,90],differen:83,differenti:[2,37,38,52,53,58,65,66,68,69,70,71,72,75,77,84,87,90],difficulti:77,digit:89,dim:[35,40,80,83],dim_grid:83,dim_t:77,dim_x:77,dimens:[24,25,26,27,28,31,32,34,35,37,38,39,40,42,43,47,48,49,50,51,52,53,54,55,66,69,71,72,77,79,83,84,87,88],dimension:[5,34,37,39,40,43,47,48,52,53,54,62,80,83,85,87],direct:[6,7,8,10,11,12,13,14,15,16,17,18,35,76,77,78,80,81,82,83,88],directli:[4,76,78,83,87],directori:86,dirichlet:[23,77,79,81,84],disabl:[6,7,8,10,11,12,13,14,15,16,17,18],discourag:78,discoveri:[6,7,8,9,10,11,12,13,14,15,16,17,18],discrat:77,discret:[76,77,83,86],discretis:[71,74,80,81,88],discretise_domain:[60,76,78,80,81,82,86,88],discrimin:[66,67],distribut:[0,4,26,67,77],div:58,dive:78,diverg:[58,79,88],divergeng:58,divgrad:58,divid:[2,23,44,45,46],divis:[45,46,64],docstr:[6,7,8,10,11,12,13,14,15,16,17,18],document:[0,4,6,7,8,10,11,12,13,14,15,16,17,18,76,83,85,88],doe:[47,52,54,57,66,71,78],doesn:83,doi:[6,7,8,9,10,11,12,13,14,15,16,17,18,19,34,35,37,38,40,42,47,49,51,52,53,55,65,67,68,69,70,71,72],doing:[39,78,82,85],domain:[23,24,25,26,27,28,30,31,32,35,39,48,57,59,60,61,62,63,76,77,80,81,82,83,84,86,87,88],don:[77,83],done:[3,33,40,71,76,77,78,83,85,86,87,90],dong:40,dot:[38,39,45,46,70,72,77],doubl:[6,7,8,10,11,12,13,14,15,16,17,18],download:[83,84],driven:[67,71],dropout:[6,7,8,10,11,12,13,14,15,16,17,18,36],dropout_prob:36,dst_type:[6,7,8,10,11,12,13,14,15,16,17,18],dtype:[6,7,8,10,11,12,13,14,15,16,17,18,33,77,84],dudt:[63,79,82],dudx:79,due:[81,82],duplic:[6,7,8,10,11,12,13,14,15,16,17,18],dure:[6,7,8,9,10,11,12,13,14,15,16,17,18,20,21,23,59,65,71,74,76,78,79,80,81,83,86,87],dynam:[77,87],each:[3,6,7,8,10,11,12,13,14,15,16,17,18,19,20,21,23,37,38,40,43,45,46,48,55,59,64,66,72,73,76,78,79,81,85,88],earli:89,earlystop:78,eas:23,easi:[3,23,59,76,77,78,80,90],easier:33,easili:[76,78,81,83],effect:[6,7,8,10,11,12,13,14,15,16,17,18],effici:[26,78],eigenvector:[38,80],either:[6,7,8,10,11,12,13,14,15,16,17,18,33,35,40,44,78,89],elast:84,element:[23,44,45,46,60,64,67,74,85],elementwis:[6,7,8,9,10,11,12,13,14,15,16,17,18],elip:26,ell:[45,46],ell_k:77,ellips:85,ellipse_bord:85,ellipse_no_bord:85,ellipsoid1:[25,27,28,32],ellipsoid2:[25,27,28,32],ellipsoid:[24,25,26,27,28,32,85],ellipsoid_bord:85,ellipsoid_border_sampl:85,ellipsoid_dict:26,ellipsoid_no_bord:85,ellipsoid_no_border_sampl:85,ellipsoiddomain:[2,25,27,28,30,32,60,85],ellipt:84,els:77,elu:[2,7],emb:[34,39,48,77],embed:[2,34,39,47,48,54,77],embedding1:80,embedding2:80,embedding_dimenion:39,embedding_dimes:77,emploi:[77,87],empti:[83,86],enabl:[20,57],enable_model_summari:[76,77,78,80,81,82,83,84,88],encapsul:[78,81,88],encod:[38,71,73,83],end:[6,7,8,10,11,12,13,14,15,16,17,18,19,21,45,46,64,65,66,68,69,70,71,72,73,76,77,78,79,80,81,82,86,88],energi:83,enforc:[6,7,8,10,11,12,13,14,15,16,17,18,23,40,73,79,88],engin:[6,7,8,9,10,11,12,13,14,15,16,17,18,38,65,68,70,90],enhanc:[2,36,68],enhanced_linear:36,enhancedlinear:2,enough:80,ensur:[3,6,7,8,10,11,12,13,14,15,16,17,18,40,78,88],entir:[47,49,52,54,55],entiti:[29,30],entri:[6,7,8,10,11,12,13,14,15,16,17,18,49,55],enumer:[6,7,8,10,11,12,13,14,15,16,17,18,83,86,87],env:[6,7,8,10,11,12,13,14,15,16,17,18],epoch:[19,20,21,64,70,73,77,78,80,81,82,83,84,8
6,87,88],epoch_switch:20,epochs_sav:86,eps:65,epsilon:65,equal:[26,31,33,37,43,81,83,86],equat:[5,22,37,38,52,53,61,62,63,64,65,66,68,69,70,72,78,80,81,82,83,84,85,86,87,90],equation_factori:82,equation_interfac:23,equationinterfac:[23,64,65,66,68,69,70,72],equival:[6,7,8,10,11,12,13,14,15,16,17,18],erc:89,err:84,err_:87,err_test:77,err_train:77,error:[6,7,8,10,11,12,13,14,15,16,17,18,33,45,46,65,66,68,69,71,72,74,76,77,78,80,81,83,84,87,88],error_metr:77,especi:[78,80,87],essenti:78,estim:26,eta:70,etc:[6,7,8,10,11,12,13,14,15,16,17,18,22],euclidean:[45,46],eval:[6,7,8,10,11,12,13,14,15,16,17,18,35,83],evalu:[6,7,8,9,10,11,12,13,14,15,16,17,18,22,23,39,44,45,46,64,65,66,68,69,70,72,76,81,82,86,88],even:[3,38,77,78,83],event:0,eventu:23,everi:[6,7,8,9,10,11,12,13,14,15,16,17,18,49,50,51,55,70,73,75,86,87],evolv:82,exact:[81,82,88],exactli:[6,7,8,10,11,12,13,14,15,16,17,18,40,82,88],exampl:[6,7,8,10,11,12,13,14,15,16,17,18,19,20,21,22,24,25,26,27,28,31,32,33,34,35,36,39,48,49,55,59,60,61,62,63,64,73,76,78,80,83,84,85,87],example_dirichlet:22,example_domain:22,example_input_pt:22,example_output_pt:22,except:[6,7,8,10,11,12,13,14,15,16,17,18,39,50,51,54],excercis:77,exclud:25,exclus:[2,25,60],execut:[6,7,8,10,11,12,13,14,15,16,17,18,41,78],exercis:3,exhibit:80,exist:[3,6,7,8,10,11,12,13,14,15,16,17,18],exp:[2,6,7,8,15,16,17,18,65,76,78,82,86],expand:[39,41,87],expans:40,expas:88,expeci:78,expect:[6,7,8,10,11,12,13,14,15,16,17,18,34,35,37,39,43,47,48,50,51,52,53,54,66,67,69,72,76,77,78,82,86,87],expens:87,expert:[40,88],explain:[76,83],explan:[6,7,8,10,11,12,13,14,15,16,17,18],exploit:[81,82,83,87,88],explor:[20,78],exponenti:[6,7,8,9,10,11,12,13,14,15,16,17,18,65],express:[0,80,88],extact:78,extend:[81,84],extens:[40,78,83,85,90],extent:77,extra:[5,6,7,8,10,11,12,13,14,15,16,17,18,48,57,64,72,73],extra_featur:[57,64,65,66,68,69,70,71,72,73,74,81],extra_repr:[6,7,8,10,11,12,13,14,15,16,17,18],extract:[19,22,33,49,55,57,61,62,63,64,76,77,78,79,80,81,82,84,85,86,88],extrafeatur:[81,82,88],extrema:[24,26],f_1:[35,83],f_2:[35,83],facilit:78,fact:[82,83],factor:[55,65,66,67,68,69,70,71,72,74],failur:19,fairli:76,fals:[6,7,8,9,10,11,12,13,14,15,16,17,18,24,25,26,27,28,29,30,31,32,33,35,42,45,46,59,67,73,76,77,78,80,81,82,83,84,85,88],fancier:73,fashion:87,faster:81,favor:[6,7,8,10,11,12,13,14,15,16,17,18],favourit:77,featur:[3,5,57,64,65,68,69,70,71,72,73,74,78],federico:71,feed:[35,80,81,83],feedforward:[2,23,42,49,51,55,56,76,77,78,80,81,83,86,87,88],feel:[77,80],few:[3,90],ffn:81,ffn_dict:56,field:[6,7,8,10,11,12,13,14,15,16,17,18,34,35,39,41,47,48,53,54,58,81,82,83,84,87],field_indic:[47,54,77],fig:[77,83,85,87,88],figsiz:[77,83,85,87,88],figur:[48,76],file:[0,39,59,87],filenam:59,fill:85,fill_:[6,7,8,10,11,12,13,14,15,16,17,18],filter:35,filter_dim:[35,83],final_lay:80,finali:[76,77,80,81,82,88],find:[2,3,25,65,66,68,69,70,71,72,74,77,80,83,85,86],fine:83,finetun:[6,7,8,10,11,12,13,14,15,16,17,18],fire:[6,7,8,10,11,12,13,14,15,16,17,18],first:[25,33,35,41,42,73,77,78,80,81,82,83,84,85,88],firstli:[83,85],fit:[0,41,73,77,78,80,81,82,83,84,86,87,88],fit_pod:87,five:[2,90],fix:[3,6,7,8,9,10,11,12,13,14,15,16,17,18,23,35,59,65,76,77,78,79,82,83],fixed_vari:[59,82],fixedcurl:79,fixedflux:[23,79],fixedgradi:[23,79],fixedoper:79,fixedvalu:[23,61,62,63,76,78,79,80,81,82,86],flag:[35,75,83],flatten:[83,87],flexibl:76,float16:[6,7,8,10,11,12,13,14,15,16,17,18],float32:79,float64:[6,7,8,10,11,12,13,14,15,16,17,18],floor:[37,43],flow:[5,51,78],fluid:87,flux:23,fno:[2,77],focu:[72,77,78,84],folder:5,follow:[
0,1,2,3,6,7,8,10,11,12,13,14,15,16,17,18,31,38,40,70,71,72,74,76,77,78,79,82,83,84,88,89,90],foral:[40,77],forc:[23,80,81,84,86,90],force_term:[81,86],forev:78,fork:3,form:[3,6,7,8,10,11,12,13,14,15,16,17,18,35,39,50,51,55,76,77,82,83,84],formal:90,format:[1,3,6,7,8,10,11,12,13,14,15,16,17,18,86],former:71,formual:[66,67,71,72,74],formul:[22,65,68,69,70,73,88],formula:[38,40],fortun:78,forward:[6,7,8,9,10,11,12,13,14,15,16,17,18,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,65,66,67,68,69,70,71,72,73,74,77,78,80,81,82,83,87,90],forward_map:57,forward_pr:[6,7,8,10,11,12,13,14,15,16,17,18],found:[6,7,8,10,11,12,13,14,15,16,17,18,73,80],fourier:[2,5,40,43,47,52,53,54,77],fourier_embed:80,fourierblock1d:37,fourierblock2d:37,fourierblock3d:37,fourierfeatureembed:[38,80],fourierintegralkernel:2,fourth:77,foward:82,frac:[15,16,17,18,34,40,45,46,65,66,68,69,70,71,72,74,76,77,79,82,88],framework:[76,90],free:[0,3,64,77,80],freez:[6,7,8,10,11,12,13,14,15,16,17,18],frequenc:[19,38,73,80],friendli:90,from:[0,6,7,8,10,11,12,13,14,15,16,17,18,19,23,25,27,28,29,30,31,32,33,34,37,38,39,42,43,44,45,46,47,48,49,52,53,54,55,59,60,61,62,63,64,65,66,68,72,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89],frontend:59,frontier:[24,26,31,32],full:[78,84],fulli:[6,7,8,10,11,12,13,14,15,16,17,18,80],func:[6,7,8,9,10,11,12,13,14,15,16,17,18,34,39,47,50,51,52,53,54,76,77,78,81,83,86,87],fund:[89,90],fundament:[23,60],furnish:0,fusion:26,futur:[4,6,7,8,10,11,12,13,14,15,16,17,18,87],gamma0:80,gamma0_loss:80,gamma1:[63,79,80,81,82,86],gamma1_loss:[80,81,82],gamma2:[63,79,81,82,86],gamma2_loss:[81,82],gamma3:[81,82,86],gamma3_loss:[81,82],gamma4:[81,82,86],gamma4_loss:[81,82],gamma:[6,7,8,9,10,11,12,13,14,15,16,17,18,67,70,80],gamma_1:[81,82,86],gamma_2:[81,82,86],gamma_3:[81,82,86],gamma_4:[81,82,86],gamma_i:[81,82],gan:[6,7,8,10,11,12,13,14,15,16,17,18,73],garom:2,gashler:[6,7,8,9,10,11,12,13,14,15,16,17,18],gelu:[2,10,34,47],gener:[2,3,6,7,8,9,10,11,12,13,14,15,16,17,18,26,47,49,52,54,55,60,67,82,83,85,89,90],generaliz:88,geometri:[5,24,25,26,27,28,29,30,31,32,60,61,62,63,76,78,79,80,81,82,86,87,88],geometryunion:32,georg:[6,7,8,9,10,11,12,13,14,15,16,17,18,70],get:[78,82,83,85,90],get_buff:[6,7,8,10,11,12,13,14,15,16,17,18],get_extra_st:[6,7,8,10,11,12,13,14,15,16,17,18],get_paramet:[6,7,8,10,11,12,13,14,15,16,17,18],get_submodul:[6,7,8,10,11,12,13,14,15,16,17,18],get_swap_module_params_on_convers:[6,7,8,10,11,12,13,14,15,16,17,18],gianluigi:[1,89],git:[3,4],github:[4,80],give:[77,88],given:[6,7,8,9,10,11,12,13,14,15,16,17,18,22,23,25,27,28,32,34,39,41,64,65,66,68,69,70,72,74,76,77,78,79,80,83,86,88,90],global:[6,7,8,10,11,12,13,14,15,16,17,18,52,53,72,73],goal:[86,87],godfrei:[6,7,8,9,10,11,12,13,14,15,16,17,18],goea:89,going:[76,77,78,84,87,88],good:[3,83,88],govern:[64,65,66,68,69,70,72],gpinn:[2,80],gpu1:[6,7,8,10,11,12,13,14,15,16,17,18],gpu:[6,7,8,10,11,12,13,14,15,16,17,18,58,73,76,77,78,80,84,88],grad:[6,7,8,10,11,12,13,14,15,16,17,18,33,58,61,62,63,76,78,79,82],grad_input:[6,7,8,10,11,12,13,14,15,16,17,18],grad_output:[6,7,8,10,11,12,13,14,15,16,17,18],gradient:[6,7,8,10,11,12,13,14,15,16,17,18,23,33,51,58,68,76,78,79,83,90],gradient_clip_v:78,grant:0,granular:38,graph:[33,71,83],graphic:81,grate:89,gratitud:89,great:[80,82,88,89],greater:[26,31],green:88,grid2:83,grid:[24,59,60,76,80,81,83,86,88],group:[3,89],guarante:[4,6,7,8,10,11,12,13,14,15,16,17,18,23,40],guid:[40,88],guidelin:3,h2020:89,had:83,half:[6,7,8,10,11,12,13,14,15,16,17,18],hand:22,handl:[6,7,8,10,11,12,13,14,15,16,17
,18,64,72,73,80],hanwen:38,happen:79,hard:[5,40,77,86],hard_spac:82,hard_t:82,hardli:80,hardmlp:82,hardmlptim:82,has:[1,3,6,7,8,10,11,12,13,14,15,16,17,18,33,34,39,49,55,56,73,76,78,80,82,83,85,86,89],have:[3,6,7,8,10,11,12,13,14,15,16,17,18,33,35,40,41,66,73,76,77,78,79,80,81,82,83,84,85,86,87,88,89],have_sampled_point:60,haven:78,heart:85,heat:84,helmotz:5,helmotz_equ:88,helmotz_sol:88,help:[3,6,7,8,10,11,12,13,14,15,16,17,18,38,80,81],helper:77,henc:[38,67,71],her:82,here:[2,61,62,63,73,76,78,80,81,82,83,84,85,86,87,88],herebi:0,hesthaven:[71,87],hidden:[34,39,42,47,48,50,51,54,77,81,83],hidden_dim:42,hidden_dimens:83,hidden_s:[34,39],high:[2,19,38,80,81,84,85,90],higher:[26,31,84,85,88],highest:83,highli:78,highlight:[76,87],highlihgt:85,his:82,histori:3,hold:[76,78],holder:0,homogen:86,hook:[6,7,8,10,11,12,13,14,15,16,17,18,73,78],hope:76,host:[6,7,8,10,11,12,13,14,15,16,17,18],how:[6,7,8,10,11,12,13,14,15,16,17,18,56,73,75,76,77,78,79,80,81,82,83,84,85,86,87,88],howev:[6,7,8,10,11,12,13,14,15,16,17,18,78,80,83],hpu:[77,78,80,84,88],html:80,http:[4,31,35,37,42,80],hypercub:[24,60,76,88],hyperellipsoid:26,hyperparamet:[65,77,78],ic3k:[6,7,8,9,10,11,12,13,14,15,16,17,18],iclr:66,idea:[80,83],identifi:[33,72],idx:[6,7,8,10,11,12,13,14,15,16,17,18,87],idx_:87,ieee:[6,7,8,9,10,11,12,13,14,15,16,17,18,42],ignor:[6,7,8,10,11,12,13,14,15,16,17,18],ignore_w:[6,7,8,10,11,12,13,14,15,16,17,18],imag:[42,83],image_transform:83,immedi:[6,7,8,10,11,12,13,14,15,16,17,18],implement:[6,7,8,10,11,12,13,14,15,16,17,18,19,20,21,23,24,25,26,27,28,29,31,32,34,35,37,39,40,42,43,45,46,47,49,50,51,52,53,54,55,56,57,58,59,60,64,65,66,67,68,69,70,71,72,74,77,78,79,80,81,82,83,84,85,90],impli:0,implicit:77,impos:[40,77,78,80,82,86],improv:[6,7,8,9,10,11,12,13,14,15,16,17,18,78,80,87,88],imshow:[77,84],in_featur:[6,7,8,10,11,12,13,14,15,16,17,18,35,49,55,80],includ:[0,3,6,7,8,10,11,12,13,14,15,16,17,18,24,25,26,27,28,31,32,39,49,50,51,55,60,73,76,79,84,85],incompatible_kei:[6,7,8,10,11,12,13,14,15,16,17,18],incorpor:81,increas:41,increment:19,inde:80,indec:[49,55],indeces_variables_extract:55,indent:3,independent:24,index:[64,66,67,72,73,74],indic:[73,76,77,88],indistinguish:76,individu:[6,7,8,10,11,12,13,14,15,16,17,18],industri:87,infeas:88,infer:87,inflow:87,inform:[1,6,7,8,9,10,11,12,13,14,15,16,17,18,19,38,40,49,51,55,64,65,66,68,69,70,72,78,80,81,83,86,88],infti:88,inher:[23,44,60],inherit:[29,30,48,64,65,68,73,76,78,79,81,82,83,84,86,87],inhert:85,inifinit:88,inifit:88,init:[6,7,8,10,11,12,13,14,15,16,17,18],init_weight:[6,7,8,10,11,12,13,14,15,16,17,18],initi:[3,6,7,8,9,10,11,12,13,14,15,16,17,18,23,31,33,36,37,42,47,52,53,54,56,76,77,78,79,80,81,82,85,86,87],initial_cond_test:77,initial_cond_train:77,initial_condit:[63,79,82],inlin:87,inner:[34,35,39,49,52,53,55],inner_s:[39,50,51,52,53,54,84],inplac:[6,7,8,10,11,12,13,14,15,16,17,18],input:[6,7,8,9,10,11,12,13,14,15,16,17,18,22,23,24,26,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,57,58,60,61,62,64,65,66,67,68,69,70,71,72,73,74,76,79,80,81,82,84,86,87,88],input_:[22,23,58,61,62,63,76,78,79,80,81,82,83,86,88],input_data2:83,input_data:83,input_dim:[42,82],input_dimens:[38,39,40,50,51,76,77,78,80,81,83,84,86,87,88],input_dimenson:[49,55],input_indeces_branch_net:[49,55],input_indeces_trunk_net:[49,55],input_numb_field:[35,37,43,53,83],input_point:[22,67,71,77,83,84,86,87],input_pt:[71,74,76],input_tensor:[64,66,71,72,74],input_vari:[57,60,76,77,78,81,82,83,84,86],insert:78,insid:[24,25,26,27,28,29,30,31,32,64,76,80,81],insight:81,inspec
t:76,inspir:48,instal:[84,90],instanc:[6,7,8,10,11,12,13,14,15,16,17,18,49,55,59,64,73,79,90],instead:[6,7,8,10,11,12,13,14,15,16,17,18],int_:[77,83],integ:[19,46,49,55],integr:[6,7,8,10,11,12,13,14,15,16,17,18,35,48,53,72,77,83],integral_kernel:48,intel:49,intend:3,intention:[64,71,72,74],interfac:[2,64,78,90],interior:26,intern:[6,7,8,9,10,11,12,13,14,15,16,17,18,37,56,57,66,73,76],interpol:[71,83],interpolation_network:71,intersect:[2,30,60],interv:73,intitialis:83,introduc:[76,79,80],introduct:[5,78],introductori:76,intrus:[71,87],intuit:90,invalid:[6,7,8,10,11,12,13,14,15,16,17,18],invalu:3,invari:87,invers:[5,23,64,65,66,68,69,70,72,90],inverseproblem:[23,86],invok:[6,7,8,10,11,12,13,14,15,16,17,18,41],involv:[31,90],ipu:[6,7,8,10,11,12,13,14,15,16,17,18,77,78,80,84,88],is_insid:[24,25,26,27,28,29,30,31,32,85],isol:78,issu:3,itali:89,item:[78,80,83,85,87],iter:[6,7,8,10,11,12,13,14,15,16,17,18,73,83],its:[6,7,8,9,10,11,12,13,14,15,16,17,18,40,41,48,73,76,79,87],itself:[6,7,8,10,11,12,13,14,15,16,17,18],ivagn:[1,89],jagtap:[6,7,8,9,10,11,12,13,14,15,16,17,18],jan:71,jcp:[6,7,8,9,10,11,12,13,14,15,16,17,18,40,71,72],jean:26,jeremi:68,jin:[49,55],job:78,joint:[6,7,8,9,10,11,12,13,14,15,16,17,18],journal:[1,6,7,8,9,10,11,12,13,14,15,16,17,18,39,40,48,51,54,55,71,72,87],juan:70,jump:[35,83],just:[2,3,4,76,78,79,81,83,85,87,90],jut:83,k_1:48,k_i:48,k_m:48,k_test:84,k_train:84,kaim:42,karniadaki:[6,7,8,9,10,11,12,13,14,15,16,17,18,69,70],kawaguchi:[6,7,8,9,10,11,12,13,14,15,16,17,18],keep:[78,81,83],keep_var:[6,7,8,10,11,12,13,14,15,16,17,18],kei:[6,7,8,10,11,12,13,14,15,16,17,18,24,26,40,41,55,59,60,73,78],kenji:[6,7,8,9,10,11,12,13,14,15,16,17,18],kept:[59,65],kernel:[37,43,48,53,54,77],kernel_s:[6,7,8,10,11,12,13,14,15,16,17,18],kernelneuraloper:[2,47,52,54,77],kevrekidi:69,keyword:[6,7,8,10,11,12,13,14,15,16,17,18,20,64,65,66,67,68,69,70,71,72,73,74,75,83],kind:[0,86],know:[3,73,77,78],knowledg:[6,7,8,9,10,11,12,13,14,15,16,17,18],kovachki:[34,37,39,47,48,52,53,54],kuramoto:5,kutta:77,kwarg:[6,7,8,10,11,12,13,14,15,16,17,18,22,33,59,73,75],l2_error:83,l2_loss:80,l_1:[40,45,46],l_2:[80,81,83],l_n:[45,46],label:[23,31,33,47,54,57,58,76,77,81,83,84,85,86,87,88],label_tensor:[33,49,55,57,85],label_to_extract:33,labeltensor:[2,6,7,8,9,10,11,12,13,14,15,16,17,18,22,23,24,25,26,27,28,31,32,40,49,55,57,58,64,65,66,67,68,69,70,71,72,74,76,77,81,83,84,85,87],lambda:88,lambda_:[70,72,88],lambda_i:70,lambda_k:67,lambdalr:73,land:[25,27,28],langl:39,lanthal:[34,47],laplac:[23,58,79,81,86],laplace_equ:[81,86],laplacian:[58,63,79,80,81,82,86,88],laplacian_u:81,last:[35,39,50,51,54,79,83],lastli:85,latent:[71,83,87],later:83,latin:[24,60,76,88],latter:[6,7,8,10,11,12,13,14,15,16,17,18],law:[80,90],layer:[35,36,38,40,41,47,48,50,51,52,53,54,76,77,78,80,81,82,83,86,87,88],layout:88,lbfg:73,lead:[78,82],learn:[38,39,42,47,48,49,52,54,55,64,65,66,67,68,69,70,71,72,73,74,76,77,78,82,83,84,87],learningratemonitor:73,least:[65,68],led:89,left:[34,38,39,40,45,46,65,70,72,77,80,81,83,88],leftarrow:70,legend:[86,88],len:[25,27,28,32,47,54,76,77,78,81,82,83,85,86,87],leq:7,less:[20,83],let:[6,7,8,10,11,12,13,14,15,16,17,18,33,77,78,80,81,82,83,84,85,86,87,88],level:[2,85,87,90],levi:72,liabil:0,liabl:0,librari:[87,90],licens:90,lift:[47,48,52,53,54,77],lifting_net:[47,52,53,54,77,84],lifting_oper:48,lightin:76,lightinig:76,lightn:[5,20,21,72,73,75,87],lightningmodul:73,like:[1,6,7,8,10,11,12,13,14,15,16,17,18,35,73,83],limit:0,linalg:[45,46,83],line:[6,7,8,10,11,12,13,14,15,16,17,18,83,90],linear:[3,6,7,8,9,10,11,
12,13,14,15,16,17,18,34,35,36,37,39,40,42,43,49,55,77,80,82,83,84],linear_lay:36,lint:3,linux:4,list:[6,7,8,9,10,11,12,13,14,15,16,17,18,20,21,23,24,25,26,27,28,30,31,32,33,35,37,39,43,45,46,47,48,49,50,51,52,53,54,55,57,58,59,60,64,66,67,69,72,73,74,76,78,79,81,83,85],list_equ:23,liu:[37,39,48,52,53,54],live:[6,7,8,10,11,12,13,14,15,16,17,18],load:[2,6,7,8,10,11,12,13,14,15,16,17,18,75,76,77,86,87],load_state_dict:[6,7,8,10,11,12,13,14,15,16,17,18],loaded_weight:33,loadmat:[77,84],loc:88,local:[4,6,7,8,10,11,12,13,14,15,16,17,18,34,39],locat:[2,5,19,22,24,26,30,31,60,76,77,78,79,80,81,82,86,88],log:[59,66,72,73,76,86],log_every_n_step:[77,87],logarithm:[6,7,8,9,10,11,12,13,14,15,16,17,18],logdir:78,loger:77,logged_metr:[76,78],logger:[64,73,76,78],logi:[59,76,81],logic:78,logist:14,lognorm:87,logx:59,longer:[76,77,80,81,82,84,87,88],look:[3,6,7,8,10,11,12,13,14,15,16,17,18,71,85],loop:[83,87],lor:[27,32],loss:[44,45,46,59,64,65,66,67,68,69,70,71,72,73,74,76,77,78,80,81,82,83,84,86,88],loss_data:[64,66,71,72,74],loss_phi:[64,65,66,68,69,70,72],loss_valu:64,lossinterfac:[2,44,45,46],love:3,low:[2,19,80,83,87],lower:[78,82,84],lowrank:54,lowrankblock:[39,54],lowrankneuraloper:[2,54],lploss:[2,80,84],lr_schedul:[65,66,67,68,69,70,71,72,73,74,80],lr_scheduler_config:73,lrschedul:[65,66,67,68,69,70,71,72,74],luke:[6,7,8,9,10,11,12,13,14,15,16,17,18],lvert:70,mac:4,mach:49,machin:[39,47,48,49,52,54,55,69,81,87,90],macro:80,made:[6,7,8,10,11,12,13,14,15,16,17,18,73,84,85],mag:87,magnitud:[81,87],mai:[6,7,8,10,11,12,13,14,15,16,17,18,33],main:[33,76,86,87],mainli:81,maintain:3,make:[3,6,7,8,9,10,11,12,13,14,15,16,17,18,23,33,37,77,78,81,84,85],manag:[6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,81],mandatori:76,mani:[23,73,75,76,77,80,81,82,83,84,88,89],manifold:87,manipul:[33,76,81,82],manner:90,mantain:[23,78],manual:[73,78],manual_se:83,map:[6,7,8,10,11,12,13,14,15,16,17,18,37,39,42,43,47,48,49,52,54,55,71,74,80,83],maria:89,marker:86,mask:72,mat:[77,84],match:[6,7,8,10,11,12,13,14,15,16,17,18,40],materi:84,math:35,mathbb:[6,7,8,9,10,11,12,13,14,15,16,17,18,25,27,28,32,34,39,40,48,65,66,68,69,70,71,72,74,77,83,88],mathbf:[9,38,40,65,66,68,69,70,71,72,74,80,81,83],mathcal:[34,38,48,65,66,68,69,70,71,72,74,80,83,88],mathemat:[77,80,81,83,87,88],mathlab:[4,80,89],mathmat:80,matplotlib:[4,59,77,83,84,85,86,87,88],matrix:[31,34,38,39,41,77],max:[6,12,49,55,83],max_:72,max_epoch:[76,77,78,80,81,82,83,84,86,87,88],max_j:70,maxim:[66,76],maximum:83,mc_step:67,mcclenni:72,mean:[23,39,41,44,45,46,49,50,51,55,59,65,66,68,69,71,72,73,74,76,77,83,84,87],mean_loss:[76,77,78,80,81,82,84,87,88],meantim:44,measur:[45,46,87],mech:35,mechan:[6,7,8,10,11,12,13,14,15,16,17,18,38,64,65,68,70],media:84,member:[6,7,8,10,11,12,13,14,15,16,17,18],memo:[6,7,8,10,11,12,13,14,15,16,17,18],memori:[6,7,8,10,11,12,13,14,15,16,17,18,41],memory_format:[6,7,8,10,11,12,13,14,15,16,17,18],meneghetti:35,meng:55,mention:3,merchant:0,merg:[0,24,33,85],mesh:[34,35,39,47,48,54,66,69,72,77,83],meshgrid:83,messag:3,meth:87,method:[6,7,8,10,11,12,13,14,15,16,17,18,19,26,29,31,33,35,38,40,41,44,45,46,47,49,52,54,55,56,57,58,59,64,65,66,68,70,71,72,73,75,76,77,78,81,82,83,85,87,88],methodolog:87,metric:[21,59,73,76,78,81,84],metric_err:84,metric_to_track:73,metric_v:73,metrictrack:[21,76,81],metrictrak:78,michael:[6,7,8,9,10,11,12,13,14,15,16,17,18],micro:80,mid:[25,27,28,32],might:[73,77,78,83],mileston:80,min:[6,49,55,78],min_:72,minim:[22,64,65,66,67,68,69,70,71,72,74,76,79,81,82,83],minimum:[76,83,87],minor:3,minut:[77,80,82,83],mionet:[2
,49,77],mish:[2,11],mismatch:20,miss:[6,7,8,10,11,12,13,14,15,16,17,18],missing_kei:[6,7,8,10,11,12,13,14,15,16,17,18],mit:0,mitig:[19,51],mix:78,mlp:[35,39,50,51,54,87],modal:87,mode:[6,7,8,10,11,12,13,14,15,16,17,18,24,25,26,27,28,31,32,33,35,37,41,43,52,53,60,73,76,85,87,88],model:[1,5,6,7,8,10,11,12,13,14,15,16,17,18,20,22,23,35,39,44,45,46,47,48,49,50,51,52,54,55,56,57,64,65,66,67,68,69,70,71,72,73,74,76,77,78,79,80,81,83,84,86,88,90],model_feat:81,model_lean:81,modern:90,modif:[6,7,8,10,11,12,13,14,15,16,17,18,33,78],modifi:[0,6,7,8,10,11,12,13,14,15,16,17,18,78,81],modul:[2,6,7,8,9,10,11,12,13,14,15,16,17,18,25,27,28,29,30,32,34,35,36,37,38,39,40,41,42,43,44,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,80,81,82,83,84,85,87,88],modular:77,moduledict:[71,72],modulelist:[35,55],moment:85,monitor:73,montecarlo:67,more:[2,6,7,8,10,11,12,13,14,15,16,17,18,23,33,59,60,76,77,78,79,80,81,82,83,84,85,88],moreov:86,most:[37,43,49,55,78,85],move:[6,7,8,10,11,12,13,14,15,16,17,18,84],moya:71,mps:[77,78,80,88],mse:[76,77],mseloss:[64,65,66,68,69,70,71,72,74,83],mu1:86,mu2:86,mu_1:86,mu_2:86,mu_i:71,multi:[6,7,8,10,11,12,13,14,15,16,17,18,38,73,80],multidimension:58,multifeedforward:2,multilay:[35,50,87],multipl:[19,20,24,38,48,55,56,58,64,73,74,77,78,79,80,81,82,83,88,90],multiscal:[5,38],multiscale_pinn:80,multiscalefouriernet:80,multisteplr:80,musso:26,must:[6,7,8,9,10,11,12,13,14,15,16,17,18,23,30,35,37,38,43,47,51,54,58,64,67,68,71,73,76,83,87],n_kernel_lay:54,n_layer:[39,47,50,51,52,53,54,77],n_mode:[37,43,52,53,84],n_t:65,n_test:87,n_train:87,nabla:84,nabla_:68,nabla_u:82,naiv:78,naivemetrictrack:78,name:[6,7,8,10,11,12,13,14,15,16,17,18,23,58,59,64,71,73,81,86],named_buff:[6,7,8,10,11,12,13,14,15,16,17,18],named_children:[6,7,8,10,11,12,13,14,15,16,17,18],named_modul:[6,7,8,10,11,12,13,14,15,16,17,18],named_paramet:[6,7,8,10,11,12,13,14,15,16,17,18],namedtupl:[6,7,8,10,11,12,13,14,15,16,17,18],nat:49,natur:[69,76],navier:87,navierstokeequ:79,navierstokesdataset:87,naxian:40,ncol:83,ndim:40,necessari:[3,87],need:[3,6,7,8,10,11,12,13,14,15,16,17,18,19,20,33,41,73,76,77,78,81,82,83,84,85,86,87,88,90],nest:[6,7,8,10,11,12,13,14,15,16,17,18],net:[6,7,8,10,11,12,13,14,15,16,17,18,47,49,54,55,71,77,82,83],net_b:[6,7,8,10,11,12,13,14,15,16,17,18],net_c:[6,7,8,10,11,12,13,14,15,16,17,18],neto:72,network:[1,2,6,7,8,9,10,11,12,13,14,15,16,17,18,19,35,38,39,40,47,49,50,51,52,53,54,55,56,64,65,66,67,68,69,70,71,72,73,74,77,78,81,82,83,86,87],neural:[1,6,7,8,9,10,11,12,13,14,15,16,17,18,19,34,35,37,38,39,40,48,49,51,52,53,55,64,65,66,67,68,69,70,71,72,73,74,78,80,81,82,83,86,87,88],neural_net:[66,69,71,72,74,84],neuraloperatorproblem:77,neuraloperatorsolv:84,neuron:[39,50,51,54,81],never:33,nevertheless:83,new_domain:[24,88],new_optim:20,new_optimizers_kwarg:20,new_point:60,next:[48,73,89],nice:[77,80,81,82,88],nicola:[1,89],nikolao:70,nn_:88,no_grad:[6,7,8,10,11,12,13,14,15,16,17,18,77,83,88],no_overlap:35,no_sol:77,no_sol_test:77,no_sol_train:77,nois:83,non:[6,7,8,10,11,12,13,14,15,16,17,18,34,35,39,71,77,78,87],non_block:[6,7,8,10,11,12,13,14,15,16,17,18],none:[6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,23,35,36,39,40,44,45,46,50,51,52,53,57,58,59,64,65,66,67,68,69,70,72,73,74,75,76,77,83],noninfring:0,nonlinear:[49,71,77,87,90],nonloc:[34,47,77],norm:[45,46,83,87],normal:[3,42,73,83],not_sampled_point:60,note:[6,7,8,10,11,12,13,14,15,16,17,18,24,44,60,65,77,80,81,83,87],notic:[0,76,77,78,82,83,84,85,86,88],notimplementederror:58,novel:90,now:[33,76,
77,78,79,80,81,82,83,84,85,86,87,88],nrow:83,num_featur:[6,7,8,10,11,12,13,14,15,16,17,18],numb_class:83,numb_test:83,numb_train:83,number:[1,6,7,8,10,11,12,13,14,15,16,17,18,20,23,24,25,26,27,28,31,32,34,35,37,38,39,43,44,45,46,47,48,49,50,51,52,53,54,59,60,66,67,69,72,77,78,80,81,83,84,85],number_input_fil:83,number_of_coordin:77,numer:[76,77,87],numpi:[4,86,87],object:[6,7,8,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,29,31,34,39,59,60,76,77,78,83,85,86,87],obtain:[0,33,34,37,38,39,43,47,48,52,53,54,71,77,78,80,81,83,88],obvious:80,obviusli:88,ode:[76,78],ode_equ:[61,62,76,78],offer:[78,90],offici:[4,76],often:73,omega:[65,66,68,69,70,71,72,74,77,83,86],omega_:65,omega_i:65,on_load_checkpoint:72,on_train_batch_end:[66,72],on_train_epoch_end:[19,21,64,78,86],on_train_epoch_start:20,on_train_start:[19,72,73],on_validation_end:78,onc:[6,7,8,10,11,12,13,14,15,16,17,18,31,76,79,83],one:[2,9,33,39,43,45,46,48,50,51,54,55,60,73,76,77,78,79,80,81,82,83,85,87,88],ones:[6,7,8,10,11,12,13,14,15,16,17,18,76,77,87],onli:[3,6,7,8,10,11,12,13,14,15,16,17,18,24,26,31,35,40,49,55,58,59,60,65,67,68,71,73,76,77,78,79,81,83,84,85,87],onlin:76,onto:58,open:[1,3,90],openreview:66,oper:[6,7,8,10,11,12,13,14,15,16,17,18,30,33,34,35,37,39,45,46,48,49,52,53,55,61,62,63,74,76,78,79,80,81,82,86,88],operation_interfac:[25,27,28,32],operationinterfac:[2,25,27,28,32],operatornam:[45,46],opt1:73,opt2:73,optim:[2,6,7,8,9,10,11,12,13,14,15,16,17,18,35,64,65,66,67,68,69,70,71,72,73,74,76,78,80,81,83,86,87],optimizer1:[20,73],optimizer2:[20,73],optimizer_discrimin:[66,67],optimizer_discriminator_kwarg:[66,67],optimizer_gener:67,optimizer_generator_kwarg:67,optimizer_kwarg:[64,65,68,69,70,71,73,74,77,81,86,87],optimizer_model:[66,72],optimizer_model_kwarg:[66,72],optimizer_step:73,optimizer_weight:72,optimizer_weights_kwarg:72,optimizers_kwarg:[64,73],option:[6,7,8,9,10,11,12,13,14,15,16,17,18,73,76,77,80,81,82,83,84,88],ord:83,order:[3,5,6,7,8,10,11,12,13,14,15,16,17,18,30,39,40,45,46,50,51,54,67,68,71,76,77,78,81,83,84,86],ordereddict:[6,7,8,10,11,12,13,14,15,16,17,18],org:[31,35,37,42],origin:[6,7,8,9,10,11,12,13,14,15,16,17,18,19,33,34,35,37,38,39,40,41,42,47,48,49,51,52,53,54,55,65,66,67,68,69,70,71,72,80,83,84,87,88],orthogon:[2,41,87],other:[0,6,7,8,10,11,12,13,14,15,16,17,18,26,31,47,49,52,54,55,76,78,89],otherwis:[0,6,7,8,10,11,12,13,14,15,16,17,18,23,24,25,26,27,28,30,31,32,77,78],ouput:83,our:[2,76,77,78,79,80,81,82,85,86,88,90],out:[0,6,7,8,10,11,12,13,14,15,16,17,18,33,35,40,48,73,83,85],out_featur:[6,7,8,10,11,12,13,14,15,16,17,18,35,49,55,80],outlin:78,output:[6,7,8,10,11,12,13,14,15,16,17,18,22,23,34,35,36,37,38,39,40,42,43,44,45,46,47,48,49,50,51,52,53,54,55,57,58,59,60,64,66,71,72,73,74,76,77,79,80,82,83,84,86,87,88],output_:[22,23,58,61,62,63,76,78,79,80,81,82,86,88],output_dim:[42,82],output_dimens:[38,40,49,50,51,55,76,78,80,81,83,84,86,87,88],output_numb_field:[35,37,43,53,83],output_point:[22,67,71,77,83,84,86,87],output_pt:[71,74],output_tensor:[64,66,71,72,74],output_vari:[57,60,61,62,63,76,77,78,79,80,81,82,83,84,86,87,88],outputvari:48,over:[3,6,7,8,10,11,12,13,14,15,16,17,18,23,45,46,77,81,83,85,87],overlap:[35,76,88],overrid:[44,64,71,72,73,74],overridden:72,overriden:57,ovverid:[66,72],own:[6,7,8,10,11,12,13,14,15,16,17,18,78,82,85],p_test:87,p_train:87,packag:[2,4,89,90],pad:[52,53,84],padding_typ:[52,53],page:1,pang:49,paper:[1,38,40,67,80,83],param:[6,7,8,10,11,12,13,14,15,16,17,18,23,79,83,87],paramet:[6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37
,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,64,65,66,67,68,69,70,71,72,73,74,75,78,79,81,83,84,86,87],parameter:84,parameter_domain:[61,76,87],parameters_epoch:86,parametr:[35,37,52,53,61,71,76,81,84],parametricod:61,parametricproblem:[2,76,87],params_:[23,86],params_torch:86,paramtr:48,parent:[66,72],pari:[38,51,65],part:[3,6,7,8,10,11,12,13,14,15,16,17,18,78,83,87,89],partial:[37,49,52,53,55,65,66,68,69,70,71,72,77,79,80,82,84,86,90],particular:[0,6,7,8,10,11,12,13,14,15,16,17,18,34,35,39,48,52,53,83,86,87,89],pass:[6,7,8,9,10,11,12,13,14,15,16,17,18,22,23,31,33,34,35,36,38,39,40,41,42,49,50,51,54,55,57,60,64,66,69,71,72,73,74,76,78,79,80,82,83,85,87],path:[6,7,8,10,11,12,13,14,15,16,17,18],patholog:51,pattern:[42,88],pbc:[40,88],pcolor:59,pde:[5,19,38,39,48,54,68,71,77,79,84,87],pdf:[37,42],peculiar:83,pedagog:80,pengzhan:55,peopl:[3,89],per:75,perceptron:[35,50,87],perdikari:[38,40,51,65,69],perfect:81,perfectli:88,perform:[6,7,8,9,10,11,12,13,14,15,16,17,18,21,23,30,33,34,35,37,39,41,47,48,49,50,51,52,53,54,55,58,66,77,78,81,83,87],period:[2,5,77],periodicboundaryembed:[40,88],permeabl:84,permiss:0,permit:[0,72,77],persist:[6,7,8,10,11,12,13,14,15,16,17,18],person:0,phase:[79,81,83],phi:39,phi_k:77,physic:[1,6,7,8,9,10,11,12,13,14,15,16,17,18,19,22,37,38,40,43,49,51,55,64,65,66,68,69,70,71,72,78,80,81,83,86,87,88],pic1:83,pic2:83,pichi:71,pickl:[6,7,8,10,11,12,13,14,15,16,17,18],picklabl:[6,7,8,10,11,12,13,14,15,16,17,18],pictur:83,pin:[6,7,8,10,11,12,13,14,15,16,17,18],pina:[0,4,6,7,8,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,30,31,32,33,34,35,37,39,40,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,59,60,61,62,63,64,65,66,67,68,69,70,71,72,74,75,77,79,81,82,83,84,87,88],pinn:[2,24,26,54,60,64,65,66,68,70,72,76,78,79,80,82,86,88,90],pinn_feat:81,pinn_lean:81,pinn_learn:81,pinn_solution_0:86,pinn_stok:87,pinninterfac:[2,66,69,72],pip:84,pipelin:[2,90],pixel:83,place:[6,7,8,10,11,12,13,14,15,16,17,18,33],placehold:[19,20,21],plai:85,plan:[3,83],pleas:[3,6,7,8,10,11,12,13,14,15,16,17,18,81],plenti:87,plethora:78,plot:[59,76,77,80,81,82,83,85,86,87,88],plot_loss:[59,76,81],plot_sampl:[59,76],plot_scatt:85,plot_trajectori:77,plotter:[2,76,80,81,82,88],plt:[77,83,84,85,86,87,88],plu:[47,48,54],plug:87,pod:[5,41,87],pod_nn:87,pod_rank:87,podblock:87,point:[6,7,8,10,11,12,13,14,15,16,17,18,19,22,23,24,25,26,27,28,29,30,31,32,34,35,39,47,48,54,59,60,64,66,69,70,72,76,77,78,80,81,82,83,85,86,87,88],pointwis:72,poisson:[5,80,86],poisson_equ:80,poisson_problem:87,poisson_sol:81,polygon:85,poor:82,popul:19,porou:84,porpus:88,portion:0,posit:[6,7,8,10,11,12,13,14,15,16,17,18,35,83,89],possibl:[6,7,8,10,11,12,13,14,15,16,17,18,45,46,57,66,71,73,76,78,82,83,88,90],post:[6,7,8,10,11,12,13,14,15,16,17,18],potenti:[5,6,7,8,9,10,11,12,13,14,15,16,17,18,84],pow:33,power:[24,46],powerloss:[2,67,77],practic:[78,80],pratic:83,pre:[4,6,7,8,10,11,12,13,14,15,16,17,18,86],precis:[73,78,81],predict:[81,82,83,86,87,88],prefer:78,prefix:[6,7,8,10,11,12,13,14,15,16,17,18],preliminari:81,prepend:[6,7,8,10,11,12,13,14,15,16,17,18],preprint:[6,7,8,9,10,11,12,13,14,15,16,17,18,34,37,40,47,52,53,66,67],preprocess:33,present:[35,51,77,80,81,82,83,84,88],preserv:[6,7,8,10,11,12,13,14,15,16,17,18],pressur:[84,87],pretti:[77,80,84,88],previou:[77,78,81,82,83],previous:82,principl:[5,88],print:[6,7,8,10,11,12,13,14,15,16,17,18,76,77,78,80,82,83,84,85,87],probabl:[36,89],problem:[5,19,22,23,34,35,39,47,48,54,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,78,79,83,90],procedur
:[76,83],proceed:[26,42],process:[3,6,7,8,10,11,12,13,14,15,16,17,18,19,20,44,86],produc:[22,73],product:[39,49,55,77,89],prof:89,profession:90,professor:89,program:78,progress:[73,77,78,82],project:[1,3,41,47,48,52,53,54,77,83,87,89],projecting_net:[47,52,53,54,77,84],projection_oper:48,propag:[19,87],proper:[2,41,87],properli:[3,78,87],properti:[6,7,8,9,10,11,12,13,14,15,16,17,18,21,24,26,30,33,38,39,40,41,48,49,55,60,61,62,63,64,65,66,69,71,72,73,74,75],propos:[81,82,90],provid:[0,3,6,7,8,10,11,12,13,14,15,16,17,18,21,23,36,38,78,81,82,90],pseudospectr:77,psi:39,pts:[76,78,80,81,82,85,88],pts_0:86,pts_heart:85,pts_list:85,publish:0,pull:3,puor_r:77,purpos:0,push:3,put:88,pylint:3,pyplot:[77,83,84,85,86,87,88],pytest:4,python:[4,23,76,78,90],pytorch:[5,72,75,83,90],pytorch_lightn:[19,20,21,73,75,78,86],pytorchlightin:[66,72],pytorchlightn:90,quad:[38,45,46,65,66,68,69,70,71,72,76,77,79,80,82,83,84,88],qualifi:[6,7,8,10,11,12,13,14,15,16,17,18],qualiti:[26,31],quatit:76,queri:[6,7,8,10,11,12,13,14,15,16,17,18],quickli:85,quit:[81,83],r3_callback:19,r3refin:19,r_i:70,r_j:70,rage:23,rais:[6,7,8,10,11,12,13,14,15,16,17,18,19,20,33,35,58],rand:[33,83,85],randint:87,random:[24,25,26,27,28,31,32,38,60,76,77,78,82,83,85,86],randomli:76,randperm:83,rang:[23,78,83,86,87],rangl:39,rank:[2,41,78],rate:[65,66,67,68,69,70,71,72,73,74,78,80,81,82,83],ratio:67,ratio_train_test:87,ravel:77,rbapinn:2,reach:[76,77,78,81,82,83,84,86,87,88],read:[81,85,86],readi:85,real:[44,45,46,48,76,77,83,87],realli:[77,83],reamin:87,reason:[6,7,8,10,11,12,13,14,15,16,17,18],rebas:3,receiv:[6,7,8,10,11,12,13,14,15,16,17,18],recognit:42,recommend:78,recomput:41,record:[6,7,8,10,11,12,13,14,15,16,17,18,33],rectangular:83,recurs:[6,7,8,10,11,12,13,14,15,16,17,18],red:88,reduc:[5,41,44,49,55,67,71,78,83],reducedordermodelsolv:2,reducelronplateau:73,reduct:[23,44,45,46,49,55,67,71,87],reduction_network:71,refer:[6,7,8,9,10,11,12,13,14,15,16,17,18,19,24,34,35,37,38,39,40,42,47,48,49,50,51,52,53,54,55,60,65,66,67,68,69,70,71,72,73,76,80,84,86,88],referenc:[6,7,8,10,11,12,13,14,15,16,17,18],refin:2,reflect:38,regardless:[6,7,8,10,11,12,13,14,15,16,17,18],region:19,regist:[6,7,8,10,11,12,13,14,15,16,17,18],register_backward_hook:[6,7,8,10,11,12,13,14,15,16,17,18],register_buff:[6,7,8,10,11,12,13,14,15,16,17,18],register_forward_hook:[6,7,8,10,11,12,13,14,15,16,17,18],register_forward_pre_hook:[6,7,8,10,11,12,13,14,15,16,17,18],register_full_backward_hook:[6,7,8,10,11,12,13,14,15,16,17,18],register_full_backward_pre_hook:[6,7,8,10,11,12,13,14,15,16,17,18],register_load_state_dict_post_hook:[6,7,8,10,11,12,13,14,15,16,17,18],register_modul:[6,7,8,10,11,12,13,14,15,16,17,18],register_module_forward_hook:[6,7,8,10,11,12,13,14,15,16,17,18],register_module_forward_pre_hook:[6,7,8,10,11,12,13,14,15,16,17,18],register_module_full_backward_hook:[6,7,8,10,11,12,13,14,15,16,17,18],register_module_full_backward_pre_hook:[6,7,8,10,11,12,13,14,15,16,17,18],register_paramet:[6,7,8,10,11,12,13,14,15,16,17,18],register_state_dict_pre_hook:[6,7,8,10,11,12,13,14,15,16,17,18],regress:[38,80],regular:[67,77,81,83],reiniti:78,rel:[3,45,46,78,80,84,87],relat:[3,23,72],relative_error:87,relative_error_test:87,relative_error_train:87,releas:[6,7,8,10,11,12,13,14,15,16,17,18,19,83],relev:[21,76,85],reli:78,relu:[2,12,35,36,42,82,83],rememb:3,remov:[6,7,8,10,11,12,13,14,15,16,17,18,81],removablehandl:[6,7,8,10,11,12,13,14,15,16,17,18],remove_dupl:[6,7,8,10,11,12,13,14,15,16,17,18],repeat:83,report:[78,83],repositori:4,repres:[22,24,26,31,35,40,64,65,66,68,69
,70,71,72,76,77,78,81,83,88],represent:[6,7,8,10,11,12,13,14,15,16,17,18,41,48,66,71,77,83,87],reproduc:[78,83],request:3,requir:[4,6,7,8,10,11,12,13,14,15,16,17,18,33,73,78,79,85],requires_grad:[6,7,8,9,10,11,12,13,14,15,16,17,18,33],requires_grad_:[6,7,8,10,11,12,13,14,15,16,17,18,33],res:59,resampl:19,research:[1,39,48,54,89,90],resembl:80,reset:[6,7,8,10,11,12,13,14,15,16,17,18],reshap:[77,83],residu:[2,19,22,23,64,70,71,74,76,79,81,82],residualblock:42,residualfeedforward:2,resolut:[5,59],resolv:[6,7,8,10,11,12,13,14,15,16,17,18],respect:[6,7,8,10,11,12,13,14,15,16,17,18,58,65,70,72,79,83,90],rest:25,restrict:0,result:[2,6,7,8,10,11,12,13,14,15,16,17,18,24,26,28,30,31,33,37,38,49,55,58,71,76,77,78,80,81,82,83,88],retain:19,retrain:[83,88],retur:60,returin:67,review:69,revolv:78,rewrit:[79,88],right:[0,34,38,39,40,45,46,65,70,72,77,78,80,81,83,88],rightarrow:[6,7,8,9,10,11,12,13,14,15,16,17,18,38,40,48,65,66,68,69,70,71,72,74,76,77,83,88],role:80,rome2esolv:74,routin:[19,24,25,26,27,28,32,78,90],rozza:[1,67,87,89],rtype:[83,87],rule:[49,55],run:[3,6,7,8,10,11,12,13,14,15,16,17,18,73,77,78,80,83,85,87],rung:77,running_loss:83,running_mean:[6,7,8,10,11,12,13,14,15,16,17,18],running_var:[6,7,8,10,11,12,13,14,15,16,17,18],runtim:[6,7,8,10,11,12,13,14,15,16,17,18],runtimeerror:[6,7,8,10,11,12,13,14,15,16,17,18,35],rvert:70,s00466:35,s42256:49,sai:[6,7,8,10,11,12,13,14,15,16,17,18,33],said:87,sake:85,same:[6,7,8,10,11,12,13,14,15,16,17,18,33,35,49,51,66,73,78,79,80,81,82],sampl:[19,22,24,25,26,27,28,29,31,32,38,59,60,64,65,66,67,68,69,70,72,75,76,77,78,80,81,82,83,84,85,88],sample_bord:85,sample_everi:19,sample_numb:77,sample_surfac:[26,31,85],sampled_point:85,sampler:83,sankaran:[40,65],sapinn:[2,80],satisfi:[22,76,81,82,86,88],save:[6,7,8,10,11,12,13,14,15,16,17,18,33,59,76,77,83,86],save_dir:78,saved_metr:78,saved_weight:33,saveparamet:86,scalar:[34,39,58],scale:[6,7,8,9,10,11,12,13,14,15,16,17,18,38,41,49,55,59,80],scale_coeffici:41,scaler:41,scatter:[35,83,85,86],scenario:80,schedul:[65,66,67,68,69,70,71,72,73,74,78,80],scheduler1:73,scheduler2:73,scheduler_discrimin:[66,67],scheduler_discriminator_kwarg:[66,67],scheduler_gener:67,scheduler_generator_kwarg:67,scheduler_kwarg:[65,68,69,70,71,74,80],scheduler_model:[66,72,80],scheduler_model_kwarg:[66,72,80],scheduler_weight:72,scheduler_weights_kwarg:72,schrodingerequ:79,scientif:[51,55],scipi:[4,77,84],scratch:77,script:3,scriptmodul:[37,56],search:19,second:[35,73,83,84],section:[2,76,80],see:[3,6,7,8,10,11,12,13,14,15,16,17,18,24,33,45,46,48,54,60,76,77,78,79,80,81,82,83,84,85,86,88],seeam:88,seed:[78,83],seed_everyth:78,seek:[74,88],seen:[33,78,79],select:[33,37,43,59,90],self:[6,7,8,10,11,12,13,14,15,16,17,18,33,35,41,60,72,73,76,77,78,79,80,81,82,83,85,86,87,88,90],sell:0,send:33,separ:3,sequenti:[6,7,8,10,11,12,13,14,15,16,17,18,35,49,55,80,82,83,88],seri:77,serial:[6,7,8,10,11,12,13,14,15,16,17,18],serv:78,set:[6,7,8,9,10,11,12,13,14,15,16,17,18,23,25,26,27,28,30,31,32,33,35,41,45,46,57,59,60,65,72,73,78,79,81,83,87,90],set_extra_st:[6,7,8,10,11,12,13,14,15,16,17,18],set_text:85,set_titl:[77,83,87,88],set_to_non:[6,7,8,10,11,12,13,14,15,16,17,18],set_xlabel:77,set_ylabel:77,setminu:27,setup:[3,75],sever:[6,7,8,10,11,12,13,14,15,16,17,18,76,78,81,82,87],sgd:73,shall:[0,84],shallow:[6,7,8,10,11,12,13,14,15,16,17,18],shalom:26,shape:[25,26,27,28,31,32,35,39,45,46,50,51,77,83,85,87],share:[33,37,56],share_memori:[6,7,8,10,11,12,13,14,15,16,17,18],share_memory_:[6,7,8,10,11,12,13,14,15,16,17,18],sharei:77,sharex:77,shift:[6,7,8,9,10,11
,12,13,14,15,16,17,18],should:[3,6,7,8,10,11,12,13,14,15,16,17,18,22,23,26,29,31,33,35,44,45,46,58,59,60,64,71,72,73,74,78,81,82,88],show:[6,7,8,10,11,12,13,14,15,16,17,18,76,77,78,79,80,81,83,84,85,86,87,88],shown:[40,59,73,78,83],shrink:[26,31],shuai:55,shyam:65,siam:[51,55],side:83,sifan:[38,51,65],sigma:[14,34,38,39,77,80],sigma_k:38,sigmoid:[2,14,15,72,83],signatur:[6,7,8,10,11,12,13,14,15,16,17,18],signific:[1,3,89],silu:[2,14],sim:[38,40,80],similar:[6,7,8,10,11,12,13,14,15,16,17,18,73,81,83,87],similarli:[6,7,8,10,11,12,13,14,15,16,17,18],simipli:78,simpl:[6,7,8,10,11,12,13,14,15,16,17,18,64,77,78,80,83,84,85,86,87,88,90],simplekernel:83,simpleod:[76,78],simplest:78,simplex:31,simplex_matrix:31,simplexdomain:[2,60,85],simpli:[23,57,73,77,78,83,85,87],simplic:85,simul:[2,76,80],simultan:71,sin:[13,38,40,63,77,79,80,81,82,83,88],sinc:[6,7,8,10,11,12,13,14,15,16,17,18,78,82,83,84,87,88],sine:38,singl:[6,7,8,10,11,12,13,14,15,16,17,18,20,39,50,51,54,64,72,73,83],singular:[41,87],sinsin:81,sinsinab:81,siren:[2,77],sissa:89,sivashinski:5,size:[6,7,8,10,11,12,13,14,15,16,17,18,34,35,37,39,43,45,46,52,53,73,76,77,80,81,82,83,88],size_averag:44,skeleton:85,skip:[6,7,8,10,11,12,13,14,15,16,17,18,51,73],slowli:80,small:[3,77,78,83],smaller:83,smither:87,smooth:88,snapshot:87,snapshotproblem:87,societi:87,softmax:[2,16],softmin:[2,17],softplu:[81,86],softwar:[0,1],sokrati:70,sol_test:77,sol_train:77,sole:88,solut:[38,59,64,65,66,67,68,69,70,71,72,74,76,77,78,79,80,81,82,83,84,86,87,88,90],solv:[2,38,65,66,67,68,69,70,71,72,74,76,78,79,80,82,86,90],solver:[20,59,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,86,87,88,90],solverinterfac:[2,59,64,67,74,75],some:[3,6,7,8,10,11,12,13,14,15,16,17,18,22,33,39,50,51,54,73,76,77,78,80,81,82,83,84,86,87,88,89],someth:[6,7,8,10,11,12,13,14,15,16,17,18,35,73,83],sometim:83,sourc:[1,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,89,90],space:[3,25,27,28,32,37,39,41,43,48,52,53,54,55,71,74,77,87],span:[22,24,60],spatial:[24,26,30,35,39,40,48,57,59,60,61,62,71,76,77,79,81,82,83,86],spatial_domain2:85,spatial_domain:[24,26,31,61,62,63,76,78,79,80,81,82,85,86,88],spatial_domain_2:24,spatial_vari:62,spatialod:62,spatialproblem:[2,61,63,68,76,78,79,80,81,82,86,88],spatio:80,specfic:84,special:[73,89],specif:[23,46,64,65,66,67,68,69,70,71,72,73,74,76,77,78,79,82,84,85,88,90],specifi:[6,7,8,10,11,12,13,14,15,16,17,18,22,23,38,44,45,46,50,64,65,66,67,68,69,70,71,72,73,74,75,79,83,86,88],spectral:[2,37,42],spectral_norm:42,spectralconvblock1d:43,spectralconvblock2d:43,spectralconvblock3d:43,speed:[76,84],sphinx:4,split:87,sqrt:[10,82,83],squar:[65,66,68,69,71,72,74,76,81,82,83,84,85,86],squeez:84,stabil:[86,87],stack:[33,83],standard:[38,41,57,76,78,80,82,88],start:[3,19,20,33,35,72,73,76,78,79,83,85,86,87,90],state:[6,7,8,10,11,12,13,14,15,16,17,18,37,56],state_dict:[6,7,8,10,11,12,13,14,15,16,17,18],statement:79,statist:83,std:[33,41,58],stefano:71,step:[2,64,67,73,74,76,77,78,83,90],stergiopulo:70,still:[45,46,81,83],stochast:78,stochasticweightaverag:78,stoke:87,stop:[20,73,77,78,81,82,83,84,87,88],storag:[6,7,8,10,11,12,13,14,15,16,17,18,33],store:[6,7,8,10,11,12,13,14,15,16,17,18,21,41,64,79,87],store_log:64,str:[6,7,8,10,11,12,13,14,15,16,17,18,23,24,25,26,27,28,30,31,32,33,40,44,45,46,47,49,52,53,54,55,57,58,59,60,64],straight:78,strategi:[20,26,31,47,49,52,54,55,71,80,83,85,90],strazzul
lo:89,strict:[6,7,8,10,11,12,13,14,15,16,17,18,73],strictli:[3,6,7,8,10,11,12,13,14,15,16,17,18],stride:[6,7,8,10,11,12,13,14,15,16,17,18,35],string:[6,7,8,10,11,12,13,14,15,16,17,18,24,26,49,55],strongli:78,structur:[2,48,83,90],stuart:[34,37,39,47,48,52,53,54],student:89,stuff:76,style:3,subdomain:79,subject:0,sublicens:0,submodul:[6,7,8,10,11,12,13,14,15,16,17,18],subplot:[77,83,84,85,87,88],subsample_test_indic:83,subsample_train_indic:83,subsequ:[6,7,8,10,11,12,13,14,15,16,17,18],subset:[23,33,48,58,83],subsetrandomsampl:83,substanti:0,subtract:76,suchuan:40,suffici:80,suggest:[1,76,83,84],suit:84,suitabl:[64,77],sum:[23,33,34,37,44,45,46,47,49,54,55,64,67,74,81,82,83],sum_:[39,45,46,65,66,68,69,70,71,72,74,77,83],sum_j:[16,17],summari:[76,77,78,80,81,82,83,84,87,88],supervis:[2,47,49,52,54,55,71,74,84,89,90],supervisedsolv:[71,77,83,84,87],support:[38,59,66,71,73,89],suppos:83,sure:3,surfac:[26,31],swa_epoch_start:78,swa_lr:78,swalr:78,swap:78,switch_callback:20,switchoptim:20,symbol:76,system:[23,78,84,86],systemequ:[23,79],t0_loss:82,t_i:65,t_k:65,tackl:77,take:[47,49,54,55,58,64,77,78,79,80,81,82,83,85,86,87],taken:[26,28,31,84],tanh:[2,10,11,18,37,39,49,50,51,52,53,54,55,76,78,80,87],target:[6,7,8,10,11,12,13,14,15,16,17,18,44,45,46,83],tau:83,team:[78,90],technic:[6,7,8,10,11,12,13,14,15,16,17,18],techniqu:[87,90],tell:[33,76,83],tempor:[48,57,59,60,63,76,77,79,80],temporal_domain:[63,76,79,82],temporal_vari:63,temporari:86,teng:51,tensor:[6,7,8,9,10,11,12,13,14,15,16,17,18,22,24,25,26,27,28,29,30,31,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,57,58,60,64,66,67,69,71,72,73,74,76,77,78,81,82,83,84,85,86,87],tensor_nam:85,tensorboard:78,tensorboardlogg:78,term:[23,61,67,80,81,86,90],termin:78,test:[3,4,40,77,80,81,82,83,84,87,88],test_data:83,test_load:83,text:[6,7,8,9,10,11,12,13,14,15,16,17,18,40,45,46,81,82,86],texttt:77,than:[20,26,31,60,81,84],thank:[78,81,87,89,90],thei:[6,7,8,10,11,12,13,14,15,16,17,18,33,39,50,51,54,76,78,79,81,85,88],them:[20,33,41,70,73,78,81,83,85,86,87],theorem:49,theori:[67,70],theta:[48,77,83,88],thi:[0,3,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,23,29,30,33,35,38,39,40,44,48,50,51,53,54,56,57,60,64,65,66,67,68,69,70,71,72,73,74,76,77,78,79,80,81,82,83,84,85,86,87,88,89],thing:73,think:3,third:86,those:[19,44,76],though:80,thought:83,thre:82,three:[22,35,37,43,76,77,78,85],three_domain_union:85,three_domain_union_point:85,through:[33,35,36,72,76,79,80,81,83,84,86],throughout:3,thrown:[6,7,8,10,11,12,13,14,15,16,17,18],thu:[73,83],tight_layout:[83,88],tild:[38,40,80,83],till:78,time:[5,6,7,8,10,11,12,13,14,15,16,17,18,34,39,47,48,54,63,66,67,69,71,72,76,77,78,80,82,83,86,87,88],time_elaps:78,timedependentproblem:[2,65,76,79,82],timedependentspatialproblem:63,timer:78,times:77,timespaceod:76,tip:5,titl:[1,59,80,83,84,85],title_list:85,tmp_dir:86,tmp_poisson_invers:86,to_empti:[6,7,8,10,11,12,13,14,15,16,17,18],togeth:[24,56,90],toi:[76,81,83],tolist:77,tool:3,top:[45,46],topic:[76,85],torch:[4,6,7,8,9,10,11,12,13,14,15,16,17,18,20,22,23,25,27,28,29,30,31,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,76,77,78,79,80,81,82,83,84,85,86,87,88],torch_doctest_cuda1:[6,7,8,10,11,12,13,14,15,16,17,18],torchvis:83,tort:0,toscano:70,total:[19,24,45,46,78,83],total_it:[65,66,67,68,69,70,71,72,74],totensor:83,touch:90,tpu:[58,73,77,78,80,84,88],tra:83,track:[2,21,26,76,81],tracker:21,tradit:[47,49,52,54,55],train:[2,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,
23,35,40,47,49,52,54,55,59,64,65,66,67,69,71,72,73,74,75,77,79,80,81,83,84,86,87,88,90],train_data:83,train_load:83,trainabl:[6,7,8,9,10,11,12,13,14,15,16,17,18,35,41,83],trainer:[2,19,20,21,59,73,76,77,80,81,82,83,84,86,87,88],trainer_feat:81,trainer_learn:81,trainer_sapinn:80,trainig:71,training_step:[64,67,73,74],traininig:59,trainint:77,trajectori:77,tranform:37,transform:[34,37,38,39,51,80,83,88],transform_input:83,transformer_net:51,transit:[6,7,8,10,11,12,13,14,15,16,17,18,89],translat:[49,55,88],transpos:[35,83],transpose_no_overlap:35,transpose_overlap:35,treat:86,trend:[81,86],tri:[6,7,8,10,11,12,13,14,15,16,17,18,66],triang:87,tribut:26,tricontourf:87,trigger:[19,33],tripcolor:87,tripl:83,trivial:[81,85],truncat:[40,77],trunk:[49,55],trunk_net:[49,55],truth:86,truth_solut:[76,78,80,81,82,88],tunabl:[34,39,77],tune:81,tupl:[6,7,8,10,11,12,13,14,15,16,17,18,33,35,37,43,50,51,57,64,66,67,69,72,73,74,83],tutori:90,twenti:35,twice:79,two:[5,6,7,8,10,11,12,13,14,15,16,17,18,25,27,28,32,37,38,43,44,51,71,73,76,78,80,83,85,86],type:[4,6,7,8,10,11,12,13,14,15,16,17,18,19,20,21,23,24,25,26,27,28,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,57,58,60,64,65,66,67,68,69,70,71,72,74,76,81,82,83,87],typeerror:[33,58],typic:[6,7,8,10,11,12,13,14,15,16,17,18,76,87],u_expect:[63,79,82],u_idx:87,u_t:[63,82],u_test:[84,87],u_test_pr:87,u_train:[84,87],u_train_pr:87,u_tt:[63,82],u_x:[61,62,76,78],u_xx:[80,88],ubbiali:[71,87],uliss:72,umh:81,unabl:81,unchang:[6,7,8,10,11,12,13,14,15,16,17,18],undefin:[6,7,8,10,11,12,13,14,15,16,17,18],under:[6,7,8,10,11,12,13,14,15,16,17,18,89],underli:90,understand:[5,51,80,81,83],unexpect:[6,7,8,10,11,12,13,14,15,16,17,18],unexpected_kei:[6,7,8,10,11,12,13,14,15,16,17,18],uniformli:[19,26],uninstal:4,union:[2,6,7,8,10,11,12,13,14,15,16,17,18,60,85,86],uniqu:[33,77],unit:[41,73,83,84],univers:[34,47,49,77,89],unknown:[23,48,81,82,86],unknown_paramet:86,unknown_parameter_domain:86,unknown_vari:86,unkwown:81,unless:[6,7,8,10,11,12,13,14,15,16,17,18],unlik:[47,49,52,54,55],unreduc:[45,46],unsqueez:[83,84],unstructur:[5,35],unsupervis:[76,90],unsur:3,until:78,updat:[6,7,8,10,11,12,13,14,15,16,17,18,24,34,39,70,73,77,78],upper:88,usag:[41,78,81,83,85],use:[0,3,6,7,8,9,10,11,12,13,14,15,16,17,18,33,35,39,47,49,50,51,54,55,59,64,65,66,67,68,69,70,71,72,73,74,76,77,78,79,81,83,84,85,86,87,88],used:[6,7,8,10,11,12,13,14,15,16,17,18,19,20,22,23,26,31,35,38,39,41,49,50,51,54,55,57,58,59,64,65,66,67,68,69,70,71,72,73,74,77,78,79,80,81,83,84,85,86,88],useful:[5,76,77,78,79,80,81,82,83,86,88],user:[2,4,6,7,8,10,11,12,13,14,15,16,17,18,23,56,64,65,66,67,68,69,70,71,72,74,76,81,82,83,90],uses:71,using:[2,3,4,5,6,7,8,10,11,12,13,14,15,16,17,18,19,38,48,59,65,66,67,68,69,70,71,72,73,74,76,77,78,80,82,83,85,86,87,90],usual:[82,88],util:[6,7,8,10,11,12,13,14,15,16,17,18,83],v_num:[77,78,80,81,82,84,87,88],val_loss:73,valid:[60,73,82,87],valu:[6,7,8,10,11,12,13,14,15,16,17,18,21,22,23,24,26,35,38,40,41,55,59,60,64,65,67,73,76,77,78,79,83,85,86,87],valueerror:[19,20,33,58],values_mesh:83,vari:86,variabl:[6,7,8,9,10,11,12,13,14,15,16,17,18,21,23,24,25,26,27,28,30,31,32,35,48,49,55,57,58,59,60,61,62,63,64,76,77,78,79,81,82,83,85,86,87],varianc:[38,41,67],variant:80,variat:[38,80],varieti:84,variou:[3,64,81],vector:[23,34,38,39,58,83],vectori:[35,58,83],veloc:[58,82,87],velocity_field:58,verbos:83,veri:[76,78,80,81,83,84,85,89],version:[6,7,8,10,11,12,13,14,15,16,17,18,78,87],vertex:31,vertic:33,via:[5,49,55,75],view:[6,7,8,10,11,12,13,14,15,16,17,18],viscos:7
9,viscou:79,vision:42,visual:[5,59,76,80,83,84,85,87],vol:[6,7,8,9,10,11,12,13,14,15,16,17,18,26],volum:1,vstack:33,wai:[3,22,77,78,80,81,82,83,84,90],wang:[38,40,51,65,69],want:[9,33,64,76,77,83,86,87],warmli:89,warn:73,warranti:0,wave:[5,63,86],wave_equ:[63,82],wave_sol:82,webpag:78,weight:[6,7,8,10,11,12,13,14,15,16,17,18,33,35,70,72,78,81,83,87],weight_decai:81,weights_dict:72,weights_funct:72,welcom:2,well:[3,6,7,8,10,11,12,13,14,15,16,17,18,78,82,83,87],were:[78,89],what:[3,73],whatev:[66,72,73],when:[6,7,8,9,10,11,12,13,14,15,16,17,18,26,31,35,41,44,46,57,73,74,78,80,82,84],whenev:78,where:[6,7,8,9,10,11,12,13,14,15,16,17,18,19,23,34,35,38,39,40,45,46,47,48,50,51,54,59,60,65,66,68,69,70,71,72,74,76,77,79,80,81,82,83,85,86,87],wherea:[59,73],whether:[0,6,7,8,10,11,12,13,14,15,16,17,18,29],which:[3,6,7,8,10,11,12,13,14,15,16,17,18,20,23,25,26,31,38,39,41,47,51,53,54,58,60,64,66,71,73,76,77,78,79,80,81,82,83,85,86,87,88,89],whole:[6,7,8,10,11,12,13,14,15,16,17,18],whom:0,whose:[6,7,8,10,11,12,13,14,15,16,17,18,41,57,60,73],wide:23,width:35,wiki:31,wikipedia:31,wise:[49,55],wish:83,with_kwarg:[6,7,8,10,11,12,13,14,15,16,17,18],within:[6,7,8,10,11,12,13,14,15,16,17,18],withing:83,without:[0,6,7,8,10,11,12,13,14,15,16,17,18,20,59,73,76,78,83,85,88,90],won:[6,7,8,10,11,12,13,14,15,16,17,18],work:[3,6,7,8,10,11,12,13,14,15,16,17,18,40,48,58,65,67,68,71,73,78,79,83,85],worker:78,workload:64,workshop:26,wors:82,worst:66,worth:80,would:[1,6,7,8,10,11,12,13,14,15,16,17,18,80,83,88],wrap:[6,7,8,10,11,12,13,14,15,16,17,18,76,78],wrapper:[23,36,73],write:[23,58,80,81,88],written:[48,80,81,82,88,89],x0_loss:[76,78],x_1:40,x_i:[16,17],x_j:[16,17],x_max:86,x_min:86,x_n:[45,46],xdoctest:[6,7,8,10,11,12,13,14,15,16,17,18],xlabel:86,xpu:[6,7,8,10,11,12,13,14,15,16,17,18],y_max:86,y_min:86,y_n:[45,46],yaakov:26,yang:69,year:1,yield:[6,7,8,10,11,12,13,14,15,16,17,18],ylabel:86,ylim:86,you:[1,2,3,4,6,7,8,10,11,12,13,14,15,16,17,18,73,76,77,78,79,80,81,82,84,85,88],your:[1,3,6,7,8,10,11,12,13,14,15,16,17,18,73,77,78,85],yourself:[73,76],yujun:51,zeng:66,zero:[6,7,8,10,11,12,13,14,15,16,17,18,23,41,78,79,82,83],zero_grad:[6,7,8,10,11,12,13,14,15,16,17,18,83],zip:[85,87],zongyi:37},titles:["License","Cite PINA","Code Documentation","How to contribute","Installation","PINA Tutorials","AdaptiveCELU","AdaptiveELU","AdaptiveExp","AdaptiveActivationFunctionInterface","AdaptiveGELU","AdaptiveMish","AdaptiveReLU","AdaptiveSIREN","AdaptiveSiLU","AdaptiveSigmoid","AdaptiveSoftmax","AdaptiveSoftmin","AdaptiveTanh","Adaptive Refinments callbacks","Optimizer callbacks","Processing callbacks","Condition","Equations","CartesianDomain","Difference","EllipsoidDomain","Exclusion","Intersection","Location","OperationInterface","SimplexDomain","Union","LabelTensor","Averaging layers","Continuous convolution","EnhancedLinear","Fourier Layers","Fourier Feature Embedding","Low Rank layer","Periodic Boundary Condition Embedding","PODBlock","Residual layer","Spectral Convolution","LpLoss","LpLoss","PowerLoss","Averaging Neural Operator","KernelNeuralOperator","DeepONet","FeedForward","ResidualFeedForward","FNO","FourierIntegralKernel","Low Rank Neural Operator","MIONet","MultiFeedForward","Network","Operators","Plotter","AbstractProblem","ParametricProblem","SpatialProblem","TimeDependentProblem","PINNInterface","CausalPINN","CompetitivePINN","GAROM","GPINN","PINN","RBAPINN","ReducedOrderModelSolver","SAPINN","SolverInterface","SupervisedSolver","Trainer","Tutorial: Physics Informed Neural Networks on PINA","Tutorial: Averaging 
Neural Operator for solving Kuramoto Sivashinsky equation","Tutorial: PINA and PyTorch Lightning, training tips and visualizations","Tutorial: The Equation Class","Tutorial: Multiscale PDE learning with Fourier Feature Network","Tutorial: Two dimensional Poisson problem using Extra Features Learning","Tutorial: Two dimensional Wave problem with hard constraint","Tutorial: Unstructured convolutional autoencoder via continuous convolution","Tutorial: Two dimensional Darcy flow using the Fourier Neural Operator","Tutorial: Building custom geometries with PINA Location class","Tutorial: Resolution of an inverse problem","Tutorial: Reduced order model (PODNN) for parametric problems","Tutorial: One dimensional Helmotz equation using Periodic Boundary Conditions","PINA Team","Welcome to PINA\u2019s documentation!"],titleterms:{"boolean":85,"class":[76,79,85],"function":[2,83],"new":79,One:88,The:[79,81,82,88],abstractproblem:60,acceler:78,accuraci:78,activ:2,adapt:[2,19],adaptiveactivationfunctioninterfac:9,adaptivecelu:6,adaptiveelu:7,adaptiveexp:8,adaptivegelu:10,adaptivemish:11,adaptiverelu:12,adaptivesigmoid:15,adaptivesilu:14,adaptivesiren:13,adaptivesoftmax:16,adaptivesoftmin:17,adaptivetanh:18,autoencod:83,averag:[34,47,77],background:83,boost:78,boundari:[40,88],build:[76,83,85],built:85,burger:79,callback:[2,19,20,21,78],cartesiandomain:24,causalpinn:65,cite:1,classifi:83,code:2,competitivepinn:66,condit:[22,40,88],constraint:82,continu:[35,83],contribut:3,convolut:[35,43,83],creat:85,custom:85,darci:84,data:[76,77,84],deeponet:49,defin:79,definit:[81,82,83,86,88],differ:[25,83],differenti:76,dimension:[81,82,84,88],document:[2,90],domain:85,ellipsoiddomain:26,embed:[38,40,80],enhancedlinear:36,equat:[2,23,76,77,79,88],exampl:79,exclus:27,extra:81,featur:[2,38,80,81],feedforward:[50,84],filter:83,flow:84,fno:[52,84],fourier:[37,38,80,84],fourierintegralkernel:53,from:4,fuorier:84,garom:67,gener:[76,77,84],geometri:[2,85],get:5,gpinn:68,hard:82,helmotz:88,how:3,infer:82,inform:[5,76,90],input:83,instal:4,intersect:28,introduct:86,invers:86,kernelneuraloper:48,kuramoto:77,labeltensor:33,layer:[2,34,37,39,42],learn:[5,80,81,90],learnabl:81,licens:0,lightn:78,locat:[29,85],log:78,loss:2,low:[39,54],lploss:[44,45],memori:78,metric:2,mionet:55,mnist:83,model:[2,82,87],multifeedforward:56,multiscal:80,network:[5,57,76,80,84,88,90],neural:[5,47,54,76,77,84,90],next:[76,77,78,79,80,81,82,83,84,85,88],oper:[2,5,47,54,58,77,84,85,90],operationinterfac:30,optim:20,order:87,ordinari:76,parametr:87,parametricproblem:61,patch:3,pde:80,perform:76,period:[40,88],physic:[5,76,90],pina:[1,2,5,76,78,80,85,86,89,90],pinn:[69,81],pinninterfac:64,pip:4,plotter:59,podblock:41,podnn:87,poisson:81,powerloss:46,problem:[2,76,77,80,81,82,84,86,87,88],process:21,pytorch:78,rank:[39,54],rbapinn:70,reduc:87,reducedordermodelsolv:71,refer:87,refin:19,residu:42,residualfeedforward:51,resolut:[83,86],sapinn:72,save:78,set:2,simpl:76,simplex:85,simplexdomain:31,sivashinski:77,small:76,solv:[77,81,84,88],solver:2,solverinterfac:73,sourc:4,spatialproblem:62,spectral:43,speed:78,standard:81,start:5,stride:83,submit:3,supervis:5,supervisedsolv:74,team:89,timedependentproblem:63,tip:78,todo:59,train:[76,78,82],trainer:[75,78],tutori:[5,76,77,78,79,80,81,82,83,84,85,86,87,88],two:[81,82,84],union:32,unstructur:83,upsampl:83,using:[81,84,88],via:[4,83],visual:78,wave:82,welcom:90,what:[76,77,78,79,80,81,82,83,84,85,88],write:76}}) \ No newline at end of file +Search.setIndex({"alltitles": {"AbstractProblem": [[63, null]], 
"Adaptive Activation Functions": [[2, "adaptive-activation-functions"]], "Adaptive Refinments callbacks": [[19, null]], "AdaptiveActivationFunctionInterface": [[9, null]], "AdaptiveCELU": [[6, null]], "AdaptiveELU": [[7, null]], "AdaptiveExp": [[8, null]], "AdaptiveGELU": [[10, null]], "AdaptiveMish": [[11, null]], "AdaptiveReLU": [[12, null]], "AdaptiveSIREN": [[13, null]], "AdaptiveSiLU": [[14, null]], "AdaptiveSigmoid": [[15, null]], "AdaptiveSoftmax": [[16, null]], "AdaptiveSoftmin": [[17, null]], "AdaptiveTanh": [[18, null]], "Autoencoding at different resolution": [[86, "autoencoding-at-different-resolution"]], "Averaging Neural Operator": [[49, null], [80, "averaging-neural-operator"]], "Averaging layers": [[34, null]], "Boolean Operations": [[88, "boolean-operations"]], "Build a PINA problem": [[79, "build-a-pina-problem"]], "Building a Continuous Convolutional Autoencoder": [[86, "building-a-continuous-convolutional-autoencoder"]], "Building a MNIST Classifier": [[86, "building-a-mnist-classifier"]], "Built-in Geometries": [[88, "built-in-geometries"]], "Callbacks": [[2, "callbacks"]], "CartesianDomain": [[24, null]], "CausalPINN": [[68, null]], "Cite PINA": [[1, null]], "Code Documentation": [[2, null]], "CompetitivePINN": [[69, null]], "Condition": [[22, null]], "Continuous convolution": [[35, null]], "Continuous filter background": [[86, "continuous-filter-background"]], "Create Custom Location": [[88, "create-custom-location"]], "Data Generation": [[80, "data-generation"], [87, "data-generation"]], "DeepONet": [[51, null]], "Defining a new Equation class": [[82, "defining-a-new-equation-class"]], "Difference": [[25, null]], "EllipsoidDomain": [[26, null]], "EnhancedLinear": [[36, null]], "Equations": [[23, null]], "Equations and Operators": [[2, "equations-and-operators"]], "Example: The Burgers 1D equation": [[82, "example-the-burgers-1d-equation"]], "Exclusion": [[27, null]], "FNO": [[54, null]], "FeedForward": [[52, null]], "Filter definition": [[86, "filter-definition"]], "Filter for upsampling": [[86, "filter-for-upsampling"]], "Fourier Feature Embedding": [[38, null]], "Fourier Feature Embedding in PINA": [[83, "fourier-feature-embedding-in-pina"]], "Fourier Layers": [[37, null]], "FourierIntegralKernel": [[55, null]], "GAROM": [[70, null]], "GPINN": [[71, null]], "Generate data": [[79, "generate-data"]], "Geometries": [[2, "geometries"]], "Geometry set operations": [[2, "geometry-set-operations"]], "Getting started with PINA": [[5, "getting-started-with-pina"]], "Hard Constraint Model": [[85, "hard-constraint-model"]], "How to contribute": [[3, null]], "Input function": [[86, "input-function"]], "Installation": [[4, null]], "Installing from source": [[4, "installing-from-source"]], "Installing via PIP": [[4, "installing-via-pip"]], "Intersection": [[28, null]], "Introduction to the inverse problem": [[89, "introduction-to-the-inverse-problem"]], "Inverse problem definition in PINA": [[89, "inverse-problem-definition-in-pina"]], "KernelNeuralOperator": [[50, null]], "LabelTensor": [[33, null]], "Layers": [[2, "layers"]], "License": [[0, null]], "Location": [[29, null]], "Low Rank Neural Operator": [[56, null]], "Low Rank layer": [[39, null]], "LpLoss": [[46, null], [47, null]], "MIONet": [[57, null]], "Metrics and Losses": [[2, "metrics-and-losses"]], "Models": [[2, "models"]], "MultiFeedForward": [[58, null]], "Multiscale Problem": [[83, "multiscale-problem"]], "Network": [[59, null]], "Neural Operator Learning": [[5, "neural-operator-learning"]], "OperationInterface": 
[[30, null]], "Operators": [[61, null]], "Optimizer callbacks": [[20, null]], "OrthogonalBlock": [[40, null]], "PINA Features": [[2, "pina-features"]], "PINA Team": [[92, null]], "PINA Tutorials": [[5, null]], "PINN": [[72, null]], "PINNInterface": [[67, null]], "POD-NN reduced order model": [[90, "pod-nn-reduced-order-model"]], "POD-RBF reduced order model": [[90, "pod-rbf-reduced-order-model"]], "POD-RBF vs POD-NN": [[90, "pod-rbf-vs-pod-nn"]], "PODBlock": [[42, null]], "ParametricProblem": [[64, null]], "Perform a small training": [[79, "perform-a-small-training"]], "Periodic Boundary Condition Embedding": [[41, null]], "Physics Informed Neural Networks": [[5, "physics-informed-neural-networks"]], "Plotter": [[62, null]], "PowerLoss": [[48, null]], "Problem": [[2, "problem"]], "Processing callbacks": [[21, null]], "RBAPINN": [[73, null]], "RBFBlock": [[43, null]], "ReducedOrderModelSolver": [[74, null]], "References": [[90, "references"]], "Residual layer": [[44, null]], "ResidualFeedForward": [[53, null]], "SAPINN": [[75, null]], "Simple Ordinary Differential Equation": [[79, "simple-ordinary-differential-equation"]], "Simplex Domain": [[88, "simplex-domain"]], "SimplexDomain": [[31, null]], "SolverInterface": [[76, null]], "Solvers": [[2, "solvers"]], "Solving the KS problem": [[80, "solving-the-ks-problem"]], "Solving the problem with a FeedForward Neural Network": [[87, "solving-the-problem-with-a-feedforward-neural-network"]], "Solving the problem with a Fuorier Neural Operator (FNO)": [[87, "solving-the-problem-with-a-fuorier-neural-operator-fno"]], "Solving the problem with a Periodic Network": [[91, "solving-the-problem-with-a-periodic-network"]], "Solving the problem with extra-features PINNs": [[84, "solving-the-problem-with-extra-features-pinns"]], "Solving the problem with learnable extra-features PINNs": [[84, "solving-the-problem-with-learnable-extra-features-pinns"]], "Solving the problem with standard PINNs": [[84, "solving-the-problem-with-standard-pinns"]], "SpatialProblem": [[65, null]], "Spectral Convolution": [[45, null]], "Spline": [[60, null]], "Stride": [[86, "stride"]], "Submitting a patch": [[3, "submitting-a-patch"]], "Supervised Learning": [[5, "supervised-learning"]], "SupervisedSolver": [[77, null]], "The problem definition": [[84, "the-problem-definition"], [85, "the-problem-definition"], [91, "the-problem-definition"]], "TimeDependentProblem": [[66, null]], "Todo": [[62, "id1"]], "Train and Inference": [[85, "train-and-inference"]], "Trainer": [[78, null]], "Trainer Accelerator": [[81, "trainer-accelerator"]], "Trainer Callbacks": [[81, "trainer-callbacks"]], "Trainer Logging": [[81, "trainer-logging"]], "Trainer Tips to Boost Accuracy, Save Memory and Speed Up Training": [[81, "trainer-tips-to-boost-accuracy-save-memory-and-speed-up-training"]], "Tutorial: Averaging Neural Operator for solving Kuramoto Sivashinsky equation": [[80, null]], "Tutorial: Building custom geometries with PINA Location class": [[88, null]], "Tutorial: Multiscale PDE learning with Fourier Feature Network": [[83, null]], "Tutorial: One dimensional Helmotz equation using Periodic Boundary Conditions": [[91, null]], "Tutorial: PINA and PyTorch Lightning, training tips and visualizations": [[81, null]], "Tutorial: Physics Informed Neural Networks on PINA": [[79, null]], "Tutorial: Reduced order model (POD-RBF or POD-NN) for parametric problems": [[90, null]], "Tutorial: Resolution of an inverse problem": [[89, null]], "Tutorial: The Equation Class": [[82, null]], "Tutorial: Two 
dimensional Darcy flow using the Fourier Neural Operator": [[87, null]], "Tutorial: Two dimensional Poisson problem using Extra Features Learning": [[84, null]], "Tutorial: Two dimensional Wave problem with hard constraint": [[85, null]], "Tutorial: Unstructured convolutional autoencoder via continuous convolution": [[86, null]], "Union": [[32, null]], "Welcome to PINA\u2019s documentation!": [[93, null]], "What\u2019s next?": [[79, "whats-next"], [80, "whats-next"], [81, "whats-next"], [82, "whats-next"], [83, "whats-next"], [84, "whats-next"], [85, "whats-next"], [86, "whats-next"], [87, "whats-next"], [88, "whats-next"], [91, "whats-next"]], "Write the problem class": [[79, "write-the-problem-class"]]}, "docnames": ["_LICENSE", "_cite", "_rst/_code", "_rst/_contributing", "_rst/_installation", "_rst/_tutorial", "_rst/adaptive_functions/AdaptiveCELU", "_rst/adaptive_functions/AdaptiveELU", "_rst/adaptive_functions/AdaptiveExp", "_rst/adaptive_functions/AdaptiveFunctionInterface", "_rst/adaptive_functions/AdaptiveGELU", "_rst/adaptive_functions/AdaptiveMish", "_rst/adaptive_functions/AdaptiveReLU", "_rst/adaptive_functions/AdaptiveSIREN", "_rst/adaptive_functions/AdaptiveSiLU", "_rst/adaptive_functions/AdaptiveSigmoid", "_rst/adaptive_functions/AdaptiveSoftmax", "_rst/adaptive_functions/AdaptiveSoftmin", "_rst/adaptive_functions/AdaptiveTanh", "_rst/callbacks/adaptive_refinment_callbacks", "_rst/callbacks/optimizer_callbacks", "_rst/callbacks/processing_callbacks", "_rst/condition", "_rst/equations", "_rst/geometry/cartesian", "_rst/geometry/difference_domain", "_rst/geometry/ellipsoid", "_rst/geometry/exclusion_domain", "_rst/geometry/intersection_domain", "_rst/geometry/location", "_rst/geometry/operation_interface", "_rst/geometry/simplex", "_rst/geometry/union_domain", "_rst/label_tensor", "_rst/layers/avno_layer", "_rst/layers/convolution", "_rst/layers/enhanced_linear", "_rst/layers/fourier", "_rst/layers/fourier_embedding", "_rst/layers/lowrank_layer", "_rst/layers/orthogonal", "_rst/layers/pbc_embedding", "_rst/layers/pod", "_rst/layers/rbf_layer", "_rst/layers/residual", "_rst/layers/spectral", "_rst/loss/loss_interface", "_rst/loss/lploss", "_rst/loss/powerloss", "_rst/models/avno", "_rst/models/base_no", "_rst/models/deeponet", "_rst/models/fnn", "_rst/models/fnn_residual", "_rst/models/fno", "_rst/models/fourier_kernel", "_rst/models/lno", "_rst/models/mionet", "_rst/models/multifeedforward", "_rst/models/network", "_rst/models/spline", "_rst/operators", "_rst/plotter", "_rst/problem/abstractproblem", "_rst/problem/parametricproblem", "_rst/problem/spatialproblem", "_rst/problem/timedepproblem", "_rst/solvers/basepinn", "_rst/solvers/causalpinn", "_rst/solvers/competitivepinn", "_rst/solvers/garom", "_rst/solvers/gpinn", "_rst/solvers/pinn", "_rst/solvers/rba_pinn", "_rst/solvers/rom", "_rst/solvers/sapinn", "_rst/solvers/solver_interface", "_rst/solvers/supervised", "_rst/trainer", "_rst/tutorials/tutorial1/tutorial", "_rst/tutorials/tutorial10/tutorial", "_rst/tutorials/tutorial11/tutorial", "_rst/tutorials/tutorial12/tutorial", "_rst/tutorials/tutorial13/tutorial", "_rst/tutorials/tutorial2/tutorial", "_rst/tutorials/tutorial3/tutorial", "_rst/tutorials/tutorial4/tutorial", "_rst/tutorials/tutorial5/tutorial", "_rst/tutorials/tutorial6/tutorial", "_rst/tutorials/tutorial7/tutorial", "_rst/tutorials/tutorial8/tutorial", "_rst/tutorials/tutorial9/tutorial", "_team", "index"], "envversion": {"sphinx": 62, "sphinx.domains.c": 3, "sphinx.domains.changeset": 1, 
"sphinx.domains.citation": 1, "sphinx.domains.cpp": 9, "sphinx.domains.index": 1, "sphinx.domains.javascript": 3, "sphinx.domains.math": 2, "sphinx.domains.python": 4, "sphinx.domains.rst": 2, "sphinx.domains.std": 2, "sphinx.ext.intersphinx": 1, "sphinx.ext.todo": 2, "sphinx.ext.viewcode": 1}, "filenames": ["_LICENSE.rst", "_cite.rst", "_rst/_code.rst", "_rst/_contributing.rst", "_rst/_installation.rst", "_rst/_tutorial.rst", "_rst/adaptive_functions/AdaptiveCELU.rst", "_rst/adaptive_functions/AdaptiveELU.rst", "_rst/adaptive_functions/AdaptiveExp.rst", "_rst/adaptive_functions/AdaptiveFunctionInterface.rst", "_rst/adaptive_functions/AdaptiveGELU.rst", "_rst/adaptive_functions/AdaptiveMish.rst", "_rst/adaptive_functions/AdaptiveReLU.rst", "_rst/adaptive_functions/AdaptiveSIREN.rst", "_rst/adaptive_functions/AdaptiveSiLU.rst", "_rst/adaptive_functions/AdaptiveSigmoid.rst", "_rst/adaptive_functions/AdaptiveSoftmax.rst", "_rst/adaptive_functions/AdaptiveSoftmin.rst", "_rst/adaptive_functions/AdaptiveTanh.rst", "_rst/callbacks/adaptive_refinment_callbacks.rst", "_rst/callbacks/optimizer_callbacks.rst", "_rst/callbacks/processing_callbacks.rst", "_rst/condition.rst", "_rst/equations.rst", "_rst/geometry/cartesian.rst", "_rst/geometry/difference_domain.rst", "_rst/geometry/ellipsoid.rst", "_rst/geometry/exclusion_domain.rst", "_rst/geometry/intersection_domain.rst", "_rst/geometry/location.rst", "_rst/geometry/operation_interface.rst", "_rst/geometry/simplex.rst", "_rst/geometry/union_domain.rst", "_rst/label_tensor.rst", "_rst/layers/avno_layer.rst", "_rst/layers/convolution.rst", "_rst/layers/enhanced_linear.rst", "_rst/layers/fourier.rst", "_rst/layers/fourier_embedding.rst", "_rst/layers/lowrank_layer.rst", "_rst/layers/orthogonal.rst", "_rst/layers/pbc_embedding.rst", "_rst/layers/pod.rst", "_rst/layers/rbf_layer.rst", "_rst/layers/residual.rst", "_rst/layers/spectral.rst", "_rst/loss/loss_interface.rst", "_rst/loss/lploss.rst", "_rst/loss/powerloss.rst", "_rst/models/avno.rst", "_rst/models/base_no.rst", "_rst/models/deeponet.rst", "_rst/models/fnn.rst", "_rst/models/fnn_residual.rst", "_rst/models/fno.rst", "_rst/models/fourier_kernel.rst", "_rst/models/lno.rst", "_rst/models/mionet.rst", "_rst/models/multifeedforward.rst", "_rst/models/network.rst", "_rst/models/spline.rst", "_rst/operators.rst", "_rst/plotter.rst", "_rst/problem/abstractproblem.rst", "_rst/problem/parametricproblem.rst", "_rst/problem/spatialproblem.rst", "_rst/problem/timedepproblem.rst", "_rst/solvers/basepinn.rst", "_rst/solvers/causalpinn.rst", "_rst/solvers/competitivepinn.rst", "_rst/solvers/garom.rst", "_rst/solvers/gpinn.rst", "_rst/solvers/pinn.rst", "_rst/solvers/rba_pinn.rst", "_rst/solvers/rom.rst", "_rst/solvers/sapinn.rst", "_rst/solvers/solver_interface.rst", "_rst/solvers/supervised.rst", "_rst/trainer.rst", "_rst/tutorials/tutorial1/tutorial.rst", "_rst/tutorials/tutorial10/tutorial.rst", "_rst/tutorials/tutorial11/tutorial.rst", "_rst/tutorials/tutorial12/tutorial.rst", "_rst/tutorials/tutorial13/tutorial.rst", "_rst/tutorials/tutorial2/tutorial.rst", "_rst/tutorials/tutorial3/tutorial.rst", "_rst/tutorials/tutorial4/tutorial.rst", "_rst/tutorials/tutorial5/tutorial.rst", "_rst/tutorials/tutorial6/tutorial.rst", "_rst/tutorials/tutorial7/tutorial.rst", "_rst/tutorials/tutorial8/tutorial.rst", "_rst/tutorials/tutorial9/tutorial.rst", "_team.rst", "index.rst"], "indexentries": {"abstractproblem (class in pina.problem.abstract_problem)": [[63, "pina.problem.abstract_problem.AbstractProblem", false]], 
"adaptiveactivationfunctioninterface (class in pina.adaptive_functions.adaptive_func_interface)": [[9, "pina.adaptive_functions.adaptive_func_interface.AdaptiveActivationFunctionInterface", false]], "adaptivecelu (class in pina.adaptive_functions.adaptive_func)": [[6, "pina.adaptive_functions.adaptive_func.AdaptiveCELU", false]], "adaptiveelu (class in pina.adaptive_functions.adaptive_func)": [[7, "pina.adaptive_functions.adaptive_func.AdaptiveELU", false]], "adaptiveexp (class in pina.adaptive_functions.adaptive_func)": [[8, "pina.adaptive_functions.adaptive_func.AdaptiveExp", false]], "adaptivegelu (class in pina.adaptive_functions.adaptive_func)": [[10, "pina.adaptive_functions.adaptive_func.AdaptiveGELU", false]], "adaptivemish (class in pina.adaptive_functions.adaptive_func)": [[11, "pina.adaptive_functions.adaptive_func.AdaptiveMish", false]], "adaptiverelu (class in pina.adaptive_functions.adaptive_func)": [[12, "pina.adaptive_functions.adaptive_func.AdaptiveReLU", false]], "adaptivesigmoid (class in pina.adaptive_functions.adaptive_func)": [[15, "pina.adaptive_functions.adaptive_func.AdaptiveSigmoid", false]], "adaptivesilu (class in pina.adaptive_functions.adaptive_func)": [[14, "pina.adaptive_functions.adaptive_func.AdaptiveSiLU", false]], "adaptivesiren (class in pina.adaptive_functions.adaptive_func)": [[13, "pina.adaptive_functions.adaptive_func.AdaptiveSIREN", false]], "adaptivesoftmax (class in pina.adaptive_functions.adaptive_func)": [[16, "pina.adaptive_functions.adaptive_func.AdaptiveSoftmax", false]], "adaptivesoftmin (class in pina.adaptive_functions.adaptive_func)": [[17, "pina.adaptive_functions.adaptive_func.AdaptiveSoftmin", false]], "adaptivetanh (class in pina.adaptive_functions.adaptive_func)": [[18, "pina.adaptive_functions.adaptive_func.AdaptiveTanh", false]], "add_points() (abstractproblem method)": [[63, "pina.problem.abstract_problem.AbstractProblem.add_points", false]], "advection() (in module pina.operators)": [[61, "pina.operators.advection", false]], "aggregator (mionet property)": [[57, "pina.model.deeponet.MIONet.aggregator", false]], "alpha (adaptiveactivationfunctioninterface property)": [[9, "pina.adaptive_functions.adaptive_func_interface.AdaptiveActivationFunctionInterface.alpha", false]], "append() (labeltensor method)": [[33, "pina.label_tensor.LabelTensor.append", false]], "averagingneuraloperator (class in pina.model.avno)": [[49, "pina.model.avno.AveragingNeuralOperator", false]], "basis (podblock property)": [[42, "pina.model.layers.pod.PODBlock.basis", false]], "basis() (spline method)": [[60, "pina.model.spline.Spline.basis", false]], "beta (adaptiveactivationfunctioninterface property)": [[9, "pina.adaptive_functions.adaptive_func_interface.AdaptiveActivationFunctionInterface.beta", false]], "branch_net (deeponet property)": [[51, "pina.model.deeponet.DeepONet.branch_net", false]], "build() (rbfblock static method)": [[43, "pina.model.layers.rbf_layer.RBFBlock.build", false]], "causalpinn (class in pina.solvers.pinns.causalpinn)": [[68, "pina.solvers.pinns.causalpinn.CausalPINN", false]], "clone() (labeltensor method)": [[33, "pina.label_tensor.LabelTensor.clone", false]], "competitivepinn (class in pina.solvers.pinns.competitive_pinn)": [[69, "pina.solvers.pinns.competitive_pinn.CompetitivePINN", false]], "compute_residual() (pinninterface method)": [[67, "pina.solvers.pinns.basepinn.PINNInterface.compute_residual", false]], "condition (class in pina.condition)": [[22, "pina.condition.Condition", false]], "conditions (abstractproblem 
property)": [[63, "pina.problem.abstract_problem.AbstractProblem.conditions", false]], "configure_optimizers() (competitivepinn method)": [[69, "pina.solvers.pinns.competitive_pinn.CompetitivePINN.configure_optimizers", false]], "configure_optimizers() (garom method)": [[70, "pina.solvers.garom.GAROM.configure_optimizers", false]], "configure_optimizers() (pinn method)": [[72, "pina.solvers.pinns.pinn.PINN.configure_optimizers", false]], "configure_optimizers() (sapinn method)": [[75, "pina.solvers.pinns.sapinn.SAPINN.configure_optimizers", false]], "configure_optimizers() (solverinterface method)": [[76, "pina.solvers.solver.SolverInterface.configure_optimizers", false]], "configure_optimizers() (supervisedsolver method)": [[77, "pina.solvers.supervised.SupervisedSolver.configure_optimizers", false]], "cpu() (labeltensor method)": [[33, "pina.label_tensor.LabelTensor.cpu", false]], "cuda() (labeltensor method)": [[33, "pina.label_tensor.LabelTensor.cuda", false]], "current_condition_name (pinninterface property)": [[67, "pina.solvers.pinns.basepinn.PINNInterface.current_condition_name", false]], "deeponet (class in pina.model.deeponet)": [[51, "pina.model.deeponet.DeepONet", false]], "degree (rbfblock property)": [[43, "pina.model.layers.rbf_layer.RBFBlock.degree", false]], "detach() (labeltensor method)": [[33, "pina.label_tensor.LabelTensor.detach", false]], "difference (class in pina.geometry.difference_domain)": [[25, "pina.geometry.difference_domain.Difference", false]], "dim (orthogonalblock property)": [[40, "pina.model.layers.orthogonal.OrthogonalBlock.dim", false]], "discretise_domain() (abstractproblem method)": [[63, "pina.problem.abstract_problem.AbstractProblem.discretise_domain", false]], "discriminator (competitivepinn property)": [[69, "pina.solvers.pinns.competitive_pinn.CompetitivePINN.discriminator", false]], "div() (in module pina.operators)": [[61, "pina.operators.div", false]], "domain (abstractproblem property)": [[63, "pina.problem.abstract_problem.AbstractProblem.domain", false]], "eps (causalpinn property)": [[68, "pina.solvers.pinns.causalpinn.CausalPINN.eps", false]], "epsilon (rbfblock property)": [[43, "pina.model.layers.rbf_layer.RBFBlock.epsilon", false]], "equation (class in pina.equation.equation)": [[23, "pina.equation.equation.Equation", false]], "equationinterface (class in pina.equation.equation_interface)": [[23, "pina.equation.equation_interface.EquationInterface", false]], "exclusion (class in pina.geometry.exclusion_domain)": [[27, "pina.geometry.exclusion_domain.Exclusion", false]], "expand() (podblock method)": [[42, "pina.model.layers.pod.PODBlock.expand", false]], "extract() (labeltensor method)": [[33, "pina.label_tensor.LabelTensor.extract", false]], "feedforward (class in pina.model.feed_forward)": [[52, "pina.model.feed_forward.FeedForward", false]], "fit() (podblock method)": [[42, "pina.model.layers.pod.PODBlock.fit", false]], "fit() (rbfblock method)": [[43, "pina.model.layers.rbf_layer.RBFBlock.fit", false]], "fixedflux (class in pina.equation.equation_factory)": [[23, "pina.equation.equation_factory.FixedFlux", false]], "fixedgradient (class in pina.equation.equation_factory)": [[23, "pina.equation.equation_factory.FixedGradient", false]], "fixedvalue (class in pina.equation.equation_factory)": [[23, "pina.equation.equation_factory.FixedValue", false]], "fno (class in pina.model.fno)": [[54, "pina.model.fno.FNO", false]], "forward() (adaptiveactivationfunctioninterface method)": [[9, 
"pina.adaptive_functions.adaptive_func_interface.AdaptiveActivationFunctionInterface.forward", false]], "forward() (averagingneuraloperator method)": [[49, "pina.model.avno.AveragingNeuralOperator.forward", false]], "forward() (competitivepinn method)": [[69, "pina.solvers.pinns.competitive_pinn.CompetitivePINN.forward", false]], "forward() (deeponet method)": [[51, "pina.model.deeponet.DeepONet.forward", false]], "forward() (feedforward method)": [[52, "pina.model.feed_forward.FeedForward.forward", false]], "forward() (fno method)": [[54, "pina.model.fno.FNO.forward", false]], "forward() (fourierblock1d method)": [[37, "pina.model.layers.fourier.FourierBlock1D.forward", false]], "forward() (fourierblock2d method)": [[37, "pina.model.layers.fourier.FourierBlock2D.forward", false]], "forward() (fourierblock3d method)": [[37, "pina.model.layers.fourier.FourierBlock3D.forward", false]], "forward() (fourierfeatureembedding method)": [[38, "pina.model.layers.embedding.FourierFeatureEmbedding.forward", false]], "forward() (fourierintegralkernel method)": [[55, "pina.model.fno.FourierIntegralKernel.forward", false]], "forward() (garom method)": [[70, "pina.solvers.garom.GAROM.forward", false]], "forward() (kernelneuraloperator method)": [[50, "pina.model.base_no.KernelNeuralOperator.forward", false]], "forward() (lossinterface method)": [[46, "pina.loss.LossInterface.forward", false]], "forward() (lowrankneuraloperator method)": [[56, "pina.model.lno.LowRankNeuralOperator.forward", false]], "forward() (lploss method)": [[47, "pina.loss.LpLoss.forward", false]], "forward() (mionet method)": [[57, "pina.model.deeponet.MIONet.forward", false]], "forward() (network method)": [[59, "pina.model.network.Network.forward", false]], "forward() (orthogonalblock method)": [[40, "pina.model.layers.orthogonal.OrthogonalBlock.forward", false]], "forward() (periodicboundaryembedding method)": [[41, "pina.model.layers.embedding.PeriodicBoundaryEmbedding.forward", false]], "forward() (pinn method)": [[72, "pina.solvers.pinns.pinn.PINN.forward", false]], "forward() (podblock method)": [[42, "pina.model.layers.pod.PODBlock.forward", false]], "forward() (powerloss method)": [[48, "pina.loss.PowerLoss.forward", false]], "forward() (rbfblock method)": [[43, "pina.model.layers.rbf_layer.RBFBlock.forward", false]], "forward() (reducedordermodelsolver method)": [[74, "pina.solvers.rom.ReducedOrderModelSolver.forward", false]], "forward() (residualblock method)": [[44, "pina.model.layers.residual.ResidualBlock.forward", false]], "forward() (residualfeedforward method)": [[53, "pina.model.feed_forward.ResidualFeedForward.forward", false]], "forward() (sapinn method)": [[75, "pina.solvers.pinns.sapinn.SAPINN.forward", false]], "forward() (solverinterface method)": [[76, "pina.solvers.solver.SolverInterface.forward", false]], "forward() (spectralconvblock1d method)": [[45, "pina.model.layers.spectral.SpectralConvBlock1D.forward", false]], "forward() (spectralconvblock2d method)": [[45, "pina.model.layers.spectral.SpectralConvBlock2D.forward", false]], "forward() (spectralconvblock3d method)": [[45, "pina.model.layers.spectral.SpectralConvBlock3D.forward", false]], "forward() (spline method)": [[60, "pina.model.spline.Spline.forward", false]], "forward() (supervisedsolver method)": [[77, "pina.solvers.supervised.SupervisedSolver.forward", false]], "forward_map() (network method)": [[59, "pina.model.network.Network.forward_map", false]], "fourierblock1d (class in pina.model.layers.fourier)": [[37, 
"pina.model.layers.fourier.FourierBlock1D", false]], "fourierblock2d (class in pina.model.layers.fourier)": [[37, "pina.model.layers.fourier.FourierBlock2D", false]], "fourierblock3d (class in pina.model.layers.fourier)": [[37, "pina.model.layers.fourier.FourierBlock3D", false]], "fourierfeatureembedding (class in pina.model.layers.embedding)": [[38, "pina.model.layers.embedding.FourierFeatureEmbedding", false]], "fourierintegralkernel (class in pina.model.fno)": [[55, "pina.model.fno.FourierIntegralKernel", false]], "func (adaptiveactivationfunctioninterface property)": [[9, "pina.adaptive_functions.adaptive_func_interface.AdaptiveActivationFunctionInterface.func", false]], "gamma (adaptiveactivationfunctioninterface property)": [[9, "pina.adaptive_functions.adaptive_func_interface.AdaptiveActivationFunctionInterface.gamma", false]], "garom (class in pina.solvers.garom)": [[70, "pina.solvers.garom.GAROM", false]], "geometries (operationinterface property)": [[30, "pina.geometry.operation_interface.OperationInterface.geometries", false]], "get_metrics() (pinaprogressbar method)": [[21, "pina.callbacks.processing_callbacks.PINAProgressBar.get_metrics", false]], "gpinn (class in pina.solvers.pinns.gpinn)": [[71, "pina.solvers.pinns.gpinn.GPINN", false]], "grad() (in module pina.operators)": [[61, "pina.operators.grad", false]], "have_sampled_points (abstractproblem property)": [[63, "pina.problem.abstract_problem.AbstractProblem.have_sampled_points", false]], "indeces_variables_extracted (mionet property)": [[57, "pina.model.deeponet.MIONet.indeces_variables_extracted", false]], "input_variables (abstractproblem property)": [[63, "pina.problem.abstract_problem.AbstractProblem.input_variables", false]], "integral_kernels (kernelneuraloperator property)": [[50, "pina.model.base_no.KernelNeuralOperator.integral_kernels", false]], "intersection (class in pina.geometry.intersection_domain)": [[28, "pina.geometry.intersection_domain.Intersection", false]], "is_inside() (difference method)": [[25, "pina.geometry.difference_domain.Difference.is_inside", false]], "is_inside() (exclusion method)": [[27, "pina.geometry.exclusion_domain.Exclusion.is_inside", false]], "is_inside() (intersection method)": [[28, "pina.geometry.intersection_domain.Intersection.is_inside", false]], "is_inside() (location method)": [[29, "pina.geometry.location.Location.is_inside", false]], "is_inside() (operationinterface method)": [[30, "pina.geometry.operation_interface.OperationInterface.is_inside", false]], "is_inside() (union method)": [[32, "pina.geometry.union_domain.Union.is_inside", false]], "kernel (rbfblock property)": [[43, "pina.model.layers.rbf_layer.RBFBlock.kernel", false]], "kernel_matrix() (rbfblock static method)": [[43, "pina.model.layers.rbf_layer.RBFBlock.kernel_matrix", false]], "kernel_vector() (rbfblock static method)": [[43, "pina.model.layers.rbf_layer.RBFBlock.kernel_vector", false]], "kernelneuraloperator (class in pina.model.base_no)": [[50, "pina.model.base_no.KernelNeuralOperator", false]], "labels (labeltensor property)": [[33, "pina.label_tensor.LabelTensor.labels", false]], "labeltensor (class in pina.label_tensor)": [[33, "pina.label_tensor.LabelTensor", false]], "laplace (class in pina.equation.equation_factory)": [[23, "pina.equation.equation_factory.Laplace", false]], "laplacian() (in module pina.operators)": [[61, "pina.operators.laplacian", false]], "lifting_operator (kernelneuraloperator property)": [[50, "pina.model.base_no.KernelNeuralOperator.lifting_operator", false]], "location 
(class in pina.geometry.location)": [[29, "pina.geometry.location.Location", false]], "loss (pinninterface property)": [[67, "pina.solvers.pinns.basepinn.PINNInterface.loss", false]], "loss (supervisedsolver property)": [[77, "pina.solvers.supervised.SupervisedSolver.loss", false]], "loss_data() (competitivepinn method)": [[69, "pina.solvers.pinns.competitive_pinn.CompetitivePINN.loss_data", false]], "loss_data() (pinninterface method)": [[67, "pina.solvers.pinns.basepinn.PINNInterface.loss_data", false]], "loss_data() (reducedordermodelsolver method)": [[74, "pina.solvers.rom.ReducedOrderModelSolver.loss_data", false]], "loss_data() (sapinn method)": [[75, "pina.solvers.pinns.sapinn.SAPINN.loss_data", false]], "loss_data() (supervisedsolver method)": [[77, "pina.solvers.supervised.SupervisedSolver.loss_data", false]], "loss_phys() (causalpinn method)": [[68, "pina.solvers.pinns.causalpinn.CausalPINN.loss_phys", false]], "loss_phys() (competitivepinn method)": [[69, "pina.solvers.pinns.competitive_pinn.CompetitivePINN.loss_phys", false]], "loss_phys() (gpinn method)": [[71, "pina.solvers.pinns.gpinn.GPINN.loss_phys", false]], "loss_phys() (pinn method)": [[72, "pina.solvers.pinns.pinn.PINN.loss_phys", false]], "loss_phys() (pinninterface method)": [[67, "pina.solvers.pinns.basepinn.PINNInterface.loss_phys", false]], "loss_phys() (rbapinn method)": [[73, "pina.solvers.pinns.rbapinn.RBAPINN.loss_phys", false]], "loss_phys() (sapinn method)": [[75, "pina.solvers.pinns.sapinn.SAPINN.loss_phys", false]], "lossinterface (class in pina.loss)": [[46, "pina.loss.LossInterface", false]], "lowrankneuraloperator (class in pina.model.lno)": [[56, "pina.model.lno.LowRankNeuralOperator", false]], "lploss (class in pina.loss)": [[47, "pina.loss.LpLoss", false]], "metrics (metrictracker property)": [[21, "pina.callbacks.processing_callbacks.MetricTracker.metrics", false]], "metrictracker (class in pina.callbacks.processing_callbacks)": [[21, "pina.callbacks.processing_callbacks.MetricTracker", false]], "mionet (class in pina.model.deeponet)": [[57, "pina.model.deeponet.MIONet", false]], "model (mionet property)": [[57, "pina.model.deeponet.MIONet.model", false]], "models (solverinterface property)": [[76, "pina.solvers.solver.SolverInterface.models", false]], "module": [[9, "module-pina.adaptive_functions.adaptive_func_interface", false], [24, "module-pina.geometry.cartesian", false], [25, "module-pina.geometry.difference_domain", false], [26, "module-pina.geometry.ellipsoid", false], [27, "module-pina.geometry.exclusion_domain", false], [28, "module-pina.geometry.intersection_domain", false], [29, "module-pina.geometry.location", false], [30, "module-pina.geometry.operation_interface", false], [31, "module-pina.geometry.simplex", false], [32, "module-pina.geometry.union_domain", false], [46, "module-pina.loss", false], [59, "module-pina.model.network", false], [61, "module-pina.operators", false], [63, "module-pina.problem.abstract_problem", false], [64, "module-pina.problem.parametric_problem", false], [65, "module-pina.problem.spatial_problem", false], [66, "module-pina.problem.timedep_problem", false], [78, "module-pina.trainer", false]], "monomial_powers() (rbfblock static method)": [[43, "pina.model.layers.rbf_layer.RBFBlock.monomial_powers", false]], "multifeedforward (class in pina.model.multi_feed_forward)": [[58, "pina.model.multi_feed_forward.MultiFeedForward", false]], "network (class in pina.model.network)": [[59, "pina.model.network.Network", false]], "neural_net (competitivepinn property)": 
[[69, "pina.solvers.pinns.competitive_pinn.CompetitivePINN.neural_net", false]], "neural_net (pinn property)": [[72, "pina.solvers.pinns.pinn.PINN.neural_net", false]], "neural_net (reducedordermodelsolver property)": [[74, "pina.solvers.rom.ReducedOrderModelSolver.neural_net", false]], "neural_net (sapinn property)": [[75, "pina.solvers.pinns.sapinn.SAPINN.neural_net", false]], "neural_net (supervisedsolver property)": [[77, "pina.solvers.supervised.SupervisedSolver.neural_net", false]], "not_sampled_points (abstractproblem property)": [[63, "pina.problem.abstract_problem.AbstractProblem.not_sampled_points", false]], "on_fit_start() (pinaprogressbar method)": [[21, "pina.callbacks.processing_callbacks.PINAProgressBar.on_fit_start", false]], "on_load_checkpoint() (sapinn method)": [[75, "pina.solvers.pinns.sapinn.SAPINN.on_load_checkpoint", false]], "on_train_batch_end() (competitivepinn method)": [[69, "pina.solvers.pinns.competitive_pinn.CompetitivePINN.on_train_batch_end", false]], "on_train_batch_end() (sapinn method)": [[75, "pina.solvers.pinns.sapinn.SAPINN.on_train_batch_end", false]], "on_train_epoch_end() (metrictracker method)": [[21, "pina.callbacks.processing_callbacks.MetricTracker.on_train_epoch_end", false]], "on_train_epoch_end() (r3refinement method)": [[19, "pina.callbacks.adaptive_refinment_callbacks.R3Refinement.on_train_epoch_end", false]], "on_train_epoch_start() (switchoptimizer method)": [[20, "pina.callbacks.optimizer_callbacks.SwitchOptimizer.on_train_epoch_start", false]], "on_train_start() (r3refinement method)": [[19, "pina.callbacks.adaptive_refinment_callbacks.R3Refinement.on_train_start", false]], "on_train_start() (sapinn method)": [[75, "pina.solvers.pinns.sapinn.SAPINN.on_train_start", false]], "on_train_start() (solverinterface method)": [[76, "pina.solvers.solver.SolverInterface.on_train_start", false]], "operationinterface (class in pina.geometry.operation_interface)": [[30, "pina.geometry.operation_interface.OperationInterface", false]], "optimizer_discriminator (competitivepinn property)": [[69, "pina.solvers.pinns.competitive_pinn.CompetitivePINN.optimizer_discriminator", false]], "optimizer_model (competitivepinn property)": [[69, "pina.solvers.pinns.competitive_pinn.CompetitivePINN.optimizer_model", false]], "optimizer_model (sapinn property)": [[75, "pina.solvers.pinns.sapinn.SAPINN.optimizer_model", false]], "optimizer_weights (sapinn property)": [[75, "pina.solvers.pinns.sapinn.SAPINN.optimizer_weights", false]], "optimizers (solverinterface property)": [[76, "pina.solvers.solver.SolverInterface.optimizers", false]], "orthogonalblock (class in pina.model.layers.orthogonal)": [[40, "pina.model.layers.orthogonal.OrthogonalBlock", false]], "output_variables (abstractproblem property)": [[63, "pina.problem.abstract_problem.AbstractProblem.output_variables", false]], "parameter_domain() (parametricproblem method)": [[64, "pina.problem.parametric_problem.ParametricProblem.parameter_domain", false]], "parameters (parametricproblem property)": [[64, "pina.problem.parametric_problem.ParametricProblem.parameters", false]], "parametricproblem (class in pina.problem.parametric_problem)": [[64, "pina.problem.parametric_problem.ParametricProblem", false]], "period (periodicboundaryembedding property)": [[41, "pina.model.layers.embedding.PeriodicBoundaryEmbedding.period", false]], "periodicboundaryembedding (class in pina.model.layers.embedding)": [[41, "pina.model.layers.embedding.PeriodicBoundaryEmbedding", false]], 
"pina.adaptive_functions.adaptive_func_interface": [[9, "module-pina.adaptive_functions.adaptive_func_interface", false]], "pina.geometry.cartesian": [[24, "module-pina.geometry.cartesian", false]], "pina.geometry.difference_domain": [[25, "module-pina.geometry.difference_domain", false]], "pina.geometry.ellipsoid": [[26, "module-pina.geometry.ellipsoid", false]], "pina.geometry.exclusion_domain": [[27, "module-pina.geometry.exclusion_domain", false]], "pina.geometry.intersection_domain": [[28, "module-pina.geometry.intersection_domain", false]], "pina.geometry.location": [[29, "module-pina.geometry.location", false]], "pina.geometry.operation_interface": [[30, "module-pina.geometry.operation_interface", false]], "pina.geometry.simplex": [[31, "module-pina.geometry.simplex", false]], "pina.geometry.union_domain": [[32, "module-pina.geometry.union_domain", false]], "pina.loss": [[46, "module-pina.loss", false]], "pina.model.network": [[59, "module-pina.model.network", false]], "pina.operators": [[61, "module-pina.operators", false]], "pina.problem.abstract_problem": [[63, "module-pina.problem.abstract_problem", false]], "pina.problem.parametric_problem": [[64, "module-pina.problem.parametric_problem", false]], "pina.problem.spatial_problem": [[65, "module-pina.problem.spatial_problem", false]], "pina.problem.timedep_problem": [[66, "module-pina.problem.timedep_problem", false]], "pina.trainer": [[78, "module-pina.trainer", false]], "pinaprogressbar (class in pina.callbacks.processing_callbacks)": [[21, "pina.callbacks.processing_callbacks.PINAProgressBar", false]], "pinn (class in pina.solvers.pinns.pinn)": [[72, "pina.solvers.pinns.pinn.PINN", false]], "pinninterface (class in pina.solvers.pinns.basepinn)": [[67, "pina.solvers.pinns.basepinn.PINNInterface", false]], "podblock (class in pina.model.layers.pod)": [[42, "pina.model.layers.pod.PODBlock", false]], "polynomial_matrix() (rbfblock static method)": [[43, "pina.model.layers.rbf_layer.RBFBlock.polynomial_matrix", false]], "powerloss (class in pina.loss)": [[48, "pina.loss.PowerLoss", false]], "problem (solverinterface property)": [[76, "pina.solvers.solver.SolverInterface.problem", false]], "projection_operator (kernelneuraloperator property)": [[50, "pina.model.base_no.KernelNeuralOperator.projection_operator", false]], "r3refinement (class in pina.callbacks.adaptive_refinment_callbacks)": [[19, "pina.callbacks.adaptive_refinment_callbacks.R3Refinement", false]], "rank (podblock property)": [[42, "pina.model.layers.pod.PODBlock.rank", false]], "rbapinn (class in pina.solvers.pinns.rbapinn)": [[73, "pina.solvers.pinns.rbapinn.RBAPINN", false]], "rbfblock (class in pina.model.layers.rbf_layer)": [[43, "pina.model.layers.rbf_layer.RBFBlock", false]], "reduce() (podblock method)": [[42, "pina.model.layers.pod.PODBlock.reduce", false]], "reducedordermodelsolver (class in pina.solvers.rom)": [[74, "pina.solvers.rom.ReducedOrderModelSolver", false]], "reduction (mionet property)": [[57, "pina.model.deeponet.MIONet.reduction", false]], "requires_grad (orthogonalblock property)": [[40, "pina.model.layers.orthogonal.OrthogonalBlock.requires_grad", false]], "requires_grad_() (labeltensor method)": [[33, "pina.label_tensor.LabelTensor.requires_grad_", false]], "residual() (equation method)": [[23, "pina.equation.equation.Equation.residual", false]], "residual() (equationinterface method)": [[23, "pina.equation.equation_interface.EquationInterface.residual", false]], "residual() (systemequation method)": [[23, 
"pina.equation.system_equation.SystemEquation.residual", false]], "residualblock (class in pina.model.layers.residual)": [[44, "pina.model.layers.residual.ResidualBlock", false]], "residualfeedforward (class in pina.model.feed_forward)": [[53, "pina.model.feed_forward.ResidualFeedForward", false]], "sample() (difference method)": [[25, "pina.geometry.difference_domain.Difference.sample", false]], "sample() (exclusion method)": [[27, "pina.geometry.exclusion_domain.Exclusion.sample", false]], "sample() (intersection method)": [[28, "pina.geometry.intersection_domain.Intersection.sample", false]], "sample() (location method)": [[29, "pina.geometry.location.Location.sample", false]], "sample() (union method)": [[32, "pina.geometry.union_domain.Union.sample", false]], "sapinn (class in pina.solvers.pinns.sapinn)": [[75, "pina.solvers.pinns.sapinn.SAPINN", false]], "save_logs_and_release() (pinninterface method)": [[67, "pina.solvers.pinns.basepinn.PINNInterface.save_logs_and_release", false]], "scale (mionet property)": [[57, "pina.model.deeponet.MIONet.scale", false]], "scale_coefficients (podblock property)": [[42, "pina.model.layers.pod.PODBlock.scale_coefficients", false]], "scaler (podblock property)": [[42, "pina.model.layers.pod.PODBlock.scaler", false]], "scheduler (pinn property)": [[72, "pina.solvers.pinns.pinn.PINN.scheduler", false]], "scheduler (supervisedsolver property)": [[77, "pina.solvers.supervised.SupervisedSolver.scheduler", false]], "scheduler_discriminator (competitivepinn property)": [[69, "pina.solvers.pinns.competitive_pinn.CompetitivePINN.scheduler_discriminator", false]], "scheduler_model (competitivepinn property)": [[69, "pina.solvers.pinns.competitive_pinn.CompetitivePINN.scheduler_model", false]], "scheduler_model (sapinn property)": [[75, "pina.solvers.pinns.sapinn.SAPINN.scheduler_model", false]], "scheduler_weights (sapinn property)": [[75, "pina.solvers.pinns.sapinn.SAPINN.scheduler_weights", false]], "select() (labeltensor method)": [[33, "pina.label_tensor.LabelTensor.select", false]], "sigma (fourierfeatureembedding property)": [[38, "pina.model.layers.embedding.FourierFeatureEmbedding.sigma", false]], "smoothing (rbfblock property)": [[43, "pina.model.layers.rbf_layer.RBFBlock.smoothing", false]], "solve() (rbfblock static method)": [[43, "pina.model.layers.rbf_layer.RBFBlock.solve", false]], "solver (trainer property)": [[78, "pina.trainer.Trainer.solver", false]], "solverinterface (class in pina.solvers.solver)": [[76, "pina.solvers.solver.SolverInterface", false]], "spatial_domain() (spatialproblem method)": [[65, "pina.problem.spatial_problem.SpatialProblem.spatial_domain", false]], "spatial_variables (spatialproblem property)": [[65, "pina.problem.spatial_problem.SpatialProblem.spatial_variables", false]], "spatialproblem (class in pina.problem.spatial_problem)": [[65, "pina.problem.spatial_problem.SpatialProblem", false]], "spectralconvblock1d (class in pina.model.layers.spectral)": [[45, "pina.model.layers.spectral.SpectralConvBlock1D", false]], "spectralconvblock2d (class in pina.model.layers.spectral)": [[45, "pina.model.layers.spectral.SpectralConvBlock2D", false]], "spectralconvblock3d (class in pina.model.layers.spectral)": [[45, "pina.model.layers.spectral.SpectralConvBlock3D", false]], "spline (class in pina.model.spline)": [[60, "pina.model.spline.Spline", false]], "store_log() (pinninterface method)": [[67, "pina.solvers.pinns.basepinn.PINNInterface.store_log", false]], "supervisedsolver (class in pina.solvers.supervised)": [[77, 
"pina.solvers.supervised.SupervisedSolver", false]], "switchoptimizer (class in pina.callbacks.optimizer_callbacks)": [[20, "pina.callbacks.optimizer_callbacks.SwitchOptimizer", false]], "systemequation (class in pina.equation.system_equation)": [[23, "pina.equation.system_equation.SystemEquation", false]], "temporal_domain() (timedependentproblem method)": [[66, "pina.problem.timedep_problem.TimeDependentProblem.temporal_domain", false]], "temporal_variable (timedependentproblem property)": [[66, "pina.problem.timedep_problem.TimeDependentProblem.temporal_variable", false]], "timedependentproblem (class in pina.problem.timedep_problem)": [[66, "pina.problem.timedep_problem.TimeDependentProblem", false]], "to() (labeltensor method)": [[33, "pina.label_tensor.LabelTensor.to", false]], "train() (trainer method)": [[78, "pina.trainer.Trainer.train", false]], "trainer (class in pina.trainer)": [[78, "pina.trainer.Trainer", false]], "training_step() (garom method)": [[70, "pina.solvers.garom.GAROM.training_step", false]], "training_step() (pinninterface method)": [[67, "pina.solvers.pinns.basepinn.PINNInterface.training_step", false]], "training_step() (solverinterface method)": [[76, "pina.solvers.solver.SolverInterface.training_step", false]], "training_step() (supervisedsolver method)": [[77, "pina.solvers.supervised.SupervisedSolver.training_step", false]], "translation (mionet property)": [[57, "pina.model.deeponet.MIONet.translation", false]], "trunk_net (deeponet property)": [[51, "pina.model.deeponet.DeepONet.trunk_net", false]], "union (class in pina.geometry.union_domain)": [[32, "pina.geometry.union_domain.Union", false]], "variables (operationinterface property)": [[30, "pina.geometry.operation_interface.OperationInterface.variables", false]], "vstack() (labeltensor static method)": [[33, "pina.label_tensor.LabelTensor.vstack", false]], "weights_dict (sapinn property)": [[75, "pina.solvers.pinns.sapinn.SAPINN.weights_dict", false]]}, "objects": {"pina": [[46, 1, 0, "-", "loss"], [61, 1, 0, "-", "operators"], [78, 1, 0, "-", "trainer"]], "pina.adaptive_functions": [[9, 1, 0, "-", "adaptive_func_interface"]], "pina.adaptive_functions.adaptive_func": [[6, 0, 1, "", "AdaptiveCELU"], [7, 0, 1, "", "AdaptiveELU"], [8, 0, 1, "", "AdaptiveExp"], [10, 0, 1, "", "AdaptiveGELU"], [11, 0, 1, "", "AdaptiveMish"], [12, 0, 1, "", "AdaptiveReLU"], [13, 0, 1, "", "AdaptiveSIREN"], [14, 0, 1, "", "AdaptiveSiLU"], [15, 0, 1, "", "AdaptiveSigmoid"], [16, 0, 1, "", "AdaptiveSoftmax"], [17, 0, 1, "", "AdaptiveSoftmin"], [18, 0, 1, "", "AdaptiveTanh"]], "pina.adaptive_functions.adaptive_func_interface": [[9, 0, 1, "", "AdaptiveActivationFunctionInterface"]], "pina.adaptive_functions.adaptive_func_interface.AdaptiveActivationFunctionInterface": [[9, 2, 1, "", "alpha"], [9, 2, 1, "", "beta"], [9, 3, 1, "", "forward"], [9, 2, 1, "", "func"], [9, 2, 1, "", "gamma"]], "pina.callbacks.adaptive_refinment_callbacks": [[19, 0, 1, "", "R3Refinement"]], "pina.callbacks.adaptive_refinment_callbacks.R3Refinement": [[19, 3, 1, "", "on_train_epoch_end"], [19, 3, 1, "", "on_train_start"]], "pina.callbacks.optimizer_callbacks": [[20, 0, 1, "", "SwitchOptimizer"]], "pina.callbacks.optimizer_callbacks.SwitchOptimizer": [[20, 3, 1, "", "on_train_epoch_start"]], "pina.callbacks.processing_callbacks": [[21, 0, 1, "", "MetricTracker"], [21, 0, 1, "", "PINAProgressBar"]], "pina.callbacks.processing_callbacks.MetricTracker": [[21, 2, 1, "", "metrics"], [21, 3, 1, "", "on_train_epoch_end"]], 
"pina.callbacks.processing_callbacks.PINAProgressBar": [[21, 3, 1, "", "get_metrics"], [21, 3, 1, "", "on_fit_start"]], "pina.condition": [[22, 0, 1, "", "Condition"]], "pina.equation.equation": [[23, 0, 1, "", "Equation"]], "pina.equation.equation.Equation": [[23, 3, 1, "", "residual"]], "pina.equation.equation_factory": [[23, 0, 1, "", "FixedFlux"], [23, 0, 1, "", "FixedGradient"], [23, 0, 1, "", "FixedValue"], [23, 0, 1, "", "Laplace"]], "pina.equation.equation_interface": [[23, 0, 1, "", "EquationInterface"]], "pina.equation.equation_interface.EquationInterface": [[23, 3, 1, "", "residual"]], "pina.equation.system_equation": [[23, 0, 1, "", "SystemEquation"]], "pina.equation.system_equation.SystemEquation": [[23, 3, 1, "", "residual"]], "pina.geometry": [[24, 1, 0, "-", "cartesian"], [25, 1, 0, "-", "difference_domain"], [26, 1, 0, "-", "ellipsoid"], [27, 1, 0, "-", "exclusion_domain"], [28, 1, 0, "-", "intersection_domain"], [29, 1, 0, "-", "location"], [30, 1, 0, "-", "operation_interface"], [31, 1, 0, "-", "simplex"], [32, 1, 0, "-", "union_domain"]], "pina.geometry.difference_domain": [[25, 0, 1, "", "Difference"]], "pina.geometry.difference_domain.Difference": [[25, 3, 1, "", "is_inside"], [25, 3, 1, "", "sample"]], "pina.geometry.exclusion_domain": [[27, 0, 1, "", "Exclusion"]], "pina.geometry.exclusion_domain.Exclusion": [[27, 3, 1, "", "is_inside"], [27, 3, 1, "", "sample"]], "pina.geometry.intersection_domain": [[28, 0, 1, "", "Intersection"]], "pina.geometry.intersection_domain.Intersection": [[28, 3, 1, "", "is_inside"], [28, 3, 1, "", "sample"]], "pina.geometry.location": [[29, 0, 1, "", "Location"]], "pina.geometry.location.Location": [[29, 3, 1, "", "is_inside"], [29, 3, 1, "", "sample"]], "pina.geometry.operation_interface": [[30, 0, 1, "", "OperationInterface"]], "pina.geometry.operation_interface.OperationInterface": [[30, 2, 1, "", "geometries"], [30, 3, 1, "", "is_inside"], [30, 2, 1, "", "variables"]], "pina.geometry.union_domain": [[32, 0, 1, "", "Union"]], "pina.geometry.union_domain.Union": [[32, 3, 1, "", "is_inside"], [32, 3, 1, "", "sample"]], "pina.label_tensor": [[33, 0, 1, "", "LabelTensor"]], "pina.label_tensor.LabelTensor": [[33, 3, 1, "", "append"], [33, 3, 1, "", "clone"], [33, 3, 1, "", "cpu"], [33, 3, 1, "", "cuda"], [33, 3, 1, "", "detach"], [33, 3, 1, "", "extract"], [33, 2, 1, "", "labels"], [33, 3, 1, "", "requires_grad_"], [33, 3, 1, "", "select"], [33, 3, 1, "", "to"], [33, 3, 1, "", "vstack"]], "pina.loss": [[46, 0, 1, "", "LossInterface"], [47, 0, 1, "", "LpLoss"], [48, 0, 1, "", "PowerLoss"]], "pina.loss.LossInterface": [[46, 3, 1, "", "forward"]], "pina.loss.LpLoss": [[47, 3, 1, "", "forward"]], "pina.loss.PowerLoss": [[48, 3, 1, "", "forward"]], "pina.model": [[59, 1, 0, "-", "network"]], "pina.model.avno": [[49, 0, 1, "", "AveragingNeuralOperator"]], "pina.model.avno.AveragingNeuralOperator": [[49, 3, 1, "", "forward"]], "pina.model.base_no": [[50, 0, 1, "", "KernelNeuralOperator"]], "pina.model.base_no.KernelNeuralOperator": [[50, 3, 1, "", "forward"], [50, 2, 1, "", "integral_kernels"], [50, 2, 1, "", "lifting_operator"], [50, 2, 1, "", "projection_operator"]], "pina.model.deeponet": [[51, 0, 1, "", "DeepONet"], [57, 0, 1, "", "MIONet"]], "pina.model.deeponet.DeepONet": [[51, 2, 1, "", "branch_net"], [51, 3, 1, "", "forward"], [51, 2, 1, "", "trunk_net"]], "pina.model.deeponet.MIONet": [[57, 2, 1, "", "aggregator"], [57, 3, 1, "", "forward"], [57, 2, 1, "", "indeces_variables_extracted"], [57, 2, 1, "", "model"], [57, 2, 1, "", 
"reduction"], [57, 2, 1, "", "scale"], [57, 2, 1, "", "translation"]], "pina.model.feed_forward": [[52, 0, 1, "", "FeedForward"], [53, 0, 1, "", "ResidualFeedForward"]], "pina.model.feed_forward.FeedForward": [[52, 3, 1, "", "forward"]], "pina.model.feed_forward.ResidualFeedForward": [[53, 3, 1, "", "forward"]], "pina.model.fno": [[54, 0, 1, "", "FNO"], [55, 0, 1, "", "FourierIntegralKernel"]], "pina.model.fno.FNO": [[54, 3, 1, "", "forward"]], "pina.model.fno.FourierIntegralKernel": [[55, 3, 1, "", "forward"]], "pina.model.layers.embedding": [[38, 0, 1, "", "FourierFeatureEmbedding"], [41, 0, 1, "", "PeriodicBoundaryEmbedding"]], "pina.model.layers.embedding.FourierFeatureEmbedding": [[38, 3, 1, "", "forward"], [38, 2, 1, "", "sigma"]], "pina.model.layers.embedding.PeriodicBoundaryEmbedding": [[41, 3, 1, "", "forward"], [41, 2, 1, "", "period"]], "pina.model.layers.fourier": [[37, 0, 1, "", "FourierBlock1D"], [37, 0, 1, "", "FourierBlock2D"], [37, 0, 1, "", "FourierBlock3D"]], "pina.model.layers.fourier.FourierBlock1D": [[37, 3, 1, "", "forward"]], "pina.model.layers.fourier.FourierBlock2D": [[37, 3, 1, "", "forward"]], "pina.model.layers.fourier.FourierBlock3D": [[37, 3, 1, "", "forward"]], "pina.model.layers.orthogonal": [[40, 0, 1, "", "OrthogonalBlock"]], "pina.model.layers.orthogonal.OrthogonalBlock": [[40, 2, 1, "", "dim"], [40, 3, 1, "", "forward"], [40, 2, 1, "", "requires_grad"]], "pina.model.layers.pod": [[42, 0, 1, "", "PODBlock"]], "pina.model.layers.pod.PODBlock": [[42, 2, 1, "", "basis"], [42, 3, 1, "", "expand"], [42, 3, 1, "", "fit"], [42, 3, 1, "", "forward"], [42, 2, 1, "", "rank"], [42, 3, 1, "", "reduce"], [42, 2, 1, "", "scale_coefficients"], [42, 2, 1, "", "scaler"]], "pina.model.layers.rbf_layer": [[43, 0, 1, "", "RBFBlock"]], "pina.model.layers.rbf_layer.RBFBlock": [[43, 3, 1, "", "build"], [43, 2, 1, "", "degree"], [43, 2, 1, "", "epsilon"], [43, 3, 1, "", "fit"], [43, 3, 1, "", "forward"], [43, 2, 1, "", "kernel"], [43, 3, 1, "", "kernel_matrix"], [43, 3, 1, "", "kernel_vector"], [43, 3, 1, "", "monomial_powers"], [43, 3, 1, "", "polynomial_matrix"], [43, 2, 1, "", "smoothing"], [43, 3, 1, "", "solve"]], "pina.model.layers.residual": [[44, 0, 1, "", "ResidualBlock"]], "pina.model.layers.residual.ResidualBlock": [[44, 3, 1, "", "forward"]], "pina.model.layers.spectral": [[45, 0, 1, "", "SpectralConvBlock1D"], [45, 0, 1, "", "SpectralConvBlock2D"], [45, 0, 1, "", "SpectralConvBlock3D"]], "pina.model.layers.spectral.SpectralConvBlock1D": [[45, 3, 1, "", "forward"]], "pina.model.layers.spectral.SpectralConvBlock2D": [[45, 3, 1, "", "forward"]], "pina.model.layers.spectral.SpectralConvBlock3D": [[45, 3, 1, "", "forward"]], "pina.model.lno": [[56, 0, 1, "", "LowRankNeuralOperator"]], "pina.model.lno.LowRankNeuralOperator": [[56, 3, 1, "", "forward"]], "pina.model.multi_feed_forward": [[58, 0, 1, "", "MultiFeedForward"]], "pina.model.network": [[59, 0, 1, "", "Network"]], "pina.model.network.Network": [[59, 3, 1, "", "forward"], [59, 3, 1, "", "forward_map"]], "pina.model.spline": [[60, 0, 1, "", "Spline"]], "pina.model.spline.Spline": [[60, 3, 1, "", "basis"], [60, 3, 1, "", "forward"]], "pina.operators": [[61, 4, 1, "", "advection"], [61, 4, 1, "", "div"], [61, 4, 1, "", "grad"], [61, 4, 1, "", "laplacian"]], "pina.problem": [[63, 1, 0, "-", "abstract_problem"], [64, 1, 0, "-", "parametric_problem"], [65, 1, 0, "-", "spatial_problem"], [66, 1, 0, "-", "timedep_problem"]], "pina.problem.abstract_problem": [[63, 0, 1, "", "AbstractProblem"]], 
"pina.problem.abstract_problem.AbstractProblem": [[63, 3, 1, "", "add_points"], [63, 2, 1, "", "conditions"], [63, 3, 1, "", "discretise_domain"], [63, 2, 1, "", "domain"], [63, 2, 1, "", "have_sampled_points"], [63, 2, 1, "", "input_variables"], [63, 2, 1, "", "not_sampled_points"], [63, 2, 1, "", "output_variables"]], "pina.problem.parametric_problem": [[64, 0, 1, "", "ParametricProblem"]], "pina.problem.parametric_problem.ParametricProblem": [[64, 3, 1, "", "parameter_domain"], [64, 2, 1, "", "parameters"]], "pina.problem.spatial_problem": [[65, 0, 1, "", "SpatialProblem"]], "pina.problem.spatial_problem.SpatialProblem": [[65, 3, 1, "", "spatial_domain"], [65, 2, 1, "", "spatial_variables"]], "pina.problem.timedep_problem": [[66, 0, 1, "", "TimeDependentProblem"]], "pina.problem.timedep_problem.TimeDependentProblem": [[66, 3, 1, "", "temporal_domain"], [66, 2, 1, "", "temporal_variable"]], "pina.solvers.garom": [[70, 0, 1, "", "GAROM"]], "pina.solvers.garom.GAROM": [[70, 3, 1, "", "configure_optimizers"], [70, 3, 1, "", "forward"], [70, 3, 1, "", "training_step"]], "pina.solvers.pinns.basepinn": [[67, 0, 1, "", "PINNInterface"]], "pina.solvers.pinns.basepinn.PINNInterface": [[67, 3, 1, "", "compute_residual"], [67, 2, 1, "", "current_condition_name"], [67, 2, 1, "", "loss"], [67, 3, 1, "", "loss_data"], [67, 3, 1, "", "loss_phys"], [67, 3, 1, "", "save_logs_and_release"], [67, 3, 1, "", "store_log"], [67, 3, 1, "", "training_step"]], "pina.solvers.pinns.causalpinn": [[68, 0, 1, "", "CausalPINN"]], "pina.solvers.pinns.causalpinn.CausalPINN": [[68, 2, 1, "", "eps"], [68, 3, 1, "", "loss_phys"]], "pina.solvers.pinns.competitive_pinn": [[69, 0, 1, "", "CompetitivePINN"]], "pina.solvers.pinns.competitive_pinn.CompetitivePINN": [[69, 3, 1, "", "configure_optimizers"], [69, 2, 1, "", "discriminator"], [69, 3, 1, "", "forward"], [69, 3, 1, "", "loss_data"], [69, 3, 1, "", "loss_phys"], [69, 2, 1, "", "neural_net"], [69, 3, 1, "", "on_train_batch_end"], [69, 2, 1, "", "optimizer_discriminator"], [69, 2, 1, "", "optimizer_model"], [69, 2, 1, "", "scheduler_discriminator"], [69, 2, 1, "", "scheduler_model"]], "pina.solvers.pinns.gpinn": [[71, 0, 1, "", "GPINN"]], "pina.solvers.pinns.gpinn.GPINN": [[71, 3, 1, "", "loss_phys"]], "pina.solvers.pinns.pinn": [[72, 0, 1, "", "PINN"]], "pina.solvers.pinns.pinn.PINN": [[72, 3, 1, "", "configure_optimizers"], [72, 3, 1, "", "forward"], [72, 3, 1, "", "loss_phys"], [72, 2, 1, "", "neural_net"], [72, 2, 1, "", "scheduler"]], "pina.solvers.pinns.rbapinn": [[73, 0, 1, "", "RBAPINN"]], "pina.solvers.pinns.rbapinn.RBAPINN": [[73, 3, 1, "", "loss_phys"]], "pina.solvers.pinns.sapinn": [[75, 0, 1, "", "SAPINN"]], "pina.solvers.pinns.sapinn.SAPINN": [[75, 3, 1, "", "configure_optimizers"], [75, 3, 1, "", "forward"], [75, 3, 1, "", "loss_data"], [75, 3, 1, "", "loss_phys"], [75, 2, 1, "", "neural_net"], [75, 3, 1, "", "on_load_checkpoint"], [75, 3, 1, "", "on_train_batch_end"], [75, 3, 1, "", "on_train_start"], [75, 2, 1, "", "optimizer_model"], [75, 2, 1, "", "optimizer_weights"], [75, 2, 1, "", "scheduler_model"], [75, 2, 1, "", "scheduler_weights"], [75, 2, 1, "", "weights_dict"]], "pina.solvers.rom": [[74, 0, 1, "", "ReducedOrderModelSolver"]], "pina.solvers.rom.ReducedOrderModelSolver": [[74, 3, 1, "", "forward"], [74, 3, 1, "", "loss_data"], [74, 2, 1, "", "neural_net"]], "pina.solvers.solver": [[76, 0, 1, "", "SolverInterface"]], "pina.solvers.solver.SolverInterface": [[76, 3, 1, "", "configure_optimizers"], [76, 3, 1, "", "forward"], [76, 2, 1, "", "models"], 
[76, 3, 1, "", "on_train_start"], [76, 2, 1, "", "optimizers"], [76, 2, 1, "", "problem"], [76, 3, 1, "", "training_step"]], "pina.solvers.supervised": [[77, 0, 1, "", "SupervisedSolver"]], "pina.solvers.supervised.SupervisedSolver": [[77, 3, 1, "", "configure_optimizers"], [77, 3, 1, "", "forward"], [77, 2, 1, "", "loss"], [77, 3, 1, "", "loss_data"], [77, 2, 1, "", "neural_net"], [77, 2, 1, "", "scheduler"], [77, 3, 1, "", "training_step"]], "pina.trainer": [[78, 0, 1, "", "Trainer"]], "pina.trainer.Trainer": [[78, 2, 1, "", "solver"], [78, 3, 1, "", "train"]]}, "objnames": {"0": ["py", "class", "Python class"], "1": ["py", "module", "Python module"], "2": ["py", "property", "Python property"], "3": ["py", "method", "Python method"], "4": ["py", "function", "Python function"]}, "objtypes": {"0": "py:class", "1": "py:module", "2": "py:property", "3": "py:method", "4": "py:function"}, "terms": {"": [3, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 23, 33, 34, 39, 41, 49, 52, 53, 56, 59, 62, 63, 64, 65, 66, 72, 74, 75, 76, 77, 89, 90], "0": [6, 7, 10, 12, 20, 22, 24, 25, 26, 27, 28, 31, 32, 33, 35, 36, 38, 43, 51, 57, 64, 65, 66, 68, 69, 70, 71, 72, 73, 74, 75, 77, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91], "00": [80, 81, 83, 84, 85, 87, 90, 91], "000": 85, "0000": [24, 26], "0001": 90, "000104": 81, "000113": 83, "000114": 81, "000127": 81, "000151": 83, "000204": 81, "000221": 81, "000293": 81, "000304": 84, "000335": 81, "0004": 79, "000572": 81, "000655": 81, "0008": 79, "000838": 81, "001": [20, 68, 69, 70, 71, 72, 73, 74, 75, 77, 80, 85, 86], "00106": 87, "0012": 88, "00134": 84, "0016": 81, "002": 90, "00302": 51, "005": [81, 89], "006": 84, "01": [20, 33, 80, 82, 84, 87], "0108": 24, "01149": 81, "0121": 85, "0123": 91, "01321": [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18], "0152": 79, "0170": 88, "0182": 88, "02": [33, 74, 87, 90], "021": 51, "02291": 35, "023": 35, "02338": 19, "0246e": 33, "0274": 81, "028": 86, "0287": 81, "02e": 84, "03": 90, "0304": 81, "0307": 85, "03385": 44, "034": 86, "036": 86, "037": 74, "0385": 88, "0392": 88, "03e": 84, "04": [33, 87], "040": 86, "0419": 85, "0422": 26, "044": 86, "044715": 10, "046": 86, "0475": 88, "049": 86, "04it": 81, "05": [79, 86], "050": 86, "051": 86, "053": 86, "063": 86, "0656e": 33, "0662": 79, "0671": 33, "0674e": 79, "073": 86, "08": 86, "0846": 41, "08895": [37, 54, 55], "08e": 81, "0905": 88, "09e": [84, 85], "0it": 87, "1": [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 20, 22, 24, 25, 26, 27, 28, 31, 32, 33, 34, 35, 37, 38, 39, 40, 41, 43, 45, 47, 48, 50, 51, 54, 55, 56, 57, 64, 65, 66, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91], "10": [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 26, 31, 35, 36, 38, 41, 51, 53, 57, 63, 64, 65, 68, 71, 72, 73, 74, 75, 79, 81, 83, 84, 86, 87, 90, 91], "100": [34, 68, 80, 81, 83, 86, 87, 88, 89, 90, 91], "1000": [81, 83, 84, 85, 86, 88, 89, 90, 91], "1007": [33, 35], "100it": 87, "1016": [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 38, 41, 68, 71, 73, 74, 75], "1025": 33, "1038": [51, 72], "105": 84, "10627": 81, "108": 87, "109136": [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18], "110242": 41, "111": 86, "111722": 75, "112762": 74, "1137": [53, 57], "113938": 38, "114823": 71, "1158": 33, "116805": 73, "116813": 68, "118": 80, "119": 80, "120": [81, 86], "1208": 28, "124": 83, "128": [80, 83], "12800": 80, "12e": 81, "13": [80, 84], "1307": 86, "13221": [34, 49], "133": 81, "139": 81, "13e": [83, 84], 
"14": [84, 90], "1406": 79, "148": [81, 83], "149": 81, "14e": 85, "15": [80, 85, 86, 91], "150": 86, "1500": [79, 86, 88], "151": 83, "1512": 44, "152": 90, "154": 81, "155": 91, "15881": 70, "1598": 81, "16": [76, 84, 87, 88, 90], "1600": 81, "1602": [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18], "161": 86, "163": 81, "169": 86, "17": [81, 84], "1789": 79, "17e": [81, 84], "1841e": 33, "187": 90, "18e": 84, "196": 86, "1978": 27, "1986e": 33, "1991": 32, "1999": 81, "19e": 81, "1d": [54, 55, 66, 87], "1e": [84, 90], "1it": [84, 85], "2": [4, 10, 22, 25, 27, 28, 31, 32, 33, 35, 37, 38, 39, 41, 45, 47, 48, 50, 51, 52, 53, 54, 55, 56, 57, 66, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91], "20": [36, 39, 51, 52, 53, 54, 55, 56, 57, 70, 79, 80, 81, 86, 89, 90, 91], "200": [83, 86, 91], "2000": [33, 81, 83, 88], "2001": 26, "2010": [37, 54, 55], "2015": [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 92], "2016": 44, "2018": [74, 90], "2020": [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 34, 37, 49, 54, 55], "2021": [0, 38, 41, 51, 53, 72], "2022": [57, 69, 71, 75, 90], "2023": [1, 19, 35, 39, 41, 50, 56, 70, 75], "2024": [68, 73, 74], "2028": 79, "2063": 24, "2065e": 33, "20m1318043": 53, "210": 81, "2128": 32, "217041e": 90, "218": 51, "21e": 81, "21it": 83, "2207": 19, "229": 51, "2292": 88, "22m1477751": 57, "2300": 88, "2304": [34, 49], "2305": 70, "2308": 41, "2316": 33, "2355": 88, "2392e": 33, "24": [39, 50, 56, 87], "240": 86, "2448": 28, "25": [33, 84], "250": 86, "2503": 79, "253": 35, "256": 62, "2562": 32, "25e": 85, "26": 83, "261": 81, "265": 35, "26e": 84, "27": 86, "2713": 88, "27e": 81, "27it": 81, "28": 86, "2841": 28, "2851": 25, "2859": 88, "287801e": 90, "28it": 87, "2977": 26, "29e": 84, "2_2": [68, 69, 71, 72, 74, 75, 77], "2d": [5, 35, 39, 50, 54, 55, 80, 84, 86, 87, 90], "2e": [81, 84], "2f": [87, 90], "2y": 88, "3": [4, 10, 22, 24, 25, 27, 28, 32, 33, 35, 38, 41, 51, 54, 55, 57, 66, 70, 72, 80, 81, 83, 84, 85, 86, 88, 90, 91], "300": 86, "3000": [83, 88], "3081": 86, "31": 81, "32": 86, "3239": 79, "3332": 28, "3333": 24, "33it": 84, "3413": 88, "350": 86, "3500": 86, "3500e": 33, "3526": 27, "3530": 32, "354": 87, "357": 87, "363": [74, 90], "36381": 81, "3726": 25, "3800": 79, "3824e": 33, "3830": 27, "384": 38, "3868": 27, "38e": 83, "39": 80, "393": 71, "39e": 81, "3d": [54, 55, 62, 87, 88], "3f": [80, 86], "4": [3, 24, 33, 34, 35, 39, 49, 50, 51, 56, 57, 60, 80, 81, 84, 85, 86, 87, 88, 90, 91], "40": [80, 85], "400": 86, "4000": 83, "404": [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18], "4063": 88, "409": 83, "4187": 27, "42": 81, "421": [68, 73], "422": 72, "4268e": 33, "4294": 79, "43": 53, "4317": 32, "435": 41, "44": 57, "440": 72, "4458": 28, "4474": 81, "4477": 24, "4484": 88, "44e": 81, "45": 85, "450": 86, "4596": 81, "46": 81, "460": 90, "4660": 88, "46it": 81, "47": [81, 87], "4722": 81, "474": 75, "47it": 91, "4835": 25, "48550": 19, "4872": 26, "488588e": 90, "4889": 33, "48e": 81, "49": 86, "4926": 33, "4969": 79, "4990": 88, "4999": [83, 91], "49e": 81, "49it": 81, "4e": 81, "5": [10, 19, 25, 27, 28, 32, 33, 36, 51, 53, 66, 80, 81, 83, 84, 85, 86, 87, 88, 89, 90, 91], "50": [80, 83, 86], "500": [86, 90], "5000": [83, 89, 91], "501": 74, "5143": 88, "5179e": 33, "5352": 1, "5357": 79, "5446e": 33, "54e": 84, "55": [74, 90], "550": 86, "5503": [33, 88], "5545": 33, "5580": 79, "55e": 84, "56": 87, "56e": 81, "56it": 80, "5769": 25, "5792": 27, "5819": 33, "58it": 81, "59": 86, "59it": 80, "5_0": 89, "5e": 84, "5f": 81, "6": [26, 33, 57, 
76, 81, 83, 84, 88, 91], "600": 86, "6000": 86, "6062": 79, "60it": 81, "6150e": 33, "6157": 81, "61e": 83, "62": 81, "62it": 84, "6349": 24, "64": [80, 86], "640x480": 79, "6431": 26, "65": 83, "650": 86, "6531": 79, "65it": 87, "6605": 32, "6606": 33, "6667": 24, "6698": 88, "66e": 84, "66it": 83, "68": 85, "681447": 92, "69": 83, "69e": 84, "69it": 85, "7": [26, 81, 84, 85, 90], "70": 83, "700": 86, "7037": 27, "7043e": 33, "71": 83, "7102": 32, "7108": 88, "7116e": 33, "72": [35, 83], "7251": 88, "7272": 26, "7289": 28, "733": 86, "7357": 88, "73e": 84, "7403": 25, "7429": 79, "7456": 27, "74e": 84, "750": 86, "7597": 88, "75e": 81, "76": 83, "7643": 24, "7644": 79, "767902e": 90, "7697": 28, "76e": 84, "76it": 87, "7785": 88, "77e": 81, "77it": 83, "78": [74, 90], "784": 86, "78it": 85, "7th": [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18], "8": [1, 26, 33, 35, 54, 55, 66, 81, 84, 85, 86, 87, 88, 89, 90], "80": 40, "8015": [24, 88], "8066": 33, "8087": 24, "80it": 81, "8101": 79, "8176": 88, "8194e": 33, "81it": [84, 87], "8207": 33, "82e": 84, "83": 87, "8303": 33, "8326": 26, "8341": 88, "8400": 25, "8426": 88, "8451": 32, "8453": 27, "84e": 84, "84it": 81, "85": 84, "85it": 81, "8623": 32, "8636": 27, "8654": [28, 88], "8674": 81, "8681": 79, "87": 1, "8735": 24, "8892e": 33, "88e": 83, "89": [39, 50, 56], "8906": 81, "8927": 33, "8976": 88, "8979": 88, "9": [33, 81, 83, 84, 87, 90], "90": 90, "9095": 79, "9141": 81, "9154": 25, "9179": 25, "91e": 83, "92": 86, "9239": 33, "9266e": 33, "9341": 88, "93e": 81, "93schmidt_process": 40, "9400": 88, "9427": 33, "9452e": 33, "94e": 84, "94it": 84, "95": 83, "9518": 33, "9545": 25, "9573": 88, "9615": 33, "9679": 79, "97": [39, 50, 56, 83], "9734": 88, "97e": [81, 85], "98": 84, "9831": 25, "9843": 28, "9853": 33, "9878": 32, "99": 89, "9902": 28, "999": [73, 81, 84, 85], "99it": 81, "9e": 81, "A": [0, 1, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 21, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 39, 41, 49, 50, 51, 54, 55, 56, 57, 62, 67, 68, 69, 71, 72, 73, 74, 75, 76, 78, 83, 84, 85, 86, 91, 92], "AND": 0, "AS": 0, "As": [79, 80, 81, 82, 86, 87, 91], "At": [67, 80, 81, 88], "BE": 0, "BUT": 0, "But": 76, "By": [22, 42, 67, 79, 85, 86], "FOR": 0, "For": [26, 31, 33, 34, 35, 39, 43, 50, 74, 76, 79, 81, 83, 84, 85, 86, 88, 91, 93], "IN": 0, "If": [1, 9, 19, 20, 23, 24, 25, 26, 27, 28, 30, 31, 33, 35, 38, 39, 40, 41, 42, 43, 47, 48, 51, 52, 53, 56, 57, 61, 62, 63, 67, 69, 70, 75, 76, 78, 79, 80, 81, 86], "In": [5, 23, 33, 34, 35, 38, 39, 50, 51, 54, 55, 57, 63, 76, 77, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92], "It": [3, 4, 21, 23, 34, 37, 39, 42, 43, 45, 47, 48, 49, 50, 51, 54, 55, 56, 57, 61, 67, 68, 69, 71, 72, 73, 74, 75, 77, 81, 83, 86, 87, 88, 90, 91], "NO": [0, 50, 80], "NOT": [0, 81], "NOs": 93, "No": [26, 88], "OF": 0, "OR": 0, "Of": 84, "On": [38, 76, 83], "One": 89, "Or": 1, "Such": [33, 90], "THE": 0, "TO": 0, "That": 86, "The": [0, 2, 3, 4, 9, 19, 20, 21, 22, 23, 24, 25, 28, 30, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 83, 86, 87, 88, 89, 90, 93], "Their": 84, "Then": [49, 50, 54, 55, 56, 89, 90], "There": [3, 79, 80, 81, 83, 84, 85, 86, 91, 92], "These": [61, 79, 84, 89], "To": [3, 4, 29, 76, 79, 88, 90], "WITH": 0, "_": [6, 7, 8, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 37, 38, 50, 67, 68, 69, 71, 72, 73, 74, 75, 77, 80, 81, 83, 86], "__": [19, 
20, 26, 31, 81, 83, 89], "__init": [26, 31], "__init__": [23, 24, 26, 30, 35, 76, 81, 82, 83, 84, 85, 86, 88, 90], "__version__": 90, "_collect": 21, "_i": [68, 69, 71, 72, 77, 86], "_k": 80, "_loss": [46, 90], "_lrschedul": [69, 75], "_net": 35, "_neural_net": 90, "_r": 68, "_rst": 83, "_sample_everi": 19, "a3055": 53, "a3081": 53, "a3490": 57, "a351": 57, "a_k": 80, "ab": [37, 80, 90, 91], "abil": 86, "abl": [3, 80, 81, 84, 85, 86], "about": [3, 88, 93], "abov": [0, 43, 80, 82, 83, 86, 89], "abovement": 84, "absolut": [80, 91], "abstract": [23, 29, 30, 46, 63, 64, 65, 66, 67, 76], "abstract_problem": 22, "abstractproblem": [2, 22, 23, 59, 62, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 79, 80, 86, 87], "academ": 1, "acceler": [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 79, 80, 83, 84, 85, 86, 87, 89, 90, 91], "acceller": 61, "accept": [3, 86], "access": [79, 81, 88], "accompani": 3, "accomplish": 90, "accord": 21, "accumul": [19, 81], "accumulate_grad_batch": 76, "accuraci": [79, 80, 83, 84, 85, 86, 90, 91], "achiev": 89, "acknowledg": [1, 92], "aco": 86, "across": [2, 24, 81, 89], "act": 53, "action": [0, 82, 91], "activ": [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 34, 35, 36, 37, 39, 44, 49, 52, 53, 54, 55, 56, 80, 81], "actual": [79, 90], "ad": [24, 33, 37, 43, 63, 81, 84], "adam": [68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 86, 90], "adap": 75, "adapt": [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 75, 83], "adaptiveactivationfunctioninterfac": [6, 7, 8, 10, 11, 12, 13, 14, 15, 16, 17, 18], "add": [59, 62, 63, 81, 88], "add_point": 63, "addit": [21, 64, 67, 68, 71, 72, 73, 74, 75, 76, 77, 78, 82, 84, 86], "address": 84, "adjust": 91, "adp": 9, "advanc": [1, 79, 81, 90, 93], "advect": 61, "adversari": 70, "ae": 86, "affect": [33, 84], "affin": [34, 39], "after": [21, 30, 36, 39, 41, 42, 44, 52, 53, 56, 76, 79, 81, 84, 85, 89], "again": [79, 83], "against": [67, 69, 74, 75, 77], "aggreg": [21, 23, 51, 57], "agre": 80, "aim": [2, 68, 69, 71, 72, 73, 74, 75, 77, 79, 81, 82, 83, 90], "aivagn": 90, "aka": [62, 90], "al": [19, 35, 37, 44, 50, 51, 69, 71], "algorithm": [19, 26, 31, 35, 40, 70], "all": [0, 19, 23, 24, 25, 26, 27, 28, 30, 31, 32, 33, 41, 42, 43, 46, 47, 48, 50, 59, 61, 62, 63, 67, 75, 76, 78, 79, 80, 81, 83, 84, 85, 86, 87, 88, 89, 91, 92], "allow": [20, 33, 58, 79, 81, 83, 88], "almost": [3, 84], "along": [40, 90], "alpha": [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 64, 65, 84, 88], "alreadi": [3, 23, 63, 80, 81, 83, 85, 86], "also": [3, 24, 26, 31, 32, 33, 41, 52, 53, 70, 76, 79, 81, 82, 83, 86, 88, 89, 90, 91, 92], "alwai": [3, 24, 35, 81, 83, 84, 86], "amaz": 81, "ambient": 92, "ameya": [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18], "among": [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 64, 88], "an": [0, 3, 9, 19, 21, 23, 26, 33, 34, 37, 39, 41, 45, 64, 65, 66, 67, 68, 74, 76, 79, 81, 82, 83, 84, 86, 90, 91, 93], "anaconda3": 90, "anagnostopoulo": 73, "analog": 82, "analysi": 84, "analyt": [79, 91], "analyz": 84, "anandkumar": [37, 39, 50, 54, 55, 56], "ani": [0, 3, 23, 29, 30, 33, 39, 52, 53, 56, 59, 61, 69, 75, 76, 79, 80, 81, 90, 91], "anna": 1, "anneal": 68, "anoth": 84, "anymor": 4, "anytim": 67, "api": [2, 21, 78, 79, 93], "append": [33, 81, 88], "appli": [23, 30, 36, 37, 38, 41, 43, 44, 46, 47, 48, 49, 50, 52, 53, 54, 55, 56, 59, 63, 68, 71, 73, 79, 81, 83, 86, 90, 91], "applic": [39, 50, 56, 69, 75, 81, 90], "appreciabili": 84, "approach": [74, 81, 83, 84, 87, 90], "appropri": 75, "approx": [50, 91], "approxim": [34, 39, 41, 49, 
50, 51, 56, 70, 77, 80, 83, 85, 86, 90, 91, 93], "ar": [3, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 24, 25, 26, 31, 33, 35, 38, 39, 40, 42, 43, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 59, 61, 62, 63, 67, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92], "arang": 86, "arbitrari": [47, 48, 81, 91], "architectur": [49, 51, 54, 56, 57, 69, 80, 83, 86, 90], "aren": 3, "arg": [22, 33, 46, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77], "argument": [19, 20, 21, 76, 78, 81, 84, 88], "aris": 0, "arka": 19, "aroma": 92, "around": [81, 83, 88], "arounf": 23, "arrai": 86, "articl": 1, "arxiv": [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 34, 37, 41, 44, 49, 54, 55, 70], "ask": [90, 91], "aspect": [78, 80], "assert": [79, 80, 83, 84, 85, 91], "assign": [3, 82], "associ": [0, 69, 75, 76], "assum": [41, 74], "attent": 73, "attr": 90, "attribut": [33, 79, 90], "augment": [38, 41, 59, 67, 68, 71, 72, 73, 74, 75, 76, 77, 91], "author": [0, 1, 86, 87], "auto": 80, "autoencod": [5, 74], "autograd": [33, 40, 84], "automat": [23, 35, 67, 76, 81, 84, 90], "automatic_optim": 76, "avail": [2, 21, 24, 25, 26, 27, 28, 31, 32, 51, 57, 62, 63, 76, 79, 80, 81, 82, 83, 86, 87, 88, 90, 91], "avalu": [49, 56], "averag": [2, 5, 39, 56, 67, 74, 77, 81], "averagingneuraloper": [2, 49, 80], "avnoblock": 34, "avoid": [42, 47, 48, 79, 80, 81, 83, 84, 85, 86, 87, 91], "ax": [79, 80, 86, 88, 90, 91], "axi": [62, 89], "azizzadenesh": [37, 39, 50, 54, 55, 56], "b": [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 22, 25, 27, 28, 32, 33, 34, 35, 37, 38, 39, 49, 50, 54, 55, 56, 68, 69, 71, 72, 73, 74, 75, 80, 83, 84, 86], "back": [37, 45, 80], "backpropag": 90, "backward": [33, 76, 86], "ballarin": 90, "banach": 57, "bar": [21, 26, 76, 81], "bare": 79, "barycentr": 31, "barycentric_coordinate_system": 31, "base": [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 88, 93], "basecontinuousconv": 35, "bash": 3, "basi": [2, 39, 42, 43, 56, 60, 90], "basic": [80, 83, 86, 90], "batch": [37, 45, 47, 48, 54, 55, 67, 69, 70, 75, 76, 77, 78, 86], "batch_idx": [67, 69, 70, 75, 76, 77], "batch_siz": [34, 35, 39, 49, 50, 56, 78, 80, 86, 87, 90], "beatriz": 74, "becaus": [33, 79, 81, 88, 90], "becom": 80, "been": [1, 83, 92], "befor": [42, 43, 51, 57, 79, 81, 83, 84, 88, 89], "begin": [7, 33, 47, 48, 68, 69, 71, 72, 73, 74, 75, 79, 80, 82, 83, 84, 85, 86, 87, 89, 91], "behavior": 81, "behaviour": 83, "being": [3, 42, 46, 88], "belong": [88, 89], "below": [24, 63, 76, 79, 81, 83, 84], "benchmark": [79, 81], "best": [3, 81], "beta": [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 84], "better": [81, 84, 85, 87, 89], "between": [20, 28, 39, 47, 48, 50, 56, 67, 69, 74, 75, 76, 77, 79, 80, 84, 86], "bhattacharya": [37, 39, 50, 54, 55, 56], "bia": [34, 35, 38, 39, 51, 52, 53, 56, 57, 80, 83, 84], "bibtex": 1, "big": 83, "binari": 4, "bit": 76, "block": [34, 37, 39, 40, 44, 45, 49, 54, 55, 56, 86], "blue": [88, 91], "bool": [24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 35, 39, 40, 42, 44, 47, 48, 51, 52, 53, 56, 57, 62, 70], "boolean": 29, "border": [25, 27, 28, 29, 30, 80, 88], "both": [35, 37, 39, 51, 57, 58, 59, 68, 69, 71, 72, 73, 75, 79, 86, 88, 89, 93], "bottleneck": 73, "bound": 88, "bound1": 63, "boundari": [2, 5, 22, 23, 80, 82, 84, 85, 87, 88, 89], 
"braga": 75, "branch": [3, 51, 57], "branch_net": 51, "branch_net1": 57, "branch_net2": 57, "bug": 3, "build": [2, 5, 42, 43, 80, 81, 82, 83, 85, 90], "built": [2, 4, 80, 81, 86], "burger_equ": 82, "burgers1d": 82, "burgers1dequ": 82, "c": [0, 33, 80, 86, 89, 91], "c_e_b_u_point": 88, "c_e_nb_d_point": 88, "c_e_nb_u_point": 88, "calcul": [19, 23, 47, 48, 61, 67, 68, 69, 71, 72, 73, 75, 76, 79, 86, 87], "call": [9, 19, 35, 51, 52, 53, 57, 67, 69, 74, 75, 76, 79, 81, 82, 83, 90], "callaback": 81, "callabl": [9, 23, 51, 57], "callback": [68, 76, 79, 84, 89], "cam": 84, "can": [2, 3, 4, 20, 21, 22, 23, 35, 38, 41, 43, 47, 48, 49, 50, 51, 54, 56, 57, 67, 68, 69, 71, 72, 73, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 93], "cannot": 83, "cap": 28, "capabl": 90, "captur": [81, 83], "care": 67, "cart_ellipse_b_union": 88, "cart_ellipse_nb_differ": 88, "cart_ellipse_nb_union": 88, "cartesian": [25, 27, 28, 88], "cartesian1": [25, 27, 28, 32], "cartesian2": [25, 27, 28, 32], "cartesian_dict": 24, "cartesian_sampl": 88, "cartesiandomain": [2, 25, 27, 28, 30, 32, 63, 64, 65, 66, 79, 81, 82, 83, 84, 85, 88, 89, 90, 91], "cartesiandomain_pt": 84, "cartesianproblem": 79, "case": [7, 22, 24, 33, 35, 38, 47, 48, 61, 68, 69, 71, 72, 73, 74, 75, 76, 79, 80, 82, 83, 84, 85, 86, 87, 88, 89, 91], "cast": [59, 90], "cat": [83, 86], "causal": 68, "causalpinn": 2, "cconv": 86, "cctivat": 44, "cdot": [41, 50, 85, 86, 87, 91], "cell": 84, "celu": [2, 6], "center": [43, 84, 86], "centeri": 86, "centerx": 86, "centroid": 86, "certain": 81, "cfd": 92, "challeng": [80, 83], "chang": [3, 33, 42, 68, 80, 81, 84, 86], "channel": [37, 45, 54, 55, 86], "chaotic": 80, "character": 83, "characterist": 82, "charg": 0, "chebyshev": [24, 63], "check": [21, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 41, 51, 57, 63, 76, 79, 90], "check_bord": [24, 25, 26, 27, 28, 29, 30, 31, 32], "check_val_every_n_epoch": 76, "checkpoint": [69, 75, 90], "child": 29, "choos": [2, 26, 31, 58, 67, 76, 79, 81, 86, 91], "choosen": [21, 78], "chosen": [24, 49, 56], "christian": 26, "circ": 50, "circl": 86, "circle_grid": 86, "circleproblem": 86, "cite": 93, "claim": 0, "clamp": 67, "class": [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 80, 81, 83, 84, 85, 86, 87, 89, 90, 91], "classic": 91, "classif": 86, "clean": 3, "clear": [83, 86, 91], "clearli": [83, 85, 86, 87], "clip": 81, "clippl": 81, "clone": [4, 33, 86], "close": [84, 89], "closur": 76, "cm": 90, "cma": [38, 73], "cmap": 80, "co": [38, 41, 83, 85, 86, 91], "code": [3, 4, 23, 41, 81, 83, 84, 85, 86, 91, 93], "code_formatt": 3, "codit": [70, 74], "codomain": [34, 39], "coeff": [42, 43], "coeffic": 90, "coeffici": [39, 42, 43, 64, 70, 74, 80, 82], "cog": 92, "coincid": 84, "colabor": 92, "collect": [5, 21, 90], "colloc": [19, 79, 83, 91], "color": [88, 90, 91], "colorbar": [80, 86, 89, 90], "column": [33, 35, 40, 42, 51, 57, 86], "com": 4, "combin": [21, 37, 58, 86], "come": [79, 80], "commit": 3, "common": [3, 86], "compar": [67, 69, 74, 75, 77, 79, 80, 81, 84, 85, 90, 91], "comparison": 84, "compat": [4, 35, 86], "competit": 69, "competitivepinn": [2, 83], "complet": [3, 79, 82, 83, 84, 85, 91], "complex": [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 62, 82, 83, 84, 87, 88, 91], "compon": [23, 39, 51, 52, 53, 57, 61, 62, 64, 
65, 66, 75, 79, 81, 82, 83, 84, 85, 89, 91], "componet": 80, "compos": [50, 80, 84, 86, 88, 90, 91], "composit": [63, 91], "comput": [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 23, 34, 35, 37, 38, 39, 40, 41, 44, 45, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 60, 61, 67, 68, 69, 71, 72, 73, 74, 75, 76, 77, 79, 80, 81, 82, 83, 86, 90, 91, 92], "compute_residu": 67, "concaten": [38, 83], "concatenet": 38, "conclud": 84, "condit": [0, 2, 5, 19, 21, 23, 63, 64, 65, 66, 67, 76, 79, 80, 81, 82, 83, 84, 85, 86, 87, 89, 90], "conditon": [41, 85], "conduct": 87, "confer": [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 44, 69], "configur": [69, 70, 72, 75, 76, 77, 85], "configure_optim": [69, 70, 72, 75, 76, 77], "congratul": [79, 82, 83], "connect": [0, 53, 73, 83], "consid": [23, 25, 27, 28, 29, 30, 39, 52, 53, 56, 61, 79, 80, 86, 91], "consist": [3, 57, 90], "constant": [41, 54, 55, 82], "constantlr": [68, 69, 70, 71, 72, 73, 74, 75, 77, 81], "constrain": 89, "constraint": [5, 22, 41], "construct": [33, 80, 85, 86, 91], "constructor": [22, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77], "consumpt": 81, "contact": 93, "contain": [21, 35, 43, 52, 53, 61, 70, 74, 76, 81, 86, 90, 91], "content": 93, "context": 77, "continu": [2, 5, 80], "continuousclassifi": 86, "continuousconv": 86, "continuousconv2d": 35, "continuousconvblock": [35, 86], "continuousconvolut": 86, "continuum": [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18], "contourf": 62, "contract": 0, "contrast": 86, "contribuit": 92, "contribut": [92, 93], "contributor": [0, 92], "contro": [69, 72], "control": [60, 69, 70, 72, 74, 75, 76, 84], "control_point": 60, "conv": 35, "convent": 83, "converg": [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 81], "convers": 33, "convert": [59, 80], "convolut": [2, 5, 34, 37, 39, 49, 54, 55, 56, 74, 80], "convolv": 86, "coord": [39, 80], "coordin": [31, 35, 39, 41, 49, 56, 61, 67, 74, 77, 83, 84, 85, 86, 89, 91], "coordinates_indic": [49, 56, 80], "coordinates_mesh": 86, "copi": [0, 33, 86], "copyright": 0, "core": [67, 80, 81, 83, 87, 90, 91], "cornerston": 81, "correct": [33, 83, 86], "correctli": [21, 41, 59, 85], "correspond": [20, 33, 34, 39, 51, 57, 62, 76, 84, 85, 86, 89, 90, 91], "coscia": [1, 35, 70], "coscia2023phys": 1, "cosin": 38, "cost": 81, "costum": [78, 85], "could": [26, 31, 76, 79, 83, 84, 91], "coupl": 81, "cours": [84, 88, 90, 91], "cover": 88, "coverag": 3, "coveral": 3, "cpu": [33, 61, 79, 80, 81, 83, 84, 85, 86, 87, 89, 90, 91], "creat": [9, 25, 27, 28, 32, 38, 47, 48, 58, 79, 80, 81, 85, 86, 87, 90], "crete": 85, "criteria": 31, "criterion": [47, 48, 86], "cross": [24, 33], "crossentropyloss": 86, "csvlogger": 79, "cubic": 43, "cuda": [33, 90], "cup": [32, 84, 85, 89], "current": [0, 19, 33, 40, 63, 69, 74, 75, 86, 87, 92], "current_condition_nam": 67, "current_epoch": 89, "custom": [5, 76], "d": [1, 3, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 23, 34, 35, 39, 43, 47, 48, 49, 50, 52, 53, 56, 61, 64, 65, 66, 69, 70, 72, 73, 74, 75, 76, 79, 80, 81, 82, 83, 84, 85, 86, 87, 89, 90, 91], "d_loss": [79, 81, 83, 84, 85, 91], "damag": 0, "darci": 5, "dario": 1, "dat": 80, "data": [2, 33, 35, 38, 41, 42, 43, 46, 47, 48, 67, 69, 70, 74, 75, 76, 77, 78, 81, 83, 85, 86, 89, 90], "data_darci": 87, "data_input": 89, "data_k": 80, "data_ks2": 80, "data_output": 89, "dataload": [33, 67, 70, 76, 77, 86], "dataloader_idx": 76, "datapoint": 35, "dataset": [80, 86, 87, 90], "daw": 19, "ddu": 82, "ddudxdx": 82, "de": 81, "deal": [0, 85], "decai": [68, 73], "decid": 76, "declar": 84, 
"decod": [74, 76, 86], "decomposit": [2, 42, 90], "decor": 33, "decreas": [83, 86], "deep": [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 41, 44, 86, 91], "deeper": 81, "deeponet": [2, 57, 80], "deepspe": 76, "def": [21, 22, 35, 64, 65, 66, 76, 79, 80, 81, 82, 83, 84, 85, 86, 88, 89, 90, 91], "defalut": 81, "default": [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 21, 23, 24, 25, 26, 27, 28, 29, 31, 32, 33, 34, 35, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 51, 52, 53, 54, 55, 56, 57, 59, 61, 62, 63, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 81, 82, 85, 86, 88], "default_root_dir": 89, "defin": [2, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 21, 23, 24, 25, 26, 27, 28, 30, 32, 46, 50, 51, 52, 53, 57, 58, 63, 67, 79, 81, 84, 86, 87, 88, 89, 90], "definit": [23, 63, 64, 65, 66, 67, 70, 74, 76, 79], "defualt": [70, 81], "degrad": [26, 31], "degre": [43, 47, 48, 60, 81], "delta": [80, 83, 84, 85, 89], "delta_u": [66, 89], "delv": 81, "demo": [1, 35, 70], "demonstr": [79, 81, 91], "denot": [73, 75], "depend": [5, 24, 49, 54, 55, 56, 59, 63, 66, 79, 80, 81, 83, 85, 91], "depict": 2, "deprec": 46, "deriv": [41, 61, 64, 79, 80, 91], "describ": [3, 31, 47, 48, 82, 83], "design": [49, 51, 54, 56, 57, 81], "desir": [41, 51, 57, 84], "despit": [31, 84], "detach": [33, 80, 86, 89, 90], "detail": [33, 40, 50, 79, 86], "determin": 29, "determinist": 81, "develop": [81, 92], "deviat": [38, 42], "devic": [33, 81], "dezert": 26, "dict": [20, 21, 23, 24, 26, 35, 41, 42, 57, 58, 62, 63, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 79], "dictionari": [20, 21, 23, 24, 26, 42, 43, 58, 62, 63, 76, 86, 91], "diff": 91, "differ": [2, 20, 27, 28, 31, 32, 35, 49, 50, 51, 54, 55, 56, 57, 58, 61, 63, 76, 79, 80, 81, 83, 84, 85, 88, 90, 91, 92], "differen": 86, "differenti": [2, 37, 38, 54, 55, 61, 68, 69, 71, 72, 73, 74, 75, 78, 80, 87, 90, 93], "difficulti": 80, "digit": 92, "dim": [35, 40, 41, 83, 86], "dim_grid": 86, "dim_t": 80, "dim_x": 80, "dimens": [24, 25, 26, 27, 28, 31, 32, 34, 35, 37, 38, 39, 40, 41, 44, 45, 49, 50, 51, 52, 53, 54, 55, 56, 57, 69, 72, 74, 75, 80, 82, 86, 87, 91], "dimension": [5, 34, 37, 39, 41, 45, 49, 50, 54, 55, 56, 65, 83, 86, 88, 90], "direct": [35, 79, 80, 81, 83, 84, 85, 86, 91], "directli": [4, 79, 81, 86, 90], "directori": 89, "dirichlet": [23, 80, 82, 84, 87], "discourag": 81, "discoveri": [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18], "discrat": 80, "discret": [79, 80, 86, 89], "discretis": [74, 77, 83, 84, 91], "discretise_domain": [63, 79, 81, 83, 84, 85, 89, 91], "discrimin": [69, 70], "displai": 21, "distribut": [0, 4, 26, 70, 80], "div": 61, "dive": 81, "diverg": [61, 82, 91], "divergeng": 61, "divgrad": 61, "divid": [2, 23, 46, 47, 48], "divis": [47, 48, 67], "do": [0, 3, 35, 39, 51, 57, 76, 79, 81, 84, 85, 86, 88, 90], "document": [0, 4, 79, 86, 88, 91], "doe": [49, 54, 56, 59, 69, 74, 81, 90], "doesn": 86, "doi": [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 34, 35, 37, 38, 41, 44, 49, 51, 53, 54, 55, 57, 68, 70, 71, 72, 73, 74, 75], "domain": [23, 24, 25, 26, 27, 28, 30, 31, 32, 35, 39, 50, 59, 62, 63, 64, 65, 66, 79, 80, 83, 84, 85, 86, 87, 89, 90, 91], "don": [21, 80, 86], "done": [3, 33, 41, 74, 79, 80, 81, 86, 88, 89, 90], "dong": 41, "dot": [38, 39, 47, 48, 73, 75, 80], "download": [86, 87], "driven": [70, 74], "dropout": 36, "dropout_prob": 36, "dtype": [33, 80, 87], "du": 82, "dudt": [66, 82, 85], "dudx": 82, "due": [84, 85], "dure": [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 20, 21, 23, 62, 68, 74, 77, 79, 81, 82, 83, 84, 86, 89, 
90], "dx": [79, 91], "dy": [34, 80], "dynam": [80, 90], "e": [4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 21, 23, 24, 47, 48, 49, 52, 56, 63, 64, 65, 66, 68, 70, 72, 73, 74, 76, 77, 79, 80, 81, 82, 83, 85, 89, 90, 91], "e1": 83, "e2": [40, 83], "each": [3, 19, 20, 21, 23, 37, 38, 41, 43, 45, 47, 48, 50, 57, 62, 67, 69, 75, 76, 79, 81, 82, 84, 88, 91], "earli": 92, "earlystop": 81, "eas": 23, "easi": [3, 23, 62, 79, 80, 81, 83], "easier": 33, "easili": [79, 81, 84, 86], "effici": [26, 81], "eg": 81, "eigenvector": [38, 83], "either": [33, 35, 41, 46, 81, 90, 92], "elast": 87, "element": [23, 46, 47, 48, 63, 67, 70, 77, 88], "elementwis": 9, "elip": 26, "ell": [47, 48], "ell_k": 80, "ellips": 88, "ellipse_bord": 88, "ellipse_no_bord": 88, "ellipsoid": [24, 25, 26, 27, 28, 32, 88], "ellipsoid1": [25, 27, 28, 32], "ellipsoid2": [25, 27, 28, 32], "ellipsoid_bord": 88, "ellipsoid_border_sampl": 88, "ellipsoid_dict": 26, "ellipsoid_no_bord": 88, "ellipsoid_no_border_sampl": 88, "ellipsoiddomain": [2, 25, 27, 28, 30, 32, 63, 88], "ellipt": 87, "els": 80, "elu": [2, 7], "em": [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18], "email": 93, "emb": [34, 39, 50, 80], "embed": [2, 34, 39, 49, 50, 56, 80], "embedding1": 83, "embedding2": 83, "embedding_dimenion": 39, "embedding_dimes": 80, "emploi": [80, 90], "empti": [86, 89], "en": [31, 40], "enabl": [20, 59], "enable_model_summari": [79, 80, 81, 83, 84, 85, 86, 87, 91], "encapsul": [81, 84, 91], "encod": [38, 74, 76, 86], "end": [7, 19, 21, 47, 48, 67, 68, 69, 71, 72, 73, 74, 75, 76, 79, 80, 81, 82, 83, 84, 85, 89, 91], "energi": 86, "enforc": [23, 41, 76, 82, 91], "engin": [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 38, 68, 71, 73], "enhanc": [2, 36, 71], "enhanced_linear": 36, "enhancedlinear": 2, "enough": 83, "enrich": 21, "ensur": [3, 41, 81, 91], "entir": [49, 51, 54, 56, 57], "entiti": [29, 30], "entri": [51, 57], "enumer": [86, 89, 90], "ep": 68, "epoch": [19, 20, 21, 67, 73, 76, 80, 81, 83, 84, 85, 86, 87, 89, 91], "epoch_switch": 20, "epochs_sav": 89, "epsilon": [43, 68], "equal": [26, 31, 33, 37, 45, 84, 86, 89], "equat": [5, 22, 37, 38, 54, 55, 64, 65, 66, 67, 68, 69, 71, 72, 73, 75, 81, 83, 84, 85, 86, 87, 88, 89, 90, 93], "equation_factori": 85, "equationinterfac": [23, 67, 68, 69, 71, 72, 73, 75], "erc": 92, "err": 87, "err_test": 80, "err_train": 80, "error": [33, 47, 48, 68, 69, 71, 72, 74, 75, 77, 79, 80, 81, 83, 84, 86, 87, 90, 91], "error_metr": 80, "especi": [81, 83], "essenti": 81, "estim": [26, 90], "et": [19, 35, 37, 44, 50, 51, 69, 71], "eta": 73, "etc": 22, "eu": 92, "euclidean": [47, 48], "eval": [35, 86], "evalu": [9, 22, 23, 39, 43, 46, 47, 48, 60, 67, 68, 69, 71, 72, 73, 75, 79, 84, 85, 89, 91], "even": [3, 38, 80, 81, 86], "event": 0, "eventu": 23, "everi": [9, 51, 52, 53, 57, 73, 76, 78, 89, 90], "evolv": 85, "exact": [43, 84, 85, 91], "exactli": [41, 85, 91], "exampl": [19, 20, 21, 22, 24, 25, 26, 27, 28, 31, 32, 33, 34, 35, 36, 39, 50, 51, 57, 62, 63, 64, 65, 66, 67, 76, 79, 81, 83, 86, 87, 88, 90], "example_dirichlet": 22, "example_domain": 22, "example_input_pt": 22, "example_output_pt": 22, "except": [39, 52, 53, 56], "excercis": 80, "exclud": 25, "exclus": [2, 25, 63], "execut": [42, 81], "exercis": 3, "exhibit": 83, "exist": [3, 43], "exp": [2, 6, 7, 8, 15, 16, 17, 18, 68, 79, 81, 85, 89], "expand": [39, 42, 90], "expans": 41, "expas": 91, "expeci": 81, "expect": [34, 35, 37, 39, 45, 49, 50, 52, 53, 54, 55, 56, 69, 70, 72, 75, 79, 80, 81, 85, 89, 90], "expens": 90, "expert": [41, 91], "explain": [79, 
86], "exploit": [84, 85, 86, 90, 91], "explor": [20, 81], "exponenti": [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 68], "express": [0, 83, 91], "extact": 81, "extend": [84, 87], "extens": [41, 81, 86, 88], "extent": 80, "extra": [5, 50, 59, 67, 75, 76], "extra_featur": [59, 67, 68, 69, 71, 72, 73, 74, 75, 76, 77, 84], "extract": [19, 22, 33, 51, 57, 59, 64, 65, 66, 67, 79, 80, 81, 82, 83, 84, 85, 87, 88, 89, 91], "extrafeatur": [84, 85, 91], "extrema": [24, 26], "f": [9, 35, 80, 81, 83, 86, 87, 88, 90, 91], "f1": 86, "f2": 86, "f_": 9, "f_1": [35, 86], "f_2": [35, 86], "facilit": 81, "fact": [85, 86], "factor": [57, 68, 69, 70, 71, 72, 73, 74, 75, 77], "failur": 19, "fairli": 79, "fals": [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 35, 40, 44, 47, 48, 62, 70, 76, 79, 80, 81, 83, 84, 85, 86, 87, 88, 90, 91], "fancier": 76, "fashion": 90, "fast": 92, "faster": 84, "favourit": 80, "featur": [3, 5, 59, 67, 68, 71, 72, 73, 74, 75, 76, 77, 81], "federico": 74, "feed": [35, 83, 84, 86], "feedforward": [2, 23, 44, 51, 53, 57, 58, 79, 80, 81, 83, 84, 86, 89, 90, 91], "feel": [80, 83], "few": [3, 93], "ffn": 84, "ffn_dict": 58, "field": [34, 35, 39, 42, 49, 50, 55, 56, 61, 84, 85, 86, 87, 90], "field_indic": [49, 56, 80], "fig": [80, 86, 88, 90, 91], "figsiz": [80, 86, 88, 90, 91], "figur": [50, 79], "file": [0, 39, 62, 90], "filenam": 62, "fill": 88, "filter": 35, "filter_dim": [35, 86], "final": [3, 24, 37, 49, 50, 51, 54, 55, 56, 57, 79, 81, 83, 84, 87, 92], "final_lay": 83, "finali": [79, 80, 83, 84, 85, 91], "find": [2, 3, 25, 68, 69, 71, 72, 73, 74, 75, 77, 80, 83, 86, 88, 89], "fine": 86, "first": [25, 33, 35, 42, 44, 76, 80, 81, 83, 84, 85, 86, 87, 88, 91], "firstli": [86, 88], "fit": [0, 42, 43, 76, 80, 81, 83, 84, 85, 86, 87, 89, 90, 91], "fit_pod": 90, "five": 2, "fix": [3, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 23, 35, 62, 68, 79, 80, 81, 82, 85, 86], "fixed_vari": [62, 85], "fixedcurl": 82, "fixedflux": [23, 82], "fixedgradi": [23, 82], "fixedoper": 82, "fixedvalu": [23, 64, 65, 66, 79, 81, 82, 83, 84, 85, 89], "flag": [35, 78, 86], "flatten": [86, 90], "flexibl": 79, "float": [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 23, 36, 38, 41, 43, 68, 70, 73, 74, 77, 80, 86, 87, 90], "float32": 82, "floor": [37, 45], "flow": [5, 53, 81], "fluid": 90, "flux": 23, "fno": [2, 80], "focu": [75, 80, 81, 87], "folder": 5, "follow": [0, 1, 2, 3, 31, 38, 41, 73, 74, 75, 77, 79, 80, 81, 82, 85, 86, 87, 91, 92], "foral": [41, 80], "forc": [23, 83, 84, 87, 89], "force_term": [84, 89], "forev": 81, "fork": 3, "form": [3, 35, 39, 52, 53, 57, 79, 80, 85, 86, 87], "formal": 93, "format": [1, 3, 89], "former": 74, "formual": [69, 70, 74, 75, 77], "formul": [22, 68, 71, 72, 73, 76, 91], "formula": [38, 41], "fortun": 81, "forward": [9, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 80, 81, 83, 84, 85, 86, 90], "forward_map": 59, "found": [76, 83, 90, 93], "fourier": [2, 5, 41, 45, 49, 54, 55, 56, 80], "fourier_embed": 83, "fourierblock1d": 37, "fourierblock2d": 37, "fourierblock3d": 37, "fourierfeatureembed": [38, 83], "fourierintegralkernel": 2, "fourth": 80, "foward": 85, "frac": [15, 16, 17, 18, 34, 41, 47, 48, 68, 69, 71, 72, 73, 74, 75, 77, 79, 80, 82, 85, 91], "framework": 79, "free": [0, 3, 67, 80, 83], "frequenc": [19, 38, 76, 83], "friendli": 93, "from": [0, 19, 21, 23, 25, 27, 28, 29, 30, 31, 32, 33, 34, 37, 38, 39, 43, 44, 45, 46, 47, 
48, 49, 50, 51, 54, 55, 56, 57, 62, 63, 64, 65, 66, 67, 68, 69, 71, 75, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92], "frontend": 62, "frontier": [24, 26, 31, 32], "full": [81, 87], "fulli": 83, "func": [9, 34, 39, 49, 52, 53, 54, 55, 56, 79, 80, 81, 84, 86, 89, 90], "function": [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 23, 33, 34, 35, 36, 37, 39, 41, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 59, 60, 61, 62, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 79, 80, 81, 82, 83, 87, 88, 90, 91], "fund": 92, "fundament": [23, 63], "furnish": 0, "further": 93, "fusion": 26, "futur": 90, "g": [1, 23, 50, 51, 68, 70, 72, 74, 76, 77, 79, 80, 81, 82, 85, 90, 91], "g_": 50, "gamma": [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 70, 73, 83], "gamma0": 83, "gamma0_loss": 83, "gamma1": [66, 82, 83, 84, 85, 89], "gamma1_loss": [83, 84, 85], "gamma2": [66, 82, 84, 85, 89], "gamma2_loss": [84, 85], "gamma3": [84, 85, 89], "gamma3_loss": [84, 85], "gamma4": [84, 85, 89], "gamma4_loss": [84, 85], "gamma_1": [84, 85, 89], "gamma_2": [84, 85, 89], "gamma_3": [84, 85, 89], "gamma_4": [84, 85, 89], "gamma_i": [84, 85], "gan": 76, "garom": 2, "gashler": [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18], "gaussian": 43, "gelu": [2, 10, 34, 49], "gener": [2, 3, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 26, 49, 51, 54, 56, 57, 63, 70, 85, 86, 88, 92], "generaliz": 91, "geometri": [5, 25, 27, 28, 29, 30, 32, 63, 64, 65, 66, 79, 81, 82, 83, 84, 85, 89, 90, 91], "geometryunion": 32, "georg": [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 73], "get": [40, 81, 85, 86, 88], "get_metr": 21, "get_standard_metr": 21, "gianluigi": [1, 92], "git": [3, 4], "github": [4, 83], "give": [80, 91], "given": [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 22, 23, 25, 27, 28, 32, 34, 39, 42, 43, 67, 68, 69, 71, 72, 73, 75, 77, 79, 80, 81, 82, 83, 86, 89, 91], "global": [54, 55, 75, 76], "go": [3, 79, 80, 81, 83, 84, 85, 86, 87, 90, 91], "goal": [89, 90], "godfrei": [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18], "goea": 92, "good": [3, 86, 91], "govern": [67, 68, 69, 71, 72, 73, 75], "gpinn": [2, 83], "gpu": [61, 76, 79, 80, 81, 83, 87, 90, 91], "gpu0": 90, "grad": [33, 61, 64, 65, 66, 79, 81, 82, 85], "gradient": [23, 33, 40, 53, 61, 71, 79, 81, 82, 86], "gradient_clip_v": 81, "gram": 40, "grant": 0, "granular": 38, "graph": [33, 74, 86], "graphic": 84, "grate": 92, "gratitud": 92, "great": [83, 85, 91, 92], "greater": [26, 31, 40], "green": 91, "grid": [24, 62, 63, 79, 83, 84, 86, 89, 91], "grid2": 86, "group": 3, "guarante": [4, 23, 41], "guid": [41, 91], "guidelin": 3, "h": 41, "h2020": 92, "ha": [1, 3, 33, 34, 39, 51, 57, 58, 76, 79, 81, 83, 85, 86, 88, 89, 92], "had": 86, "hand": [22, 43], "handl": [67, 75, 76, 83], "hanwen": 38, "happen": 82, "hard": [5, 41, 80, 89], "hard_spac": 85, "hard_t": 85, "hardli": 83, "hardmlp": 85, "hardmlptim": 85, "have": [3, 33, 35, 41, 42, 69, 76, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92], "have_sampled_point": 63, "haven": 81, "he": 44, "heart": 88, "heat": 87, "helmotz": 5, "helmotz_equ": 91, "helmotz_sol": 91, "help": [3, 38, 83, 84], "helper": 80, "henc": [38, 70, 74], "her": 85, "here": [2, 21, 40, 64, 65, 66, 76, 79, 81, 83, 84, 85, 86, 87, 88, 89, 90, 91], "herebi": 0, "hesthaven": [74, 90], "hi": 85, "hidden": [34, 39, 44, 49, 50, 52, 53, 56, 80, 84, 86], "hidden_dim": 44, "hidden_dimens": 86, "hidden_s": [34, 39], "high": [2, 19, 38, 83, 84, 87, 88], "higher": [26, 31, 87, 88, 91], "highest": 86, "highli": 81, 
"highlight": [79, 90], "highlihgt": 88, "histori": 3, "hold": [79, 81], "holder": 0, "homogen": 89, "hook": [76, 81], "hope": 79, "how": [21, 58, 76, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91], "howev": [81, 83, 86], "hpu": [80, 81, 83, 87, 90, 91], "html": 83, "http": [4, 31, 35, 37, 40, 44, 83], "hypercub": [24, 63, 79, 91], "hyperellipsoid": 26, "hyperparamet": [68, 80, 81], "i": [0, 2, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 37, 38, 39, 40, 41, 42, 43, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93], "ic3k": [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18], "iclr": 69, "idea": [83, 86], "identifi": [33, 75], "idx": 90, "idx_": 90, "ieee": [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 44], "ij": [38, 83, 86], "imag": [44, 86], "image_transform": 86, "implement": [19, 20, 21, 23, 24, 25, 26, 27, 28, 29, 31, 32, 34, 35, 37, 39, 41, 43, 44, 45, 47, 48, 49, 51, 52, 53, 54, 55, 56, 57, 58, 59, 61, 62, 63, 67, 68, 69, 70, 71, 72, 73, 74, 75, 77, 80, 81, 82, 83, 84, 85, 86, 87, 88, 93], "impli": 0, "implicit": 80, "import": [3, 33, 64, 65, 66, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91], "impos": [41, 80, 81, 83, 85, 89], "improv": [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 81, 83, 90, 91], "imshow": [80, 87], "in_featur": [35, 51, 57, 83], "includ": [0, 3, 24, 25, 26, 27, 28, 31, 32, 39, 51, 52, 53, 57, 63, 76, 79, 82, 87, 88, 90], "incorpor": 84, "increas": 42, "increment": 19, "inde": 83, "indec": [51, 57], "indeces_variables_extract": 57, "indent": 3, "independent": 24, "index": [60, 67, 69, 70, 75, 76, 77], "indic": [40, 76, 79, 80, 91], "indistinguish": 79, "industri": 90, "infeas": 91, "infer": 90, "inflow": 90, "inform": [1, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 38, 41, 51, 53, 57, 67, 68, 69, 71, 72, 73, 75, 81, 83, 84, 86, 89, 91, 93], "infti": 91, "inher": [23, 46, 63], "inherit": [29, 30, 50, 67, 68, 71, 76, 79, 81, 82, 84, 85, 86, 87, 89, 90], "inhert": 88, "inifinit": 91, "inifit": 91, "initi": [3, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 21, 23, 31, 33, 36, 37, 40, 44, 49, 54, 55, 56, 58, 79, 80, 81, 82, 83, 84, 85, 88, 89, 90], "initial_cond_test": 80, "initial_cond_train": 80, "initial_condit": [66, 82, 85], "inlin": 90, "inner": [34, 35, 39, 51, 54, 55, 57], "inner_s": [39, 52, 53, 54, 55, 56, 87], "input": [9, 22, 23, 24, 26, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 59, 61, 63, 64, 65, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 79, 82, 83, 84, 85, 87, 89, 90, 91], "input_": [22, 23, 61, 64, 65, 66, 79, 81, 82, 83, 84, 85, 86, 89, 91], "input_data": 86, "input_data2": 86, "input_dim": [44, 85], "input_dimens": [38, 39, 41, 52, 53, 79, 80, 81, 83, 84, 86, 87, 89, 90, 91], "input_dimenson": [51, 57], "input_indeces_branch_net": [51, 57], "input_indeces_trunk_net": [51, 57], "input_numb_field": [35, 37, 45, 55, 86], "input_point": [22, 70, 74, 80, 86, 87, 89, 90], "input_pt": [74, 77, 79], "input_tensor": [67, 69, 74, 75, 77], "input_vari": [59, 63, 79, 80, 81, 84, 85, 86, 87, 89], "insert": 81, "insid": [24, 25, 26, 27, 28, 29, 30, 31, 32, 67, 79, 83, 84], "insight": 84, "inspect": 79, "inspir": [43, 50], "instal": [87, 93], "instanc": [51, 57, 62, 67, 76, 82], "int": [19, 20, 24, 25, 26, 27, 28, 31, 32, 34, 35, 37, 38, 39, 40, 41, 42, 43, 44, 45, 47, 48, 49, 51, 
[searchindex.js (regenerated): minified Sphinx search index for the PINA 0.1.2 documentation; machine-generated term-to-page mappings with no human-readable prose. The only recoverable content is the page-title list: License; Cite PINA; Code Documentation; How to contribute; Installation; PINA Tutorials; the per-class API reference pages (adaptive activation functions, callbacks, Condition, Equations, geometry domains, LabelTensor, layers and blocks, losses, models and operators, Operators, Plotter, problem classes, solvers, Trainer); the tutorial pages (Physics Informed Neural Networks on PINA; Averaging Neural Operator for the Kuramoto-Sivashinsky equation; PINA and PyTorch Lightning training tips and visualizations; the Equation class; multiscale PDE learning with Fourier Feature Networks; two-dimensional Poisson with extra features; two-dimensional wave with hard constraints; unstructured convolutional autoencoder via continuous convolution; two-dimensional Darcy flow with the Fourier Neural Operator; building custom geometries with the Location class; resolution of an inverse problem; reduced order modeling (POD-RBF / POD-NN) for parametric problems; one-dimensional Helmholtz with periodic boundary conditions); PINA Team; and "Welcome to PINA's documentation!".]