diff --git a/pySDC/projects/Monodomain/README.rst b/pySDC/projects/Monodomain/README.rst
index 258afc6907..fd0e4d344d 100644
--- a/pySDC/projects/Monodomain/README.rst
+++ b/pySDC/projects/Monodomain/README.rst
@@ -59,7 +59,7 @@ We display here the stability domain of the ESDC and SDC methods, both with IMEX
    y'=\lambda_I y+\lambda_E y+\lambda_e y,
 
 with :math:`\lambda_I,\lambda_E,\lambda_e` representing :math:`f_I,f_E,f_e`, respectively.
-We fix :math:`\lambda_E=-1` and vary the stiff terms :math:`\lambda_I,\lambda_e` only. We see as the ESDC method is stable for all tested values of :math:`\lambda_I,\lambda_e`, while SDC is not.
+We fix :math:`\lambda_E=-1` and vary the stiff terms :math:`\lambda_I,\lambda_e` only. We see that the ESDC method is stable for all tested values of :math:`\lambda_I,\lambda_e`, while SDC is not.
 
 .. image:: ../../../data/stability_domain_IMEXEXP_EXPRK.png
    :scale: 60 %
@@ -85,8 +85,8 @@ Here we consider three methods:
 * MLESDC: This is a multilevel version of ESDC with :math:`m=6` collocation nodes on the fine level and :math:`m=3` nodes on the coarse level.
 * PFASST: Combination of the PFASST parallelization method with MLESDC, using 24 processors.
 
-We dispaly the number of iterations required by each method to reach a given tolerance and the residual at convergence. As ionic model we use again the ten Tusscher-Panfilov model.
-We see as PFASST requires a reasonalbly small number of iterations, comparable to the serial counterparts ESDC and MLESDC.
+We display the number of iterations required by each method to reach a given tolerance and the residual at convergence. As ionic model we again use the ten Tusscher-Panfilov model.
+We see that PFASST requires a reasonably small number of iterations, comparable to the serial counterparts ESDC and MLESDC.
 
 .. image:: ../../../data/niter_VS_time.png
    :scale: 100 %
diff --git a/pySDC/projects/Monodomain/problem_classes/MonodomainODE.py b/pySDC/projects/Monodomain/problem_classes/MonodomainODE.py
index deb5cc48ea..bd9dcca1ce 100644
--- a/pySDC/projects/Monodomain/problem_classes/MonodomainODE.py
+++ b/pySDC/projects/Monodomain/problem_classes/MonodomainODE.py
@@ -88,7 +88,7 @@ def initial_value(self):
         # create initial value (as vector of vectors). Every variable is constant in space
         u0 = self.dtype_u(self.init, val=self.ionic_model.initial_values())
 
-        # overwwrite the initial value with solution from file if desired
+        # overwrite the initial value with solution from file if desired
         if self.read_init_val:
             read_ok = self.read_reference_solution(u0, self.init_val_name, True)
             assert read_ok, "ERROR: Could not read initial value from file."
@@ -168,7 +168,7 @@ def eval_f(self, u, t, fh=None):
         if fh is None:
             fh = self.dtype_f(init=self.init, val=0.0)
 
-        # eval ionic model rhs on u and put result in fh. All indices of the vector of vector fh mush be computed (list(range(self.size))
+        # eval ionic model rhs on u and put result in fh. All indices of the vector of vectors fh must be computed (list(range(self.size)))
         self.eval_expr(self.ionic_model.f, u, fh, list(range(self.size)), False)
         # apply stimulus
         fh.val_list[0] += self.Istim(t)
diff --git a/pySDC/projects/Monodomain/problem_classes/space_discretizazions/Parabolic_DCT.py b/pySDC/projects/Monodomain/problem_classes/space_discretizazions/Parabolic_DCT.py
index baabe1a9e4..98e55ba702 100644
--- a/pySDC/projects/Monodomain/problem_classes/space_discretizazions/Parabolic_DCT.py
+++ b/pySDC/projects/Monodomain/problem_classes/space_discretizazions/Parabolic_DCT.py
@@ -8,7 +8,7 @@ class Parabolic_DCT(RegisterParams):
     """
-    A class for the spatial discreitzation of the parabolic part of the monodomain equation.
+    A class for the spatial discretization of the parabolic part of the monodomain equation.
     Here we discretize the spatial domain with a uniform mesh and use the discrete cosine transform (DCT)
     to discretize the Laplacian operator. The DCT is a real-to-real type of Fourier transform that is well suited for Neumann boundary conditions.
diff --git a/pySDC/projects/Monodomain/run_scripts/run_MonodomainODE_cli.py b/pySDC/projects/Monodomain/run_scripts/run_MonodomainODE_cli.py
index c4cc0e812e..9fca058c78 100644
--- a/pySDC/projects/Monodomain/run_scripts/run_MonodomainODE_cli.py
+++ b/pySDC/projects/Monodomain/run_scripts/run_MonodomainODE_cli.py
@@ -25,7 +25,7 @@ def main():
         "--num_nodes",
         default="4",
         type=list_of_ints,
-        help="list of ints (as '5,3', i.e. no brakets): number of collocation nodes per level",
+        help="list of ints (as '5,3', i.e. no brackets): number of collocation nodes per level",
     )
     parser.add_argument("--num_sweeps", default="1", type=list_of_ints, help="list of ints: number of sweeps per level")
     parser.add_argument(
diff --git a/pySDC/projects/Monodomain/sweeper_classes/exponential_runge_kutta/imexexp_1st_order.py b/pySDC/projects/Monodomain/sweeper_classes/exponential_runge_kutta/imexexp_1st_order.py
index bdac694846..d12cb6aa49 100644
--- a/pySDC/projects/Monodomain/sweeper_classes/exponential_runge_kutta/imexexp_1st_order.py
+++ b/pySDC/projects/Monodomain/sweeper_classes/exponential_runge_kutta/imexexp_1st_order.py
@@ -128,7 +128,7 @@ def compute_lambda_phi_Qmat_exp(self):
         # To save computations we recompute that only if u[0] has changed.
         # Also, we check only for the first component u[0][0] of u[0] to save more computations.
         # Remember that u[0][0] is a sub_vector representing the potential on the whole mesh and is enough to check if u[0] has changed.
-        if abs(self.u_old[0] - self.level.u[0][0]) > 1e-10 * abs(self.level.u[0][0]):
+        if not np.allclose(self.u_old[0].numpy_array[:], self.level.u[0][0].numpy_array[:], rtol=1e-10, atol=1e-10):
             self.u_old[0].numpy_array[:] = self.level.u[0][0].numpy_array[:]
@@ -149,7 +149,7 @@ def compute_lambda_phi_Qmat_exp(self):
             # evaluates phi_1(dt*delta_i*lambda) for delta_i = c_i - c_{i-1}
             self.phi_one = self.phi_eval_lists(P, L.dt * self.delta, [1], self.phi_one, self.lmbda, True)
 
-            # compute weight for the integration of \int_0^ci exp(dt*(ci-r)lmbda)*PiQ(r)dr, where PiQ(r) is a polynomial inteprolating
+            # compute weight for the integration of \int_0^ci exp(dt*(ci-r)lmbda)*PiQ(r)dr, where PiQ(r) is a polynomial interpolating
             # Q(c_i)=Q[i].
             # We do so as \int_0^ci exp(dt*(ci-r)lmbda)*PiQ(r)dr = \sum_{j=0}^{M-1} Qmat_exp[i,j]*Q[j]
             if not hasattr(self, "Qmat_exp"):
@@ -187,7 +187,7 @@ def integrate(self):
         self.tmp = P.dtype_u(init=P.init, val=0.0)
 
         for k in range(M):
-            # self.Q[k] = L.f[k + 1].exp + self.lmbda * (L.u[0] - L.u[k + 1]) # at the indeces of the exponential rhs, otherwsie 0
+            # self.Q[k] = L.f[k + 1].exp + self.lmbda * (L.u[0] - L.u[k + 1]) # at the indices of the exponential rhs, otherwise 0
             self.Q[k].zero_sub(P.rhs_exp_indeces)
             self.Q[k].iadd_sub(L.u[0], P.rhs_exp_indeces)
             self.Q[k].axpy_sub(-1.0, L.u[k + 1], P.rhs_exp_indeces)
@@ -294,47 +294,6 @@ def compute_end_point(self):
 
         return None
 
-    def predict(self):
-        """
-        Predictor to fill values at nodes before first sweep
-
-        Default prediction for the sweepers, only copies the values to all collocation nodes
-        and evaluates the RHS of the ODE there
-        """
-
-        # get current level and problem description
-        L = self.level
-        P = L.prob
-
-        # evaluate RHS at left point
-        L.f[0] = P.eval_f(L.u[0], L.time)
-
-        for m in range(1, self.coll.num_nodes + 1):
-            # copy u[0] to all collocation nodes, evaluate RHS
-            if self.params.initial_guess == "spread":
-                L.u[m] = P.dtype_u(L.u[0])
-                L.f[m] = P.eval_f(L.u[m], L.time + L.dt * self.coll.nodes[m - 1])
-            # start with zero everywhere
-            elif self.params.initial_guess == "zero":
-                L.u[m] = P.dtype_u(init=P.init, val=0.0)
-                L.f[m] = P.dtype_f(init=P.init, val=0.0)
-            # start with random initial guess
-            elif self.params.initial_guess == "random":
-                L.u[m] = P.dtype_u(init=P.init, val=self.rng.rand(1)[0])
-                L.f[m] = P.dtype_f(init=P.init, val=self.rng.rand(1)[0])
-            else:
-                raise ParameterError(f"initial_guess option {self.params.initial_guess} not implemented")
-
-        # indicate that this level is now ready for sweeps
-        L.status.unlocked = True
-        L.status.updated = True
-
-        # self.update_lmbda_yinf_status(outdated=True)
-
-    # def update_lmbda_yinf_status(self, outdated):
-    #     if not self.level.prob.constant_lambda_and_phi and outdated:
-    #         self.lambda_and_phi_outdated = True
-
     def compute_residual(self, stage=''):
         """
         Computation of the residual using the collocation matrix Q
diff --git a/pySDC/projects/Monodomain/sweeper_classes/runge_kutta/imexexp_1st_order.py b/pySDC/projects/Monodomain/sweeper_classes/runge_kutta/imexexp_1st_order.py
index 1c5a765c52..ed3cc33d26 100644
--- a/pySDC/projects/Monodomain/sweeper_classes/runge_kutta/imexexp_1st_order.py
+++ b/pySDC/projects/Monodomain/sweeper_classes/runge_kutta/imexexp_1st_order.py
@@ -11,7 +11,6 @@ class imexexp_1st_order(sweeper):
     """
     First-order IMEXEXP sweeper using implicit/explicit/exponential Euler as base integrator
     In the cardiac electrphysiology community this is known as Rush-Larsen scheme.
-    The underlying intergrator is collocation method, leading to standard SDC.
     """
 
     def __init__(self, params):
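
A minimal sketch of the tolerance-based change check adopted above in compute_lambda_phi_Qmat_exp, using plain NumPy arrays in place of the project's mesh vectors; the helper name needs_recompute is an assumption made for illustration, while in the sweeper the data comes from the numpy_array views of the level's vectors.

    import numpy as np

    def needs_recompute(u_old, u_new, rtol=1e-10, atol=1e-10):
        """Return True when u_new differs from the cached u_old beyond the given
        tolerances, i.e. lambda and the phi functions should be recomputed."""
        return not np.allclose(u_old, u_new, rtol=rtol, atol=atol)

    # usage sketch
    cached = np.zeros(5)
    current = np.full(5, 1e-12)              # change below atol: treated as unchanged
    print(needs_recompute(cached, current))  # False
    current[0] = 1.0                         # significant change: recompute
    print(needs_recompute(cached, current))  # True

Compared with the previous abs(...)-based test, np.allclose compares the cached and current potential entry by entry, and its absolute tolerance keeps the check meaningful where the potential is close to zero.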