From cbaae05aa15b469f31f486dc37f52395919c0253 Mon Sep 17 00:00:00 2001 From: jakob-fritz <37077134+jakob-fritz@users.noreply.github.com> Date: Wed, 24 Apr 2024 10:34:09 +0200 Subject: [PATCH] Make create_gitlab_ci branch up-to-date before merging into master (#418) * first working SDC version (M and Minv) * Update playground.py * cleaning up * Added some hyphens in plots (#389) * Removed seperate file for GPU Dahlquist implementation (#391) Co-authored-by: Thomas * Review (#388) * Bug is fixed and added new code * new code for the table * Edits in markdown file * some edits in test * Bugs fix * Codecov * I cleaned up my code and separated classes to make it easier to work with. It is not ready yet; if Codecov fails, I will include more tests. * forgot black * flake8 * bug fix * Edits codes according to the comments * Edited codes according to the comments in the GitHub * Defined new function in stability_simulation.py to check stability for given points and excluded codecov function that generates a table. * small edits for codecov * removed no cover * NCCL communicators (#392) * Added wrapper for MPI communicator to use NCCL under the hood * Small fix * Moved NCCL communicator wrapper to helpers --------- Co-authored-by: Thomas * Version bump for new release * proper readme and link * Started playground for machine learning generated initial guesses for (#394) SDC * playing with FEniCS * blackening * Bug fix (#395) * readme file changes * fixed bugs for stability plots and some edits in README file * some edits * typo in citation * Bump version * Bug fix (#396) * Clear documentation and some edits in the code * forgot black * some changes * bump version * Cosmetic changes (#398) * Parallel SDC (Reloaded) project (#397) TL: Added efficient diagonal preconditioners and associated project. 
Coauthored by @caklovicka * Generic multi-component mesh (#400) * Generic multicomponent mesh * new try * Added a test for MultiComponentMesh * Test that the type is conserved also after numpy operations * Added documentation for how to use `MultiComponentMesh` * Changed formatting of the documentation * Update ci_pipeline.yml * version freak show * version freak show II * version freak show III * version freak show IV * Update ci_pipeline.yml * version freak show V * 2D Brusselator problem (#401) * Added 2D Brusselator problem from Hairer-Wanner II. Thanks @grosilho for the suggestion! * Added forgotten pytest marker * Fix brain afk error * Added work counter for right hand side evaluations * Removed file for running Brusselator from project * Retry at removing the file * I need to go to git school * Datatype `DAEMesh` for DAEs (#384) * Added DAE mesh * Updated all DAE problems and the SDC-DAE sweeper * Updated playgrounds with new DAE datatype * Adapted tests * Minor changes * Black.. :o * Added DAEMesh only to semi-explicit DAEs + update for FI-SDC and ProblemDAE.py * Black :D * Removed unnecessary approx_solution hook + replaced by LogSolution hook * Update WSCC9 problem class * Removed unnecessary comments * Removed test_misc.py * Removed registering of newton_tol from child classes * Update test_problems.py * Rename error hook class for logging global error in differential variable(s) * Added MultiComponentMesh - @brownbaerchen + @tlunet + @pancetta Thank ugit add pySDC/implementations/datatype_classes/MultiComponentMesh.py * Updated stuff with new version of DAE data type * (Hopefully) faster test for WSCC9 * Test for DAEMesh * Renaming * ..for DAEMesh.py * Bug fix * Another bug fix.. * Preparation for PDAE stuff (?) 
* Changes + adapted first test for PDAE stuff * Commented out test_WSCC9_SDC_detection() - too long runtime * Minor changes for test_DAEMesh.py * Extended test for DAEMesh - credits for @brownbaerchen * Test for HookClass_DAE.py * Update for DAEMesh + tests * :tada: - speed up test a bit (at least locally..) * Forgot to enable other tests again * Removed if-else-statements for mesh type * View for unknowns in implSysFlatten * Fix for RK sweeper - changed nodes in BackwardEuler class (#403) * Made aborting the step at growing residual optional (#405) * `pySDC`-build-in `LagrangeApproximation` class in `SwitchEstimator` (#406) * SE now uses LagrangeApproximation class + removed Lagrange class in SE * Removed log message again (not corresponding to PR) * version bump * Added hook for logging to file (#410) * Monodomain project (#407) * addded some classes from oldexplicit_stabilized branch. Mainly, the problems description, datatype classes, explicit stabilized classes. Tested for IMEX on simple problems * added implicit,explicit,exponential integrator (in electrophysiology aka Rush-Larsen) * added exponential imex and mES, added parabolic_system in vec format * added new stabilized integrators using multirate, splitting and exponential approaches * before adding exponential_runge_kutta as underlying method, instead of the traditional collocation methods * added first order exponential runge kutta as underlying collocation method. To be generalized to higher order * generalized exponential runge kutta to higher order. 
Added exponential multirate stabilized method using exponential RK but must tbe checked properly * fixed a few things * optimized a few things * renamed project ExplicitStabilized to Monodomain * removed deprecated problems * fixed some renaming issues * did refactoring of code and put in Monodomain_NEW * removed old code and renamed new code * added finite difference discretization * added many things, cant remember * old convergence_controller * addded some classes from oldexplicit_stabilized branch. Mainly, the problems description, datatype classes, explicit stabilized classes. Tested for IMEX on simple problems * added implicit,explicit,exponential integrator (in electrophysiology aka Rush-Larsen) * added exponential imex and mES, added parabolic_system in vec format * added new stabilized integrators using multirate, splitting and exponential approaches * before adding exponential_runge_kutta as underlying method, instead of the traditional collocation methods * added first order exponential runge kutta as underlying collocation method. To be generalized to higher order * generalized exponential runge kutta to higher order. 
Added exponential multirate stabilized method using exponential RK but must tbe checked properly * fixed a few things * optimized a few things * renamed project ExplicitStabilized to Monodomain * removed deprecated problems * fixed some renaming issues * did refactoring of code and put in Monodomain_NEW * removed old code and renamed new code * added finite difference discretization * added many things, cant remember * added smooth TTP model for conv test, added DCT for 2D and 3D problems * added plot stuff and run scripts * fixed controller to original * removed explicit stabilized files * fixed other files * removed obsolete splittings from ionic models * removed old sbatch scripts * removed mass transfer and sweeper * fixed something * removed my base transfer * removed hook class pde * removed FD files * fixed some calls to FD stuff * removed FEM FEniCSx files * renamed FD_Vector to DCT_Vector * added hook for output and visualization script * removed plot scripts * removed run scripts, except convergence * removed convergence experiments script * fixed TestODE * added stability test in run_TestODE * added stability test in run_TestODE * added stability test in run_TestODE * removed obsolete stuff in TestODE * removed unneeded stuff from run_MonodomainODE * cleaned a bit run_MonodomainODE * removed utils/ * added few comments, cleaned a bit * removed schedule from workflow * restored tutorial step 7 A which I has modified time ago * run black on monodomain project * fixed a formatting thing * reformatted everything with black * Revert "revert formatted with black" This reverts commit 82c82e9eb5396854c4892e1667b13975df3fb6bb. 
* added environment file for monodomain project, started to add stuff in workflow * added first test * added package tqdm to monodomain environment * added new TestODE using DCT_vectors instead of myfloat, moved phi_eval_lists from MonodomainODE to the sweeper * deleted old TestODE and myfloat stuff * renamed TestODEnew to TestODE * cleaned a bit * added stability, convergence and iterations tests. Changed a bit other scripts as needed * reactivated other tests in workflow * removed my tests temporarly * added monodomain marker to project pyproject.toml * changed files and function names for tests * fixed convergence test * made one test a bit shorter * added test for SDC on HH and fixed missing feature in SDC imex sweeper for monodomain * reformatted with correct black options * fixed a lint error * another lint error * adding tests with plot * modified convergence test * added test iterations in parallel * removed plot from tests * added plots without writing to file * added write to file * simplified plot * new plot * fixed plot in iterations parallel * added back all tests and plots * cleaned a bit * added README * fixed readme * modified comments in controllers * try to compute phi every step * removed my controllers, check u changed before comuting phis * enabled postprocessing in pipeline * added comments to data_type classes, removed unnecessary methods * added comments to hooks * added comments to the problem classes * added comments to the run scripts * added comments to sweepers and transfer classes * fixed the readme * decommented if in pipeline * removed recv_mprobe option * changed back some stuff outiside of monodomain project * same * again * fixed Thomas hints * removed old unneeded move coverage folders * fixed previously missed Thomas comments * begin change datatype * changed run_Monodomain * added prints * fixed prints * mod print * mod print * mod print * mod print * rading init val * rading init val * removed prints * removed prints * 
checking longer time * checking longer time * fixed call phi eval * trying 2D * trying 2D * new_data type passing tests * removed coverage folders * optmized phi eval lists * before changing phi type * changed eval phi lists * polished a bit * before switch indeces * reformatted phi computaiton to its traspose * before changing Q * optimized integral of exp terms * changed interfate to c++ code * moved definition of dtype u f * tests passed after code refactoring * Generic MPI FFT class (#408) * Added generic MPIFFT problem class * Fixes * Generalized to `xp` in preparation for GPUs * Fixes * Ported Allen-Cahn to generic MPI FFT implementation * Ported Gray-Scott to generic MPI FFT (#412) * Ported Gray-Scott to generic MPI FFT class * `np` -> `xp` * Reverted poor changes * Update README.md (#413) Added the ExaOcean grant identified and the "Supported by the European Union - NextGenerationEU." clause that they would like us to display. * TIME-X Test Hackathon @ TUD: Test for `SwitchEstimator` (#404) * Added piecewise linear interpolation to SwitchEstimator * Started with test for SwitchEstimator [WIP] * Test to proof sum_restarts when event occuring at boundary * Started with test to check adapt_interpolation_info [WIP] * Added test for SE.adapt_interpolation_info() * Update linear interpolation + logging + changing tolerances * Test for linear interpolation + update of other test * Correction for finite difference + adaption tolerance * Added test for DAE case for SE * Choice of FD seems to be important for performance of SE * Removed attributes from dummy probs (since the parent classes have it) * Test for dummy problems + using functions from battery_model.py * Moved standard params for test to function * Updated hardcoded solutions for battery models * Updated hardcoded solutions for DiscontinuousTestODE * Updated docu in SE for FDs * Lagrange Interpolation works better with baclward FD and alpha=0.9 * Added test for state function + global error * Updated 
LogEvent hooks * Updated hardcoded solutions again * Adapted test_problems.py * Minor changes * Updated tests * Speed-up test for buck converter * Black.. * Use msg about convergence info in Newton in SE * Moved dummy problem to file * Speed up loop using mask * Removed loop * SDC-DAE sweeper for semi-explicit DAEs (#414) * Added SI-SDC-DAE sweeper * Starte with test for SemiImplicitDAE * Test for SI-SDC sweeper * Clean-up * Removed parameter from function * Removed test + changed range of loop in SI-sweeper --------- Co-authored-by: Robert Speck Co-authored-by: Thomas Baumann <39156931+brownbaerchen@users.noreply.github.com> Co-authored-by: Thomas Co-authored-by: Ikrom Akramov <96234984+ikrom96git@users.noreply.github.com> Co-authored-by: Thibaut Lunet Co-authored-by: Lisa Wimmer <68507897+lisawim@users.noreply.github.com> Co-authored-by: Giacomo Rosilho de Souza Co-authored-by: Daniel Ruprecht --- .gitignore | 6 + CHANGELOG.md | 2 + CITATION.cff | 4 +- README.md | 5 +- docs/contrib/02_continuous_integration.md | 13 +- docs/source/conf.py | 6 +- docs/source/index.rst | 2 + docs/source/projects/monodomain.rst | 1 + docs/source/projects/parallelSDC_reloaded.rst | 1 + docs/source/projects/second_order.rst | 1 + etc/environment-base.yml | 2 +- pySDC/core/Lagrange.py | 16 +- pySDC/core/Problem.py | 29 + pySDC/core/Sweeper.py | 132 +- pySDC/helpers/NCCL_communicator.py | 98 + pySDC/helpers/problem_helper.py | 2 +- pySDC/helpers/testing.py | 104 + .../adaptivity.py | 11 +- .../implementations/datatype_classes/mesh.py | 116 +- pySDC/implementations/hooks/log_solution.py | 79 + pySDC/implementations/hooks/plotting.py | 76 + .../problem_classes/AllenCahn_1D_FD.py | 22 +- .../problem_classes/AllenCahn_MPIFFT.py | 146 +- .../problem_classes/Brusselator.py | 189 ++ .../problem_classes/DiscontinuousTestODE.py | 63 +- .../problem_classes/GrayScott_MPIFFT.py | 341 ++- .../problem_classes/HarmonicOscillator.py | 1 + .../implementations/problem_classes/Lorenz.py | 17 +- 
.../NonlinearSchroedinger_MPIFFT.py | 147 +- .../problem_classes/TestEquation_0D.py | 106 +- .../problem_classes/TestEquation_0D_GPU.py | 16 - .../generic_MPIFFT_Laplacian.py | 181 ++ .../problem_classes/odeScalar.py | 213 ++ .../problem_classes/odeSystem.py | 926 ++++++++ .../sweeper_classes/Runge_Kutta.py | 77 +- .../sweeper_classes/generic_implicit.py | 17 +- .../sweeper_classes/generic_implicit_MPI.py | 23 +- .../HeatEquation_1D_FEniCSx_matrix_forced.py | 245 ++ ...HeatEquation_1D_FEniCSx_matrix_forced_2.py | 272 +++ .../FEniCSx/HookClass_FEniCS_output.py | 79 + pySDC/playgrounds/FEniCSx/__init__.py | 0 pySDC/playgrounds/FEniCSx/heat_equation_M.py | 209 ++ .../playgrounds/FEniCSx/heat_equation_raw.py | 108 + pySDC/playgrounds/FEniCSx/playground.py | 43 + pySDC/playgrounds/ML_initial_guess/README.md | 19 + pySDC/playgrounds/ML_initial_guess/heat.py | 262 +++ pySDC/playgrounds/ML_initial_guess/ml_heat.py | 130 ++ pySDC/playgrounds/ML_initial_guess/sweeper.py | 24 + pySDC/playgrounds/ML_initial_guess/tensor.py | 131 ++ pySDC/projects/DAE/misc/DAEMesh.py | 12 + pySDC/projects/DAE/misc/HookClass_DAE.py | 74 +- pySDC/projects/DAE/misc/ProblemDAE.py | 16 +- .../DAE/problems/DiscontinuousTestDAE.py | 36 +- pySDC/projects/DAE/problems/WSCC9BusSystem.py | 86 +- pySDC/projects/DAE/problems/simple_DAE.py | 65 +- .../DAE/problems/synchronous_machine.py | 41 +- .../DAE/problems/transistor_amplifier.py | 17 +- .../DAE/run/fully_implicit_dae_playground.py | 15 +- .../projects/DAE/run/run_convergence_test.py | 7 +- pySDC/projects/DAE/run/run_iteration_test.py | 7 +- .../DAE/run/synchronous_machine_playground.py | 33 +- .../projects/DAE/sweepers/SemiImplicitDAE.py | 190 ++ .../DAE/sweepers/fully_implicit_DAE.py | 60 +- pySDC/projects/Monodomain/README.rst | 94 + .../Monodomain/datatype_classes/my_mesh.py | 5 + .../Monodomain/etc/environment-monodomain.yml | 24 + .../Monodomain/hooks/HookClass_pde.py | 34 + .../hooks/HookClass_post_iter_info.py | 34 + 
.../problem_classes/MonodomainODE.py | 408 ++++ .../Monodomain/problem_classes/TestODE.py | 119 + .../ionicmodels/cpp/__init__.py | 5 + .../ionicmodels/cpp/bindings_definitions.cpp | 83 + .../ionicmodels/cpp/bistable.h | 88 + .../ionicmodels/cpp/compilation_command.txt | 8 + .../ionicmodels/cpp/courtemanche.h | 575 +++++ .../ionicmodels/cpp/hodgkinhuxley.h | 177 ++ .../ionicmodels/cpp/ionicmodel.h | 61 + .../ionicmodels/cpp/tentusscher.h | 542 +++++ .../ionicmodels/cpp/tentusscher_smooth.h | 550 +++++ .../space_discretizazions/Parabolic_DCT.py | 340 +++ .../run_scripts/run_MonodomainODE.py | 418 ++++ .../run_scripts/run_MonodomainODE_cli.py | 135 ++ .../Monodomain/run_scripts/run_TestODE.py | 301 +++ .../imexexp_1st_order.py | 301 +++ .../runge_kutta/imexexp_1st_order.py | 145 ++ .../TransferVectorOfDCTVectors.py | 40 + .../transfer_classes/Transfer_DCT_Vector.py | 70 + .../Monodomain/utils/data_management.py | 107 + .../visualization/show_monodomain_sol.py | 99 + pySDC/projects/PinTSimE/battery_model.py | 23 +- pySDC/projects/PinTSimE/buck_model.py | 2 +- .../PinTSimE/discontinuous_test_ODE.py | 3 +- pySDC/projects/PinTSimE/estimation_check.py | 14 +- .../projects/PinTSimE/hardcoded_solutions.py | 197 +- .../PinTSimE/paper_PSCC2024/log_event.py | 8 +- pySDC/projects/PinTSimE/switch_estimator.py | 179 +- pySDC/projects/Resilience/README.rst | 16 +- pySDC/projects/Resilience/paper_plots.py | 58 +- pySDC/projects/Resilience/strategies.py | 6 +- pySDC/projects/Resilience/work_precision.py | 9 +- .../Second_orderSDC/{README.md => README.rst} | 39 +- .../Second_orderSDC/check_data_folder.py | 2 +- ...dampedharmonic_oscillator_run_stability.py | 67 - .../harmonic_oscillator_params.py | 36 + .../harmonic_oscillator_run_points.py | 31 + .../harmonic_oscillator_run_stab_interval.py | 41 + .../harmonic_oscillator_run_stability.py | 33 + .../Second_orderSDC/penningtrap_Simulation.py | 922 +------- .../penningtrap_run_Hamiltonian_error.py | 6 +- 
.../Second_orderSDC/penningtrap_run_error.py | 8 +- .../penningtrap_run_work_precision.py | 12 +- pySDC/projects/Second_orderSDC/plot_helper.py | 406 ++++ .../Second_orderSDC/stability_simulation.py | 335 +++ .../preconditioner_playground_MPI.py | 3 +- .../projects/parallelSDC_reloaded/.gitignore | 2 + pySDC/projects/parallelSDC_reloaded/README.md | 36 + .../projects/parallelSDC_reloaded/__init__.py | 1 + .../allenCahn_accuracy.py | 119 + .../parallelSDC_reloaded/allenCahn_setup.py | 56 + .../chemicalReaction_accuracy.py | 112 + .../chemicalReaction_setup.py | 39 + .../parallelSDC_reloaded/convergence.py | 76 + .../jacobiElliptic_accuracy.py | 114 + .../jacobiElliptic_setup.py | 53 + .../parallelSDC_reloaded/kaps_accuracy.py | 115 + .../parallelSDC_reloaded/kaps_setup.py | 38 + .../parallelSDC_reloaded/lorenz_accuracy.py | 119 + .../parallelSDC_reloaded/lorenz_setup.py | 42 + .../parallelSDC_reloaded/nilpotency.py | 66 + .../protheroRobinsonAutonomous_accuracy.py | 126 ++ .../protheroRobinsonAutonomous_setup.py | 48 + .../protheroRobinson_accuracy.py | 126 ++ .../protheroRobinson_setup.py | 47 + .../parallelSDC_reloaded/scripts/__init__.py | 5 + .../scripts/_dataRef.json | 1999 +++++++++++++++++ .../parallelSDC_reloaded/scripts/crop.sh | 9 + .../scripts/fig01_conv.py | 97 + .../scripts/fig02_stab.py | 80 + .../scripts/fig03_lorenz.py | 176 ++ .../scripts/fig04_protheroRobinson.py | 107 + .../scripts/fig05_allenCahn.py | 131 ++ .../parallelSDC_reloaded/scripts/run.sh | 9 + .../parallelSDC_reloaded/scripts/utils.py | 1 + .../parallelSDC_reloaded/stability.py | 78 + pySDC/projects/parallelSDC_reloaded/utils.py | 324 +++ .../vanderpol_accuracy.py | 118 + .../parallelSDC_reloaded/vanderpol_setup.py | 57 + .../test_multicomponent_mesh.py | 64 + pySDC/tests/test_helpers/.gitignore | 1 + pySDC/tests/test_helpers/test_testing.py | 62 + pySDC/tests/test_hooks/test_log_to_file.py | 85 + .../test_problems/test_AllenCahn_1D_FD.py | 1 - 
pySDC/tests/test_problems/test_Brusselator.py | 40 + .../test_problems/test_GrayScottMPIFFT.py | 58 + .../test_AC/test_simple_forcing.py | 14 +- .../test_projects/test_DAE/test_DAEMesh.py | 76 + .../test_DAE/test_HookClass_DAE.py | 73 + .../test_DAE/test_SemiImplicitDAE.py | 353 +++ ..._sweeper.py => test_fully_implicit_DAE.py} | 23 +- .../tests/test_projects/test_DAE/test_misc.py | 20 - .../test_projects/test_DAE/test_problems.py | 147 +- .../test_monodomain_convergence.py | 193 ++ .../test_monodomain_iterations.py | 120 + .../test_monodomain_iterations_parallel.py | 273 +++ .../test_monodomain_stability_domain.py | 68 + .../test_parallelSDC_reloaded.py | 115 + .../test_pintsime/test_SwitchEstimator.py | 405 ++++ .../test_second_orderSDC/test_convergence.py | 20 +- .../test_second_orderSDC/test_stability.py | 59 +- pySDC/tests/test_sweepers/test_MPI_sweeper.py | 114 +- .../test_sweepers/test_Runge_Kutta_sweeper.py | 8 +- pyproject.toml | 3 +- 172 files changed, 17271 insertions(+), 2241 deletions(-) create mode 100644 docs/source/projects/monodomain.rst create mode 100644 docs/source/projects/parallelSDC_reloaded.rst create mode 100644 docs/source/projects/second_order.rst create mode 100644 pySDC/helpers/NCCL_communicator.py create mode 100644 pySDC/helpers/testing.py create mode 100644 pySDC/implementations/hooks/plotting.py create mode 100644 pySDC/implementations/problem_classes/Brusselator.py delete mode 100644 pySDC/implementations/problem_classes/TestEquation_0D_GPU.py create mode 100644 pySDC/implementations/problem_classes/generic_MPIFFT_Laplacian.py create mode 100644 pySDC/implementations/problem_classes/odeScalar.py create mode 100644 pySDC/implementations/problem_classes/odeSystem.py create mode 100644 pySDC/playgrounds/FEniCSx/HeatEquation_1D_FEniCSx_matrix_forced.py create mode 100644 pySDC/playgrounds/FEniCSx/HeatEquation_1D_FEniCSx_matrix_forced_2.py create mode 100644 pySDC/playgrounds/FEniCSx/HookClass_FEniCS_output.py create mode 100644 
pySDC/playgrounds/FEniCSx/__init__.py create mode 100644 pySDC/playgrounds/FEniCSx/heat_equation_M.py create mode 100644 pySDC/playgrounds/FEniCSx/heat_equation_raw.py create mode 100644 pySDC/playgrounds/FEniCSx/playground.py create mode 100644 pySDC/playgrounds/ML_initial_guess/README.md create mode 100644 pySDC/playgrounds/ML_initial_guess/heat.py create mode 100644 pySDC/playgrounds/ML_initial_guess/ml_heat.py create mode 100644 pySDC/playgrounds/ML_initial_guess/sweeper.py create mode 100644 pySDC/playgrounds/ML_initial_guess/tensor.py create mode 100644 pySDC/projects/DAE/misc/DAEMesh.py create mode 100644 pySDC/projects/DAE/sweepers/SemiImplicitDAE.py create mode 100644 pySDC/projects/Monodomain/README.rst create mode 100644 pySDC/projects/Monodomain/datatype_classes/my_mesh.py create mode 100644 pySDC/projects/Monodomain/etc/environment-monodomain.yml create mode 100644 pySDC/projects/Monodomain/hooks/HookClass_pde.py create mode 100644 pySDC/projects/Monodomain/hooks/HookClass_post_iter_info.py create mode 100644 pySDC/projects/Monodomain/problem_classes/MonodomainODE.py create mode 100644 pySDC/projects/Monodomain/problem_classes/TestODE.py create mode 100644 pySDC/projects/Monodomain/problem_classes/ionicmodels/cpp/__init__.py create mode 100644 pySDC/projects/Monodomain/problem_classes/ionicmodels/cpp/bindings_definitions.cpp create mode 100644 pySDC/projects/Monodomain/problem_classes/ionicmodels/cpp/bistable.h create mode 100644 pySDC/projects/Monodomain/problem_classes/ionicmodels/cpp/compilation_command.txt create mode 100644 pySDC/projects/Monodomain/problem_classes/ionicmodels/cpp/courtemanche.h create mode 100644 pySDC/projects/Monodomain/problem_classes/ionicmodels/cpp/hodgkinhuxley.h create mode 100644 pySDC/projects/Monodomain/problem_classes/ionicmodels/cpp/ionicmodel.h create mode 100644 pySDC/projects/Monodomain/problem_classes/ionicmodels/cpp/tentusscher.h create mode 100644 
pySDC/projects/Monodomain/problem_classes/ionicmodels/cpp/tentusscher_smooth.h create mode 100644 pySDC/projects/Monodomain/problem_classes/space_discretizazions/Parabolic_DCT.py create mode 100644 pySDC/projects/Monodomain/run_scripts/run_MonodomainODE.py create mode 100644 pySDC/projects/Monodomain/run_scripts/run_MonodomainODE_cli.py create mode 100644 pySDC/projects/Monodomain/run_scripts/run_TestODE.py create mode 100644 pySDC/projects/Monodomain/sweeper_classes/exponential_runge_kutta/imexexp_1st_order.py create mode 100644 pySDC/projects/Monodomain/sweeper_classes/runge_kutta/imexexp_1st_order.py create mode 100644 pySDC/projects/Monodomain/transfer_classes/TransferVectorOfDCTVectors.py create mode 100644 pySDC/projects/Monodomain/transfer_classes/Transfer_DCT_Vector.py create mode 100644 pySDC/projects/Monodomain/utils/data_management.py create mode 100644 pySDC/projects/Monodomain/visualization/show_monodomain_sol.py rename pySDC/projects/Second_orderSDC/{README.md => README.rst} (56%) delete mode 100644 pySDC/projects/Second_orderSDC/dampedharmonic_oscillator_run_stability.py create mode 100644 pySDC/projects/Second_orderSDC/harmonic_oscillator_params.py create mode 100644 pySDC/projects/Second_orderSDC/harmonic_oscillator_run_points.py create mode 100644 pySDC/projects/Second_orderSDC/harmonic_oscillator_run_stab_interval.py create mode 100644 pySDC/projects/Second_orderSDC/harmonic_oscillator_run_stability.py create mode 100644 pySDC/projects/Second_orderSDC/plot_helper.py create mode 100644 pySDC/projects/Second_orderSDC/stability_simulation.py create mode 100644 pySDC/projects/parallelSDC_reloaded/.gitignore create mode 100644 pySDC/projects/parallelSDC_reloaded/README.md create mode 100644 pySDC/projects/parallelSDC_reloaded/__init__.py create mode 100644 pySDC/projects/parallelSDC_reloaded/allenCahn_accuracy.py create mode 100644 pySDC/projects/parallelSDC_reloaded/allenCahn_setup.py create mode 100644 
pySDC/projects/parallelSDC_reloaded/chemicalReaction_accuracy.py create mode 100644 pySDC/projects/parallelSDC_reloaded/chemicalReaction_setup.py create mode 100644 pySDC/projects/parallelSDC_reloaded/convergence.py create mode 100644 pySDC/projects/parallelSDC_reloaded/jacobiElliptic_accuracy.py create mode 100644 pySDC/projects/parallelSDC_reloaded/jacobiElliptic_setup.py create mode 100644 pySDC/projects/parallelSDC_reloaded/kaps_accuracy.py create mode 100644 pySDC/projects/parallelSDC_reloaded/kaps_setup.py create mode 100644 pySDC/projects/parallelSDC_reloaded/lorenz_accuracy.py create mode 100644 pySDC/projects/parallelSDC_reloaded/lorenz_setup.py create mode 100644 pySDC/projects/parallelSDC_reloaded/nilpotency.py create mode 100644 pySDC/projects/parallelSDC_reloaded/protheroRobinsonAutonomous_accuracy.py create mode 100644 pySDC/projects/parallelSDC_reloaded/protheroRobinsonAutonomous_setup.py create mode 100644 pySDC/projects/parallelSDC_reloaded/protheroRobinson_accuracy.py create mode 100644 pySDC/projects/parallelSDC_reloaded/protheroRobinson_setup.py create mode 100644 pySDC/projects/parallelSDC_reloaded/scripts/__init__.py create mode 100644 pySDC/projects/parallelSDC_reloaded/scripts/_dataRef.json create mode 100755 pySDC/projects/parallelSDC_reloaded/scripts/crop.sh create mode 100644 pySDC/projects/parallelSDC_reloaded/scripts/fig01_conv.py create mode 100644 pySDC/projects/parallelSDC_reloaded/scripts/fig02_stab.py create mode 100644 pySDC/projects/parallelSDC_reloaded/scripts/fig03_lorenz.py create mode 100644 pySDC/projects/parallelSDC_reloaded/scripts/fig04_protheroRobinson.py create mode 100644 pySDC/projects/parallelSDC_reloaded/scripts/fig05_allenCahn.py create mode 100755 pySDC/projects/parallelSDC_reloaded/scripts/run.sh create mode 120000 pySDC/projects/parallelSDC_reloaded/scripts/utils.py create mode 100644 pySDC/projects/parallelSDC_reloaded/stability.py create mode 100644 pySDC/projects/parallelSDC_reloaded/utils.py create mode 
100644 pySDC/projects/parallelSDC_reloaded/vanderpol_accuracy.py create mode 100644 pySDC/projects/parallelSDC_reloaded/vanderpol_setup.py create mode 100644 pySDC/tests/test_datatypes/test_multicomponent_mesh.py create mode 100644 pySDC/tests/test_helpers/.gitignore create mode 100644 pySDC/tests/test_helpers/test_testing.py create mode 100644 pySDC/tests/test_hooks/test_log_to_file.py create mode 100644 pySDC/tests/test_problems/test_Brusselator.py create mode 100644 pySDC/tests/test_problems/test_GrayScottMPIFFT.py create mode 100644 pySDC/tests/test_projects/test_DAE/test_DAEMesh.py create mode 100644 pySDC/tests/test_projects/test_DAE/test_HookClass_DAE.py create mode 100644 pySDC/tests/test_projects/test_DAE/test_SemiImplicitDAE.py rename pySDC/tests/test_projects/test_DAE/{test_sweeper.py => test_fully_implicit_DAE.py} (91%) delete mode 100644 pySDC/tests/test_projects/test_DAE/test_misc.py create mode 100644 pySDC/tests/test_projects/test_monodomain/test_monodomain_convergence.py create mode 100644 pySDC/tests/test_projects/test_monodomain/test_monodomain_iterations.py create mode 100644 pySDC/tests/test_projects/test_monodomain/test_monodomain_iterations_parallel.py create mode 100644 pySDC/tests/test_projects/test_monodomain/test_monodomain_stability_domain.py create mode 100644 pySDC/tests/test_projects/test_parallelSDC_reloaded.py create mode 100644 pySDC/tests/test_projects/test_pintsime/test_SwitchEstimator.py diff --git a/.gitignore b/.gitignore index 98df6abec9..0f586897a1 100644 --- a/.gitignore +++ b/.gitignore @@ -8,6 +8,8 @@ data/* step_*.png *.pkl *.swp +*_data.json +!_dataRef.json # Created by https://www.gitignore.io @@ -158,3 +160,7 @@ Temporary Items .vscode *.cpp pySDC/playgrounds/FEniCS/jitfailure-dolfin_expression_fc28530d435fa2de045af3312fc07c3b/recompile.sh + +# videos +*.mp4 +*.mkv diff --git a/CHANGELOG.md b/CHANGELOG.md index 6636fdbfd2..6ed6fbddd7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,8 @@ :arrow_left: [Back to 
main page](./README.md) +- January 24, 2024: Version 5.4.0 contains the code for the [second order SDC paper](https://arxiv.org/abs/2310.08352) by [\@ikrom96git](https://github.com/ikrom96git). It also has some changes to the FEniCS + part, including correct treatment of boundary conditions and first steps with FEniCS-x. - July 28, 2023: For Version 5.3 a lot of pull requests got merged, thanks to [\@brownbaerchen](https://github.com/brownbaerchen), [\@tlunet](https://github.com/tlunet), [\@lisawim](https://github.com/lisawim), [\@ikrom96git](https://github.com/ikrom96git) for all the contributions. Besides the usual bugfixing and polishing, `pySDC` now comes with linear multistep methods, classical Runge Kutta methods, DAE sweepers, and more/improved projects. diff --git a/CITATION.cff b/CITATION.cff index 2f8acf4f88..8616ff5c07 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -21,9 +21,9 @@ authors: orcid: https://orcid.org/0000-0002-8869-0784 affiliation: "Hamburg University of Technology, Institute of Mathematics, 21073 Hamburg, Germany" -version: 5.3.0 +version: 5.4.3 doi: 10.5281/zenodo.594191 -date-released: 2023-07-25 +date-released: 2024-03-27 keywords: - "parallel-in-time" - "spectral deferred corrections" diff --git a/README.md b/README.md index ee21791bd8..b20d6e7b19 100644 --- a/README.md +++ b/README.md @@ -32,8 +32,7 @@ implemented. - Continuous integration via [GitHub Actions](https://github.com/Parallel-in-Time/pySDC/actions) and [Gitlab CI](https://gitlab.hzdr.de/r.speck/pysdc/-/pipelines) -- Fully compatible with Python 3.7 - 3.10, runs at least on Ubuntu and - MacOS +- Fully compatible with Python 3.8 - 3.10, runs at least on Ubuntu ## Getting started @@ -106,7 +105,7 @@ The JU receives support from the European Union's Horizon 2020 research and innovation programme and Belgium, France, Germany, and Switzerland. 
This project also received funding from the [German Federal Ministry of Education and Research](https://www.bmbf.de/bmbf/en/home/home_node.html) -(BMBF) grant 16HPC047. The project also received help from the +(BMBF) grants 16HPC047 and 16ME0679K. Supported by the European Union - NextGenerationEU. The project also received help from the [Helmholtz Platform for Research Software Engineering - Preparatory Study (HiRSE_PS)](https://www.helmholtz-hirse.de/). diff --git a/docs/contrib/02_continuous_integration.md b/docs/contrib/02_continuous_integration.md index 98962944b6..2a81720210 100644 --- a/docs/contrib/02_continuous_integration.md +++ b/docs/contrib/02_continuous_integration.md @@ -50,14 +50,14 @@ done # apply black and stage the changes that black made if [[ $files != "" ]] -then +then black $files git add $files fi ``` You may need to run `chmod +x` on the file to allow it to be executed. -Be aware that the hook will alter files you may have opened in an editor whenever you make a commit, which may confuse you(r editor). +Be aware that the hook will alter files you may have opened in an editor whenever you make a commit, which may confuse you(r editor). To automate flakeheaven, we want to write a hook that alters the commit message in case any errors are detected. This gives us the choice of aborting the commit and fixing the issues, or we can go ahead and commit them and worry about flakeheaven only when the time comes to do a pull request. 
To obtain this functionality, add the following to `/.git/hooks/prepare-commit-msg`: @@ -91,7 +91,7 @@ This is done using [pytest](https://docs.pytest.org/en/7.2.x/), and runs all the ```bash # Install required packages (works also with conda/mamba) -pip install pytest<7.2.0 pytest-benchmark coverage[toml] +pip install pytest pytest-benchmark pytest-timeout coverage[toml] # Run tests pytest -v pySDC/tests ``` @@ -102,6 +102,13 @@ pytest -v pySDC/tests > ```bash > pytest -v pySDC/tests/test_nodes.py # only test nodes generation > ``` +> +> You can also run one specific test only like this: +> +> ```bash +> pytest -v pySDC/tests/test_nodes.py::test_nodesGeneration # only test_nodesGeneration function +> pytest -v pySDC/tests/test_nodes.py::test_nodesGeneration[LEGENDRE] # only test_nodesGeneration with LEGENDRE nodes +> ``` ## Running CI on HPC from pull requests diff --git a/docs/source/conf.py b/docs/source/conf.py index e4263ea756..5687b3ea35 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -62,7 +62,7 @@ # General information about the project. project = 'pySDC' -copyright = '2023, Robert Speck' +copyright = '2024, Robert Speck' author = 'Robert Speck, Thibaut Lunet, Thomas Baumann, Lisa Wimmer, Ikrom Akramov' # The version info for the project you're documenting, acts as replacement for @@ -70,9 +70,9 @@ # built documents. # # The short X.Y version. -version = '5.3' +version = '5.4' # The full version, including alpha/beta/rc tags. -release = '5.3.0' +release = '5.4.3' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
diff --git a/docs/source/index.rst b/docs/source/index.rst index 14970ed97c..8029dfa7c3 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -51,6 +51,8 @@ Projects projects/Resilience.rst projects/DAE.rst projects/compression.rst + projects/second_order.rst + projects/monodomain.rst API documentation diff --git a/docs/source/projects/monodomain.rst b/docs/source/projects/monodomain.rst new file mode 100644 index 0000000000..91c21b7b7a --- /dev/null +++ b/docs/source/projects/monodomain.rst @@ -0,0 +1 @@ +.. include:: /../../pySDC/projects/Monodomain/README.rst \ No newline at end of file diff --git a/docs/source/projects/parallelSDC_reloaded.rst b/docs/source/projects/parallelSDC_reloaded.rst new file mode 100644 index 0000000000..25815b8e93 --- /dev/null +++ b/docs/source/projects/parallelSDC_reloaded.rst @@ -0,0 +1 @@ +.. include:: /../../pySDC/projects/parallelSDC_reloaded/README.rst \ No newline at end of file diff --git a/docs/source/projects/second_order.rst b/docs/source/projects/second_order.rst new file mode 100644 index 0000000000..1dd6fa3a9d --- /dev/null +++ b/docs/source/projects/second_order.rst @@ -0,0 +1 @@ +.. 
include:: /../../pySDC/projects/Second_orderSDC/README.rst diff --git a/etc/environment-base.yml b/etc/environment-base.yml index 92f4a24904..97f7fbeb62 100644 --- a/etc/environment-base.yml +++ b/etc/environment-base.yml @@ -3,7 +3,7 @@ channels: - conda-forge - defaults dependencies: - - numpy<1.25 + - numpy - scipy>=0.17.1 - matplotlib>=3.0 - sympy>=1.0 diff --git a/pySDC/core/Lagrange.py b/pySDC/core/Lagrange.py index 6ef2d09653..3733eee1a0 100644 --- a/pySDC/core/Lagrange.py +++ b/pySDC/core/Lagrange.py @@ -88,7 +88,7 @@ class LagrangeApproximation(object): The associated barycentric weights """ - def __init__(self, points): + def __init__(self, points, fValues=None): points = np.asarray(points).ravel() diffs = points[:, None] - points[None, :] @@ -110,6 +110,20 @@ def analytic(diffs): self.points = points self.weights = weights + # Store function values if provided + if fValues is not None: + fValues = np.asarray(fValues) + if fValues.shape != points.shape: + raise ValueError(f'fValues {fValues.shape} has not the correct shape: {points.shape}') + self.fValues = fValues + + def __call__(self, t): + assert self.fValues is not None, "cannot evaluate polynomial without fValues" + t = np.asarray(t) + values = self.getInterpolationMatrix(t.ravel()).dot(self.fValues) + values.shape = t.shape + return values + @property def n(self): return self.points.size diff --git a/pySDC/core/Problem.py b/pySDC/core/Problem.py index c9a6f87322..4c5202a9b2 100644 --- a/pySDC/core/Problem.py +++ b/pySDC/core/Problem.py @@ -135,3 +135,32 @@ def generate_scipy_reference_solution(self, eval_rhs, t, u_init=None, t_init=Non u_shape = u_init.shape return solve_ivp(eval_rhs, (t_init, t), u_init.flatten(), **kwargs).y[:, -1].reshape(u_shape) + + def get_fig(self): + """ + Get a figure suitable to plot the solution of this problem + + Returns + ------- + self.fig : matplotlib.pyplot.figure.Figure + """ + raise NotImplementedError + + def plot(self, u, t=None, fig=None): + r""" + Plot the 
solution. Please supply a figure with the same structure as returned by ``self.get_fig``. + + Parameters + ---------- + u : dtype_u + Solution to be plotted + t : float + Time to display at the top of the figure + fig : matplotlib.pyplot.figure.Figure + Figure with the correct structure + + Returns + ------- + None + """ + raise NotImplementedError diff --git a/pySDC/core/Sweeper.py b/pySDC/core/Sweeper.py index 5eb54b11b8..2761f2acd8 100644 --- a/pySDC/core/Sweeper.py +++ b/pySDC/core/Sweeper.py @@ -1,4 +1,5 @@ import logging +import warnings import numpy as np import scipy as sp @@ -15,7 +16,7 @@ class _Pars(FrozenClass): def __init__(self, pars): self.do_coll_update = False - self.initial_guess = 'spread' + self.initial_guess = 'spread' # default value (see also below) self.skip_residual_computation = () # gain performance at the cost of correct residual output for k, v in pars.items(): @@ -58,13 +59,13 @@ def __init__(self, params): params['collocation_class'] = CollBase # prepare random generator for initial guess - if params.get('initial_guess', 'spread') == 'random': + if params.get('initial_guess', 'spread') == 'random': # default value (see also above) params['random_seed'] = params.get('random_seed', 1984) self.rng = np.random.RandomState(params['random_seed']) self.params = _Pars(params) - self.coll = params['collocation_class'](**params) + self.coll: CollBase = params['collocation_class'](**params) if not self.coll.right_is_node and not self.params.do_coll_update: self.logger.warning( @@ -117,6 +118,15 @@ def rho(x): d = opt.minimize(rho, x0, method='Nelder-Mead') QDmat[1:, 1:] = np.linalg.inv(np.diag(d.x)) self.parallelizable = True + elif qd_type.startswith('MIN-SR-FLEX'): + m = QDmat.shape[0] - 1 + try: + k = abs(int(qd_type[11:])) + except ValueError: + k = 1 + d = min(k, m) + QDmat[1:, 1:] = np.diag(coll.nodes) / d + self.parallelizable = True elif qd_type in ['MIN_GT', 'MIN-SR-NS']: m = QDmat.shape[0] - 1 QDmat[1:, 1:] = np.diag(coll.nodes) / m @@ 
-271,39 +281,119 @@ def rho(x): elif qd_type == "MIN-SR-S": M = QDmat.shape[0] - 1 - Q = coll.Qmat[1:, 1:] - nodes = coll.nodes + quadType = coll.quad_type + nodeType = coll.node_type + + # Main function to compute coefficients + def computeCoeffs(M, a=None, b=None): + """ + Compute diagonal coefficients for a given number of nodes M. + If `a` and `b` are given, then it uses as initial guess: + + >>> a * nodes**b / M + + If `a` is not given, then do not care about `b` and uses as initial guess: + + >>> nodes / M + + Parameters + ---------- + M : int + Number of collocation nodes. + a : float, optional + `a` coefficient for the initial guess. + b : float, optional + `b` coefficient for the initial guess. + + Returns + ------- + coeffs : array + The diagonal coefficients. + nodes : array + The nodes associated to the current coefficients. + """ + collM = CollBase(num_nodes=M, node_type=nodeType, quad_type=quadType) + + QM = collM.Qmat[1:, 1:] + nodesM = collM.nodes + + if quadType in ['LOBATTO', 'RADAU-LEFT']: + QM = QM[1:, 1:] + nodesM = nodesM[1:] + nCoeffs = len(nodesM) + + if nCoeffs == 1: + coeffs = np.diag(QM) + + else: + + def nilpotency(coeffs): + """Function verifying the nilpotency from given coefficients""" + coeffs = np.asarray(coeffs) + kMats = [(1 - z) * np.eye(nCoeffs) + z * np.diag(1 / coeffs) @ QM for z in nodesM] + vals = [np.linalg.det(K) - 1 for K in kMats] + return np.array(vals) + + if a is None: + coeffs0 = nodesM / M + else: + coeffs0 = a * nodesM**b / M + + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + coeffs = sp.optimize.fsolve(nilpotency, coeffs0, xtol=1e-15) + + # Handle first node equal to zero + if quadType in ['LOBATTO', 'RADAU-LEFT']: + coeffs = np.asarray([0.0] + list(coeffs)) + nodesM = np.asarray([0.0] + list(nodesM)) - nCoeffs = M - if coll.quad_type in ['LOBATTO', 'RADAU-LEFT']: - nCoeffs -= 1 - Q = Q[1:, 1:] - nodes = nodes[1:] + return coeffs, nodesM - def func(coeffs): - coeffs = np.asarray(coeffs) - 
kMats = [(1 - z) * np.eye(nCoeffs) + z * np.diag(1 / coeffs) @ Q for z in nodes] - vals = [np.linalg.det(K) - 1 for K in kMats] - return np.array(vals) + def fit(coeffs, nodes): + """Function fitting given coefficients to a power law""" - coeffs = sp.optimize.fsolve(func, nodes / M, xtol=1e-13) + def lawDiff(ab): + a, b = ab + return np.linalg.norm(a * nodes**b - coeffs) - if coll.quad_type in ['LOBATTO', 'RADAU-LEFT']: - coeffs = [0] + list(coeffs) + sol = sp.optimize.minimize(lawDiff, [1.0, 1.0], method="nelder-mead") + return sol.x + + # Compute coefficients incrementaly + a, b = None, None + m0 = 2 if quadType in ['LOBATTO', 'RADAU-LEFT'] else 1 + for m in range(m0, M + 1): + coeffs, nodes = computeCoeffs(m, a, b) + if m > 1: + a, b = fit(coeffs * m, nodes) QDmat[1:, 1:] = np.diag(coeffs) + self.parallelizable = True + + elif qd_type == "VDHS": + # coefficients from Van der Houwen & Sommeijer, 1991 + + m = QDmat.shape[0] - 1 + if m == 4 and coll.node_type == 'LEGENDRE' and coll.quad_type == "RADAU-RIGHT": + coeffs = [3055 / 9532, 531 / 5956, 1471 / 8094, 1848 / 7919] + else: + raise NotImplementedError('no VDHS diagonal coefficients for this node configuration') + + QDmat[1:, 1:] = np.diag(coeffs) self.parallelizable = True else: # see if an explicit preconditioner with this name is available try: QDmat = self.get_Qdelta_explicit(coll, qd_type) - self.logger.warn(f'Using explicit preconditioner \"{qd_type}\" on the left hand side!') + self.logger.warning(f'Using explicit preconditioner \"{qd_type}\" on the left hand side!') except NotImplementedError: raise NotImplementedError(f'qd_type implicit "{qd_type}" not implemented') # check if we got not more than a lower triangular matrix + # TODO : this should be a regression test, not run-time ... np.testing.assert_array_equal( np.triu(QDmat, k=1), np.zeros(QDmat.shape), err_msg='Lower triangular matrix expected!' 
) @@ -349,6 +439,10 @@ def predict(self): if self.params.initial_guess == 'spread': L.u[m] = P.dtype_u(L.u[0]) L.f[m] = P.eval_f(L.u[m], L.time + L.dt * self.coll.nodes[m - 1]) + # copy u[0] and RHS evaluation to all collocation nodes + elif self.params.initial_guess == 'copy': + L.u[m] = P.dtype_u(L.u[0]) + L.f[m] = P.dtype_f(L.f[0]) # start with zero everywhere elif self.params.initial_guess == 'zero': L.u[m] = P.dtype_u(init=P.init, val=0.0) diff --git a/pySDC/helpers/NCCL_communicator.py b/pySDC/helpers/NCCL_communicator.py new file mode 100644 index 0000000000..155c47622b --- /dev/null +++ b/pySDC/helpers/NCCL_communicator.py @@ -0,0 +1,98 @@ +from mpi4py import MPI +from cupy.cuda import nccl +import cupy as cp +import numpy as np + + +class NCCLComm(object): + """ + Wraps an MPI communicator and performs some calls to NCCL functions instead. + """ + + def __init__(self, comm): + """ + Args: + comm (mpi4py.Intracomm): MPI communicator + """ + self.commMPI = comm + + uid = comm.bcast(nccl.get_unique_id(), root=0) + self.commNCCL = nccl.NcclCommunicator(comm.size, uid, comm.rank) + + def __getattr__(self, name): + """ + Pass calls that are not explicitly overridden by NCCL functionality on to the MPI communicator. + When performing any operations that depend on data, we have to synchronize host and device beforehand. + + Args: + Name (str): Name of the requested attribute + """ + if name not in ['size', 'rank', 'Get_rank', 'Get_size', 'Split']: + cp.cuda.get_current_stream().synchronize() + return getattr(self.commMPI, name) + + @staticmethod + def get_dtype(data): + """ + As NCCL doesn't support complex numbers, we have to act as if we're sending two real numbers if using complex. 
+ """ + dtype = data.dtype + if dtype in [np.dtype('float32'), np.dtype('complex64')]: + return nccl.NCCL_FLOAT32 + elif dtype in [np.dtype('float64'), np.dtype('complex128')]: + return nccl.NCCL_FLOAT64 + elif dtype in [np.dtype('int32')]: + return nccl.NCCL_INT32 + elif dtype in [np.dtype('int64')]: + return nccl.NCCL_INT64 + else: + raise NotImplementedError(f'Don\'t know what NCCL dtype to use to send data of dtype {data.dtype}!') + + @staticmethod + def get_count(data): + """ + As NCCL doesn't support complex numbers, we have to act as if we're sending two real numbers if using complex. + """ + if cp.iscomplexobj(data): + return data.size * 2 + else: + return data.size + + def get_op(self, MPI_op): + if MPI_op == MPI.SUM: + return nccl.NCCL_SUM + elif MPI_op == MPI.PROD: + return nccl.NCCL_PROD + elif MPI_op == MPI.MAX: + return nccl.NCCL_MAX + elif MPI_op == MPI.MIN: + return nccl.NCCL_MIN + else: + raise NotImplementedError('Don\'t know what NCCL operation to use to replace this MPI operation!') + + def Reduce(self, sendbuf, recvbuf, op=MPI.SUM, root=0): + dtype = self.get_dtype(sendbuf) + count = self.get_count(sendbuf) + op = self.get_op(op) + recvbuf = cp.empty(1) if recvbuf is None else recvbuf + stream = cp.cuda.get_current_stream() + + self.commNCCL.reduce( + sendbuf=sendbuf.data.ptr, + recvbuf=recvbuf.data.ptr, + count=count, + datatype=dtype, + op=op, + root=root, + stream=stream.ptr, + ) + + def Allreduce(self, sendbuf, recvbuf, op=MPI.SUM): + dtype = self.get_dtype(sendbuf) + count = self.get_count(sendbuf) + op = self.get_op(op) + stream = cp.cuda.get_current_stream() + + self.commNCCL.allReduce( + sendbuf=sendbuf.data.ptr, recvbuf=recvbuf.data.ptr, count=count, datatype=dtype, op=op, stream=stream.ptr + ) diff --git a/pySDC/helpers/problem_helper.py b/pySDC/helpers/problem_helper.py index 31d1caeca2..6a39286262 100644 --- a/pySDC/helpers/problem_helper.py +++ b/pySDC/helpers/problem_helper.py @@ -156,7 +156,7 @@ def get_finite_difference_matrix( 
# -- boundary condition parameters bc_params[iS] = {**bc_params_defaults, **bc_params[iS]} - par = bc_params[iS] + par = bc_params[iS].copy() # -- extract parameters and raise an error if additionals val = par.pop('val') diff --git a/pySDC/helpers/testing.py b/pySDC/helpers/testing.py new file mode 100644 index 0000000000..f1b6187348 --- /dev/null +++ b/pySDC/helpers/testing.py @@ -0,0 +1,104 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Fri Feb 9 14:41:43 2024 + +Helpers module for testing utilities +""" +import os +import json +import warnings +import numpy as np + + +class DataChecker: + """ + Object allowing to quickly generate and check generated data from scripts. + + Here is an example of use for one given script: + + >>> from pySDC.helpers.testing import DataChecker + >>> + >>> data = DataChecker(__file__) + >>> # ... some computations ... + >>> data.storeAndCheck('solution', solution) + >>> # ... some other computations ... + >>> data.storeAndCheck('errors', errors) + >>> # ... ploting some figures + >>> data.writeToJSON() # end of script + + The `storeAndCheck` method use a unique key as first argument, and the + data as second argument, into list format. + Calling this method will store the data into a cache variable, and compare + with reference data stored in `_dataRef.json` (if it exists, see below ...). + Finally, the `writeToJSON` method saves the cache variable into a json + file `_data.json`. + + When there is no `_dataRef.json` in the script directory, + executing the script output some warning telling that there is no + reference data to compare with. + To remove those warnings (and properly test data), just rename the + `_data.json` into `_dataRef.json`. + Then re-running the script will then compare the newly generated data with + the reference data stored into `_dataRef.json`, and raise an error if there + is some differences. 
+ + Important + --------- + + - using the `__file__` built-in variable is necessary to save data in the + script directory, independently of the working directory. + - several script in the same directory can use the DataChecker, which + implies that the key provided to `storeAndCheck` must be unique across + all directory scripts. If not, the same key from previous directory will + simply be overwritten in the cached variable and the `_data.ref` file. + """ + + def __init__(self, filePath): + path = '/' + os.path.join(*filePath.split('/')[:-1]) + self._data = {} # cache for data + self._dataRef = None # cache for reference data + self._dataFile = os.path.join(path, '_data.json') + self._dataRefFile = os.path.join(path, '_dataRef.json') + + def storeAndCheck(self, key, data, rtol=1e-5, atol=1e-8): + """ + Store data into cache, and check with eventual reference data + + Parameters + ---------- + key : str + Unique key (project wide) for the data. + data : list or array-like + The data that has to be stored. 
+ rtol : float + Relative tolerance + atol : float + Absolute tolerance + """ + self._data[key] = list(data) + if self._dataRef is None: + try: + with open(self._dataRefFile, "r") as f: + self._dataRef = json.load(f) + except FileNotFoundError: + warnings.warn(f"no reference data to check key:{key}") + return # pragma: no cover + + assert key in self._dataRef, f"key:{key} not in reference data" + + data = self._data[key] + ref = self._dataRef[key] + + assert len(data) == len(ref), f"data with len:{len(data)}) cannot be compared to ref with len:{len(ref)})" + assert np.allclose( + data, ref, atol=atol, rtol=rtol, equal_nan=True + ), f"difference between data:{data} and ref:{ref}" + + def writeToJSON(self): + """Write cached data into a json file""" + if os.path.isfile(self._dataFile): + with open(self._dataFile, "r") as f: + self._data.update(json.load(f)) + with open(self._dataFile, "w") as f: + json.dump(self._data, f) diff --git a/pySDC/implementations/convergence_controller_classes/adaptivity.py b/pySDC/implementations/convergence_controller_classes/adaptivity.py index f4efe12211..75a4faaa9f 100644 --- a/pySDC/implementations/convergence_controller_classes/adaptivity.py +++ b/pySDC/implementations/convergence_controller_classes/adaptivity.py @@ -179,7 +179,8 @@ def dependencies(self, controller, description, **kwargs): description=description, params={}, ) - self.interpolator = controller.convergence_controllers[-1] + if self.params.interpolate_between_restarts: + self.interpolator = controller.convergence_controllers[-1] return None def get_convergence(self, controller, S, **kwargs): @@ -207,6 +208,7 @@ def setup(self, controller, params, description, **kwargs): 'residual_max_tol': 1e9, 'maxiter': description['sweeper_params'].get('maxiter', 99), 'interpolate_between_restarts': True, + 'abort_at_growing_residual': True, **super().setup(controller, params, description, **kwargs), } if defaults['restol_rel']: @@ -231,7 +233,12 @@ def determine_restart(self, 
controller, S, **kwargs): self.trigger_restart_upon_nonconvergence(S) elif self.get_local_error_estimate(controller, S, **kwargs) > self.params.e_tol: S.status.restart = True - elif S.status.time_size == 1 and self.res_last_iter < S.levels[0].status.residual and S.status.iter > 0: + elif ( + S.status.time_size == 1 + and self.res_last_iter < S.levels[0].status.residual + and S.status.iter > 0 + and self.params.abort_at_growing_residual + ): self.trigger_restart_upon_nonconvergence(S) elif S.levels[0].status.residual > self.params.residual_max_tol: self.trigger_restart_upon_nonconvergence(S) diff --git a/pySDC/implementations/datatype_classes/mesh.py b/pySDC/implementations/datatype_classes/mesh.py index 6fdf1746bd..ce99ebc78a 100644 --- a/pySDC/implementations/datatype_classes/mesh.py +++ b/pySDC/implementations/datatype_classes/mesh.py @@ -80,7 +80,7 @@ def __array_ufunc__(self, ufunc, method, *inputs, out=None, **kwargs): else: args.append(input_) - results = super(mesh, self).__array_ufunc__(ufunc, method, *args, **kwargs).view(mesh) + results = super(mesh, self).__array_ufunc__(ufunc, method, *args, **kwargs).view(type(self)) if type(self) == type(results): results._comm = comm return results @@ -149,74 +149,66 @@ def bcast(self, root=None, comm=None): return self -class imex_mesh(object): +class MultiComponentMesh(mesh): + r""" + Generic mesh with multiple components. + + To make a specific multi-component mesh, derive from this class and list the components as strings in the class + attribute ``components``. An example: + + ``` + class imex_mesh(MultiComponentMesh): + components = ['impl', 'expl'] + ``` + + Instantiating such a mesh will expand the mesh along an added first dimension for each component and allow access + to the components with ``.``. 
Continuing the above example: + + ``` + init = ((100,), None, numpy.dtype('d')) + f = imex_mesh(init) + f.shape # (2, 100) + f.expl.shape # (100,) + ``` + + Note that the components are not attributes of the mesh: ``"expl" in dir(f)`` will return False! Rather, the + components are handled in ``__getattr__``. This function is called if an attribute is not found and returns a view + on to the component if appropriate. Importantly, this means that you cannot name a component like something that + is already an attribute of ``mesh`` or ``numpy.ndarray`` because this will not result in calls to ``__getattr__``. + + There are a couple more things to keep in mind: + - Because a ``MultiComponentMesh`` is just a ``numpy.ndarray`` with one more dimension, all components must have + the same shape. + - You can use the entire ``MultiComponentMesh`` like a ``numpy.ndarray`` in operations that accept arrays, but make + sure that you really want to apply the same operation on all components if you do. + - If you omit the assignment operator ``[:]`` during assignment, you will not change the mesh at all. Omitting this + leads to all kinds of trouble throughout the code. But here you really cannot get away without. 
""" - RHS data type for meshes with implicit and explicit components - This data type can be used to have RHS with 2 components (here implicit and explicit) + components = [] - Attributes: - impl (mesh.mesh): implicit part - expl (mesh.mesh): explicit part - """ - - def __init__(self, init, val=0.0): - """ - Initialization routine - - Args: - init: can either be a tuple (one int per dimension) or a number (if only one dimension is requested) - or another imex_mesh object - val (float): an initial number (default: 0.0) - Raises: - DataError: if init is none of the types above - """ - - if isinstance(init, type(self)): - self.impl = mesh(init.impl) - self.expl = mesh(init.expl) - elif ( - isinstance(init, tuple) - and (init[1] is None or isinstance(init[1], MPI.Intracomm)) - and isinstance(init[2], np.dtype) - ): - self.impl = mesh(init, val=val) - self.expl = mesh(init, val=val) - # something is wrong, if none of the ones above hit + def __new__(cls, init, *args, **kwargs): + if isinstance(init, tuple): + shape = (init[0],) if type(init[0]) is int else init[0] + obj = super().__new__(cls, ((len(cls.components), *shape), *init[1:]), *args, **kwargs) else: - raise DataError('something went wrong during %s initialization' % type(self)) + obj = super().__new__(cls, init, *args, **kwargs) + return obj -class comp2_mesh(object): - """ - RHS data type for meshes with 2 components + def __getattr__(self, name): + if name in self.components: + if self.shape[0] == len(self.components): + return self[self.components.index(name)] + else: + raise AttributeError(f'Cannot access {name!r} in {type(self)!r} because the shape is unexpected.') + else: + raise AttributeError(f"{type(self)!r} does not have attribute {name!r}!") - Attributes: - comp1 (mesh.mesh): first part - comp2 (mesh.mesh): second part - """ - def __init__(self, init, val=0.0): - """ - Initialization routine +class imex_mesh(MultiComponentMesh): + components = ['impl', 'expl'] - Args: - init: can either be a tuple 
(one int per dimension) or a number (if only one dimension is requested) - or another comp2_mesh object - Raises: - DataError: if init is none of the types above - """ - if isinstance(init, type(self)): - self.comp1 = mesh(init.comp1) - self.comp2 = mesh(init.comp2) - elif ( - isinstance(init, tuple) - and (init[1] is None or isinstance(init[1], MPI.Intracomm)) - and isinstance(init[2], np.dtype) - ): - self.comp1 = mesh(init, val=val) - self.comp2 = mesh(init, val=val) - # something is wrong, if none of the ones above hit - else: - raise DataError('something went wrong during %s initialization' % type(self)) +class comp2_mesh(MultiComponentMesh): + components = ['comp1', 'comp2'] diff --git a/pySDC/implementations/hooks/log_solution.py b/pySDC/implementations/hooks/log_solution.py index f6a1980558..d6c85b8e43 100644 --- a/pySDC/implementations/hooks/log_solution.py +++ b/pySDC/implementations/hooks/log_solution.py @@ -1,4 +1,7 @@ from pySDC.core.Hooks import hooks +import pickle +import os +import numpy as np class LogSolution(hooks): @@ -63,3 +66,79 @@ def post_iteration(self, step, level_number): type='u', value=L.uend, ) + + +class LogToFile(hooks): + r""" + Hook for logging the solution to file after the step using pickle. + + Please configure the hook to your liking by manipulating class attributes. + You must set a custom path to a directory like so: + + ``` + LogToFile.path = '/my/directory/' + ``` + + Keep in mind that the hook will overwrite files without warning! + You can give a custom file name by setting the ``file_name`` class attribute and give a custom way of rendering the + index associated with individual files by giving a different lambda function ``format_index`` class attribute. This + lambda should accept one index and return one string. + + You can also give a custom ``logging_condition`` lambda, accepting the current level if you want to log selectively. + + Importantly, you may need to change ``process_solution``. 
By default, this will return a numpy view of the solution. + Of course, if you are not using numpy, you need to change this. Again, this is a lambda accepting the level. + + After the fact, you can use the classmethod `get_path` to get the path to a certain data or the `load` function to + directly load the solution at a given index. Just configure the hook like you did when you recorded the data + beforehand. + + Finally, be aware that using this hook with MPI parallel runs may lead to different tasks overwriting files. Make + sure to give a different `file_name` for each task that writes files. + """ + + path = None + file_name = 'solution' + logging_condition = lambda L: True + process_solution = lambda L: {'t': L.time + L.dt, 'u': L.uend.view(np.ndarray)} + format_index = lambda index: f'{index:06d}' + + def __init__(self): + super().__init__() + self.counter = 0 + + if self.path is None: + raise ValueError('Please set a path for logging as the class attribute `LogToFile.path`!') + + if os.path.isfile(self.path): + raise ValueError( + f'{self.path!r} is not a valid path to log to because a file of the same name exists. 
Please supply a directory' + ) + + if not os.path.isdir(self.path): + os.mkdir(self.path) + + def post_step(self, step, level_number): + if level_number > 0: + return None + + L = step.levels[level_number] + + if type(self).logging_condition(L): + path = self.get_path(self.counter) + data = type(self).process_solution(L) + + with open(path, 'wb') as file: + pickle.dump(data, file) + + self.counter += 1 + + @classmethod + def get_path(cls, index): + return f'{cls.path}/{cls.file_name}_{cls.format_index(index)}.pickle' + + @classmethod + def load(cls, index): + path = cls.get_path(index) + with open(path, 'rb') as file: + return pickle.load(file) diff --git a/pySDC/implementations/hooks/plotting.py b/pySDC/implementations/hooks/plotting.py new file mode 100644 index 0000000000..95b22d368d --- /dev/null +++ b/pySDC/implementations/hooks/plotting.py @@ -0,0 +1,76 @@ +from pySDC.core.Hooks import hooks +import matplotlib.pyplot as plt + + +class PlottingHook(hooks): # pragma: no cover + save_plot = None # Supply a string to the path where you want to save + live_plot = 1e-9 # Supply `None` if you don't want live plotting + + def __init__(self): + super().__init__() + self.plot_counter = 0 + + def pre_run(self, step, level_number): + prob = step.levels[level_number].prob + self.fig = prob.get_fig() + + def plot(self, step, level_number, plot_ic=False): + level = step.levels[level_number] + prob = level.prob + + if plot_ic: + u = level.u[0] + else: + level.sweep.compute_end_point() + u = level.uend + + prob.plot(u=u, t=step.time, fig=self.fig) + + if self.save_plot is not None: + path = f'{self.save_plot}_{self.plot_counter:04d}.png' + self.fig.savefig(path, dpi=100) + self.logger.log(25, f'Saved figure {path!r}.') + + if self.live_plot is not None: + plt.pause(self.live_plot) + + self.plot_counter += 1 + + +class PlotPostStep(PlottingHook): # pragma: no cover + """ + Call a plotting function of the problem after every step + """ + + plot_every = 1 + + def __init__(self): 
+ super().__init__() + self.skip_counter = 0 + + def pre_run(self, step, level_number): + if level_number > 0: + return + super().pre_run(step, level_number) + self.plot(step, level_number, plot_ic=True) + + def post_step(self, step, level_number): + """ + Call the plotting function after the step + + Args: + step (pySDC.Step.step): The current step + level_number (int): Number of current level + + Returns: + None + """ + if level_number > 0: + return + + self.skip_counter += 1 + + if self.skip_counter % self.plot_every >= 1: + return + + self.plot(step, level_number) diff --git a/pySDC/implementations/problem_classes/AllenCahn_1D_FD.py b/pySDC/implementations/problem_classes/AllenCahn_1D_FD.py index af1c3ca18f..ec5a02cd65 100644 --- a/pySDC/implementations/problem_classes/AllenCahn_1D_FD.py +++ b/pySDC/implementations/problem_classes/AllenCahn_1D_FD.py @@ -70,6 +70,7 @@ def __init__( newton_tol=1e-12, interval=(-0.5, 0.5), stop_at_nan=True, + stop_at_maxiter=False, ): # we assert that nvars looks very particular here.. this will be necessary for coarsening in space later on if (nvars + 1) % 2: @@ -85,6 +86,7 @@ def __init__( 'newton_tol', 'interval', 'stop_at_nan', + 'stop_at_maxiter', localVars=locals(), readOnly=True, ) @@ -185,7 +187,11 @@ def solve_system(self, rhs, factor, u0, t): self.logger.warning('Newton got nan after %i iterations...' % n) if n == self.newton_maxiter: - self.logger.warning('Newton did not converge after %i iterations, error is %s' % (n, res)) + msg = 'Newton did not converge after %i iterations, error is %s' % (n, res) + if self.stop_at_maxiter: + raise ProblemError(msg) + else: + self.logger.warning(msg) me = self.dtype_u(self.init) me[:] = u[:] @@ -226,19 +232,18 @@ def eval_f(self, u, t): def u_exact(self, t): r""" - Routine to compute the exact solution at time :math:`t`. + Routine to return initial condition or the exact solution Parameters ---------- t : float - Time of the exact solution. 
+ Time at which the exact solution is computed. Returns ------- me : dtype_u - The exact solution. + The exact solution (in space and time). """ - v = 3.0 * np.sqrt(2) * self.eps * self.dw me = self.dtype_u(self.init, val=0.0) me[:] = 0.5 * (1 + np.tanh((self.xvalues - v * t) / (np.sqrt(2) * self.eps))) @@ -665,19 +670,18 @@ def eval_f(self, u, t): def u_exact(self, t): r""" - Routine to compute the exact solution at time :math:`t`. + Routine to return initial condition or the exact solution. Parameters ---------- t : float - Time of the exact solution. + Time at which the approximated exact solution is computed. Returns ------- me : dtype_u - The exact solution. + The approximated exact solution. """ - v = 3.0 * np.sqrt(2) * self.eps * self.dw me = self.dtype_u(self.init, val=0.0) me[:] = 0.5 * (1 + np.tanh((self.radius - abs(self.xvalues) - v * t) / (np.sqrt(2) * self.eps))) diff --git a/pySDC/implementations/problem_classes/AllenCahn_MPIFFT.py b/pySDC/implementations/problem_classes/AllenCahn_MPIFFT.py index 17f770d218..577a7e2646 100644 --- a/pySDC/implementations/problem_classes/AllenCahn_MPIFFT.py +++ b/pySDC/implementations/problem_classes/AllenCahn_MPIFFT.py @@ -1,15 +1,11 @@ import numpy as np from mpi4py import MPI -from mpi4py_fft import PFFT - -from pySDC.core.Errors import ProblemError -from pySDC.core.Problem import ptype -from pySDC.implementations.datatype_classes.mesh import mesh, imex_mesh +from pySDC.implementations.problem_classes.generic_MPIFFT_Laplacian import IMEX_Laplacian_MPIFFT from mpi4py_fft import newDistArray -class allencahn_imex(ptype): +class allencahn_imex(IMEX_Laplacian_MPIFFT): r""" Example implementing the :math:`N`-dimensional Allen-Cahn equation with periodic boundary conditions :math:`u \in [0, 1]^2` @@ -64,68 +60,21 @@ class allencahn_imex(ptype): .. 
[1] https://mpi4py-fft.readthedocs.io/en/latest/ """ - dtype_u = mesh - dtype_f = imex_mesh - def __init__( self, - nvars=None, eps=0.04, radius=0.25, - spectral=None, dw=0.0, - L=1.0, init_type='circle', - comm=MPI.COMM_WORLD, + **kwargs, ): - """Initialization routine""" - - if nvars is None: - nvars = (128, 128) - - if not (isinstance(nvars, tuple) and len(nvars) > 1): - raise ProblemError('Need at least two dimensions') - - # Creating FFT structure - ndim = len(nvars) - axes = tuple(range(ndim)) - self.fft = PFFT(comm, list(nvars), axes=axes, dtype=np.float64, collapse=True) - - # get test data to figure out type and dimensions - tmp_u = newDistArray(self.fft, spectral) - - # invoke super init, passing the communicator and the local dimensions as init - super().__init__(init=(tmp_u.shape, comm, tmp_u.dtype)) - self._makeAttributeAndRegister( - 'nvars', 'eps', 'radius', 'spectral', 'dw', 'L', 'init_type', 'comm', localVars=locals(), readOnly=True - ) - - L = np.array([self.L] * ndim, dtype=float) - - # get local mesh - X = np.ogrid[self.fft.local_slice(False)] - N = self.fft.global_shape() - for i in range(len(N)): - X[i] = X[i] * L[i] / N[i] - self.X = [np.broadcast_to(x, self.fft.shape(False)) for x in X] - - # get local wavenumbers and Laplace operator - s = self.fft.local_slice() - N = self.fft.global_shape() - k = [np.fft.fftfreq(n, 1.0 / n).astype(int) for n in N[:-1]] - k.append(np.fft.rfftfreq(N[-1], 1.0 / N[-1]).astype(int)) - K = [ki[si] for ki, si in zip(k, s)] - Ks = np.meshgrid(*K, indexing='ij', sparse=True) - Lp = 2 * np.pi / L - for i in range(ndim): - Ks[i] = (Ks[i] * Lp[i]).astype(float) - K = [np.broadcast_to(k, self.fft.shape(True)) for k in Ks] - K = np.array(K).astype(float) - self.K2 = np.sum(K * K, 0, dtype=float) - - # Need this for diagnostics - self.dx = self.L / nvars[0] - self.dy = self.L / nvars[1] + kwargs['L'] = kwargs.get('L', 1.0) + super().__init__(alpha=1.0, dtype=np.dtype('float'), **kwargs) + 
self._makeAttributeAndRegister('eps', 'radius', 'dw', 'init_type', localVars=locals(), readOnly=True) + + def _eval_explicit_part(self, u, t, f_expl): + f_expl[:] = -2.0 / self.eps**2 * u * (1.0 - u) * (1.0 - 2.0 * u) - 6.0 * self.dw * u * (1.0 - u) + return f_expl def eval_f(self, u, t): """ @@ -146,56 +95,24 @@ def eval_f(self, u, t): f = self.dtype_f(self.init) + f.impl[:] = self._eval_Laplacian(u, f.impl) + if self.spectral: f.impl = -self.K2 * u if self.eps > 0: tmp = self.fft.backward(u) - tmpf = -2.0 / self.eps**2 * tmp * (1.0 - tmp) * (1.0 - 2.0 * tmp) - 6.0 * self.dw * tmp * (1.0 - tmp) - f.expl[:] = self.fft.forward(tmpf) + tmp[:] = self._eval_explicit_part(tmp, t, tmp) + f.expl[:] = self.fft.forward(tmp) else: - u_hat = self.fft.forward(u) - lap_u_hat = -self.K2 * u_hat - f.impl[:] = self.fft.backward(lap_u_hat, f.impl) if self.eps > 0: - f.expl = -2.0 / self.eps**2 * u * (1.0 - u) * (1.0 - 2.0 * u) - 6.0 * self.dw * u * (1.0 - u) + f.expl[:] = self._eval_explicit_part(u, t, f.expl) + self.work_counters['rhs']() return f - def solve_system(self, rhs, factor, u0, t): - """ - Simple FFT solver for the diffusion part. - - Parameters - ---------- - rhs : dtype_f - Right-hand side for the linear system. - factor : float - Abbrev. for the node-to-node stepsize (or any other factor required). - u0 : dtype_u - Initial guess for the iterative solver (not used here so far). - t : float - Current time (e.g. for time-dependent BCs). - - Returns - ------- - me : dtype_u - The solution as mesh. - """ - - if self.spectral: - me = rhs / (1.0 + factor * self.K2) - - else: - me = self.dtype_u(self.init) - rhs_hat = self.fft.forward(rhs) - rhs_hat /= 1.0 + factor * self.K2 - me[:] = self.fft.backward(rhs_hat) - - return me - def u_exact(self, t): r""" Routine to compute the exact solution at time :math:`t`. 
@@ -216,18 +133,18 @@ def u_exact(self, t): if self.init_type == 'circle': r2 = (self.X[0] - 0.5) ** 2 + (self.X[1] - 0.5) ** 2 if self.spectral: - tmp = 0.5 * (1.0 + np.tanh((self.radius - np.sqrt(r2)) / (np.sqrt(2) * self.eps))) + tmp = 0.5 * (1.0 + self.xp.tanh((self.radius - self.xp.sqrt(r2)) / (np.sqrt(2) * self.eps))) me[:] = self.fft.forward(tmp) else: - me[:] = 0.5 * (1.0 + np.tanh((self.radius - np.sqrt(r2)) / (np.sqrt(2) * self.eps))) + me[:] = 0.5 * (1.0 + self.xp.tanh((self.radius - self.xp.sqrt(r2)) / (np.sqrt(2) * self.eps))) elif self.init_type == 'circle_rand': ndim = len(me.shape) - L = int(self.L) + L = int(self.L[0]) # get random radii for circles/spheres - np.random.seed(1) + self.xp.random.seed(1) lbound = 3.0 * self.eps ubound = 0.5 - self.eps - rand_radii = (ubound - lbound) * np.random.random_sample(size=tuple([L] * ndim)) + lbound + rand_radii = (ubound - lbound) * self.xp.random.random_sample(size=tuple([L] * ndim)) + lbound # distribute circles/spheres tmp = newDistArray(self.fft, False) if ndim == 2: @@ -236,14 +153,14 @@ def u_exact(self, t): # build radius r2 = (self.X[0] + i - L + 0.5) ** 2 + (self.X[1] + j - L + 0.5) ** 2 # add this blob, shifted by 1 to avoid issues with adding up negative contributions - tmp += np.tanh((rand_radii[i, j] - np.sqrt(r2)) / (np.sqrt(2) * self.eps)) + 1 + tmp += self.xp.tanh((rand_radii[i, j] - np.sqrt(r2)) / (np.sqrt(2) * self.eps)) + 1 # normalize to [0,1] tmp *= 0.5 - assert np.all(tmp <= 1.0) + assert self.xp.all(tmp <= 1.0) if self.spectral: me[:] = self.fft.forward(tmp) else: - me[:] = tmp[:] + self.xp.copyto(me, tmp) else: raise NotImplementedError('type of initial value not implemented, got %s' % self.init_type) @@ -289,8 +206,9 @@ def eval_f(self, u, t): f = self.dtype_f(self.init) + f.impl[:] = self._eval_Laplacian(u, f.impl) + if self.spectral: - f.impl = -self.K2 * u tmp = newDistArray(self.fft, False) tmp[:] = self.fft.backward(u, tmp) @@ -301,14 +219,14 @@ def eval_f(self, u, t): tmpf = 
self.dtype_f(self.init, val=0.0) # build sum over RHS without driving force - Rt_local = float(np.sum(self.fft.backward(f.impl) + tmpf)) + Rt_local = float(self.xp.sum(self.fft.backward(f.impl) + tmpf)) if self.comm is not None: Rt_global = self.comm.allreduce(sendobj=Rt_local, op=MPI.SUM) else: Rt_global = Rt_local # build sum over driving force term - Ht_local = float(np.sum(6.0 * tmp * (1.0 - tmp))) + Ht_local = float(self.xp.sum(6.0 * tmp * (1.0 - tmp))) if self.comm is not None: Ht_global = self.comm.allreduce(sendobj=Ht_local, op=MPI.SUM) else: @@ -324,22 +242,19 @@ def eval_f(self, u, t): f.expl[:] = self.fft.forward(tmpf) else: - u_hat = self.fft.forward(u) - lap_u_hat = -self.K2 * u_hat - f.impl[:] = self.fft.backward(lap_u_hat, f.impl) if self.eps > 0: f.expl = -2.0 / self.eps**2 * u * (1.0 - u) * (1.0 - 2.0 * u) # build sum over RHS without driving force - Rt_local = float(np.sum(f.impl + f.expl)) + Rt_local = float(self.xp.sum(f.impl + f.expl)) if self.comm is not None: Rt_global = self.comm.allreduce(sendobj=Rt_local, op=MPI.SUM) else: Rt_global = Rt_local # build sum over driving force term - Ht_local = float(np.sum(6.0 * u * (1.0 - u))) + Ht_local = float(self.xp.sum(6.0 * u * (1.0 - u))) if self.comm is not None: Ht_global = self.comm.allreduce(sendobj=Ht_local, op=MPI.SUM) else: @@ -353,4 +268,5 @@ def eval_f(self, u, t): f.expl -= 6.0 * dw * u * (1.0 - u) + self.work_counters['rhs']() return f diff --git a/pySDC/implementations/problem_classes/Brusselator.py b/pySDC/implementations/problem_classes/Brusselator.py new file mode 100644 index 0000000000..946e676781 --- /dev/null +++ b/pySDC/implementations/problem_classes/Brusselator.py @@ -0,0 +1,189 @@ +import numpy as np +from mpi4py import MPI + +from pySDC.implementations.problem_classes.generic_MPIFFT_Laplacian import IMEX_Laplacian_MPIFFT + + +class Brusselator(IMEX_Laplacian_MPIFFT): + r""" + Two-dimensional Brusselator from [1]_. 
+    This is a reaction-diffusion equation with non-autonomous source term:
+
+    .. math::
+        \frac{\partial u}{\partial t} = \alpha \Delta u + 1 + u^2 v - 4.4u + f(x,y,t),
+        \frac{\partial v}{\partial t} = \alpha \Delta v + 3.4u - u^2 v
+
+    with the source term :math:`f(x,y,t) = 5` if :math:`(x-0.3)^2 + (y-0.6)^2 <= 0.1^2` and :math:`t >= 1.1` and 0 else.
+    We discretize in a periodic domain of length 1 and solve with an IMEX scheme based on a spectral method for the
+    Laplacian which we invert implicitly. We treat the reaction and source terms explicitly.
+
+    References
+    ----------
+    .. [1] https://link.springer.com/book/10.1007/978-3-642-05221-7
+    """
+
+    def __init__(self, alpha=0.1, **kwargs):
+        """Initialization routine"""
+        super().__init__(spectral=False, L=1.0, dtype='d', alpha=alpha, **kwargs)
+
+        # prepare the array with two components
+        shape = (2,) + (self.init[0])
+        self.iU = 0
+        self.iV = 1
+        self.init = (shape, self.comm, np.dtype('float'))
+
+    def _eval_explicit_part(self, u, t, f_expl):
+        iU, iV = self.iU, self.iV
+        x, y = self.X[0], self.X[1]
+
+        # evaluate time independent part
+        f_expl[iU, ...] = 1.0 + u[iU] ** 2 * u[iV] - 4.4 * u[iU]
+        f_expl[iV, ...] = 3.4 * u[iU] - u[iU] ** 2 * u[iV]
+
+        # add time-dependent part
+        if t >= 1.1:
+            mask = (x - 0.3) ** 2 + (y - 0.6) ** 2 <= 0.1**2
+            f_expl[iU][mask] += 5.0
+        return f_expl
+
+    def eval_f(self, u, t):
+        """
+        Routine to evaluate the right-hand side of the problem.
+
+        Parameters
+        ----------
+        u : dtype_u
+            Current values of the numerical solution.
+        t : float
+            Current time at which the numerical solution is computed.
+
+        Returns
+        -------
+        f : dtype_f
+            The right-hand side of the problem.
+        """
+        f = self.dtype_f(self.init)
+
+        # evaluate Laplacian to be solved implicitly
+        for i in [self.iU, self.iV]:
+            f.impl[i, ...]
= self._eval_Laplacian(u[i], f.impl[i]) + + f.expl[:] = self._eval_explicit_part(u, t, f.expl) + + self.work_counters['rhs']() + + return f + + def solve_system(self, rhs, factor, u0, t): + """ + Simple FFT solver for the diffusion part. + + Parameters + ---------- + rhs : dtype_f + Right-hand side for the linear system. + factor : float + Abbrev. for the node-to-node stepsize (or any other factor required). + u0 : dtype_u + Initial guess for the iterative solver (not used here so far). + t : float + Current time (e.g. for time-dependent BCs). + + Returns + ------- + me : dtype_u + Solution. + """ + me = self.dtype_u(self.init) + + for i in [self.iU, self.iV]: + me[i, ...] = self._invert_Laplacian(me[i], factor, rhs[i]) + + return me + + def u_exact(self, t, u_init=None, t_init=None): + r""" + Initial conditions. + + Parameters + ---------- + t : float + Time of the exact solution. + u_init : dtype_u + Initial conditions for getting the exact solution. + t_init : float + The starting time. + + Returns + ------- + me : dtype_u + Exact solution. + """ + + iU, iV = self.iU, self.iV + x, y = self.X[0], self.X[1] + + me = self.dtype_u(self.init, val=0.0) + + if t == 0: + me[iU, ...] = 22.0 * y * (1 - y / self.L[0]) ** (3.0 / 2.0) / self.L[0] + me[iV, ...] = 27.0 * x * (1 - x / self.L[0]) ** (3.0 / 2.0) / self.L[0] + else: + + def eval_rhs(t, u): + f = self.eval_f(u.reshape(self.init[0]), t) + return (f.impl + f.expl).flatten() + + me[...] 
= self.generate_scipy_reference_solution(eval_rhs, t, u_init, t_init) + + return me + + def get_fig(self): # pragma: no cover + """ + Get a figure suitable to plot the solution of this problem + + Returns + ------- + self.fig : matplotlib.pyplot.figure.Figure + """ + import matplotlib.pyplot as plt + from mpl_toolkits.axes_grid1 import make_axes_locatable + + plt.rcParams['figure.constrained_layout.use'] = True + self.fig, axs = plt.subplots(1, 2, sharex=True, sharey=True, figsize=((8, 3))) + divider = make_axes_locatable(axs[1]) + self.cax = divider.append_axes('right', size='3%', pad=0.03) + return self.fig + + def plot(self, u, t=None, fig=None): # pragma: no cover + r""" + Plot the solution. Please supply a figure with the same structure as returned by ``self.get_fig``. + + Parameters + ---------- + u : dtype_u + Solution to be plotted + t : float + Time to display at the top of the figure + fig : matplotlib.pyplot.figure.Figure + Figure with the correct structure + + Returns + ------- + None + """ + fig = self.get_fig() if fig is None else fig + axs = fig.axes + + vmin = u.min() + vmax = u.max() + for i, label in zip([self.iU, self.iV], [r'$u$', r'$v$']): + im = axs[i].pcolormesh(self.X[0], self.X[1], u[i], vmin=vmin, vmax=vmax) + axs[i].set_aspect(1) + axs[i].set_title(label) + + if t is not None: + fig.suptitle(f't = {t:.2e}') + axs[0].set_xlabel(r'$x$') + axs[0].set_ylabel(r'$y$') + fig.colorbar(im, self.cax) diff --git a/pySDC/implementations/problem_classes/DiscontinuousTestODE.py b/pySDC/implementations/problem_classes/DiscontinuousTestODE.py index 16ed3bae84..111a8267ea 100644 --- a/pySDC/implementations/problem_classes/DiscontinuousTestODE.py +++ b/pySDC/implementations/problem_classes/DiscontinuousTestODE.py @@ -214,7 +214,7 @@ def get_switching_info(self, u, t): m_guess = m - 1 break - state_function = [u[m][0] - 5 for m in range(len(u))] if switch_detected else [] + state_function = [u[m][0] - 5 for m in range(len(u))] return switch_detected, 
m_guess, state_function def count_switches(self): @@ -222,3 +222,64 @@ def count_switches(self): Setter to update the number of switches if one is found. """ self.nswitches += 1 + + +class ExactDiscontinuousTestODE(DiscontinuousTestODE): + r""" + Dummy ODE problem for testing the ``SwitchEstimator`` class. The problem contains the exact dynamics + of the problem class ``DiscontinuousTestODE``. + """ + + def __init__(self, newton_maxiter=100, newton_tol=1e-8): + """Initialization routine""" + super().__init__(newton_maxiter, newton_tol) + + def eval_f(self, u, t): + """ + Derivative. + + Parameters + ---------- + u : dtype_u + Exact value of u. + t : float + Time :math:`t`. + + Returns + ------- + f : dtype_f + Derivative. + """ + + f = self.dtype_f(self.init) + + t_switch = np.inf if self.t_switch is None else self.t_switch + h = u[0] - 5 + if h >= 0 or t >= t_switch: + f[:] = 1 + else: + f[:] = np.exp(t) + return f + + def solve_system(self, rhs, factor, u0, t): + """ + Just return the exact solution... + + Parameters + ---------- + rhs : dtype_f + Right-hand side for the linear system. + factor : float + Abbrev. for the local stepsize (or any other factor required). + u0 : dtype_u + Initial guess for the iterative solver. + t : float + Current time (e.g. for time-dependent BCs). + + Returns + ------- + me : dtype_u + The solution as mesh. 
+ """ + + return self.u_exact(t) diff --git a/pySDC/implementations/problem_classes/GrayScott_MPIFFT.py b/pySDC/implementations/problem_classes/GrayScott_MPIFFT.py index 0eaa5c109a..293b1c846e 100644 --- a/pySDC/implementations/problem_classes/GrayScott_MPIFFT.py +++ b/pySDC/implementations/problem_classes/GrayScott_MPIFFT.py @@ -1,16 +1,16 @@ -import numpy as np import scipy.sparse as sp from mpi4py import MPI from mpi4py_fft import PFFT from pySDC.core.Errors import ProblemError -from pySDC.core.Problem import ptype +from pySDC.core.Problem import ptype, WorkCounter from pySDC.implementations.datatype_classes.mesh import mesh, imex_mesh, comp2_mesh +from pySDC.implementations.problem_classes.generic_MPIFFT_Laplacian import IMEX_Laplacian_MPIFFT from mpi4py_fft import newDistArray -class grayscott_imex_diffusion(ptype): +class grayscott_imex_diffusion(IMEX_Laplacian_MPIFFT): r""" The Gray-Scott system [1]_ describes a reaction-diffusion process of two substances :math:`u` and :math:`v`, where they diffuse over time. During the reaction :math:`u` is used up with overall decay rate :math:`B`, @@ -71,68 +71,21 @@ class grayscott_imex_diffusion(ptype): .. 
[3] https://www.chebfun.org/examples/pde/GrayScott.html """ - dtype_u = mesh - dtype_f = imex_mesh + def __init__(self, Du=1.0, Dv=0.01, A=0.09, B=0.086, **kwargs): + kwargs['L'] = 2.0 + super().__init__(dtype='d', alpha=1.0, x0=-kwargs['L'] / 2.0, **kwargs) - def __init__(self, nvars=None, Du=1.0, Dv=0.01, A=0.09, B=0.086, spectral=None, L=2.0, comm=MPI.COMM_WORLD): - """Initialization routine""" - nvars = (127, 127) if nvars is None else nvars - if not (isinstance(nvars, tuple) and len(nvars) > 1): - raise ProblemError('Need at least two dimensions') - - # Creating FFT structure - self.ndim = len(nvars) - axes = tuple(range(self.ndim)) - self.fft = PFFT( - comm, - list(nvars), - axes=axes, - dtype=np.float64, - collapse=True, - backend='fftw', - ) - - # get test data to figure out type and dimensions - tmp_u = newDistArray(self.fft, spectral) - - # add two components to contain field and temperature - self.ncomp = 2 - sizes = tmp_u.shape + (self.ncomp,) - - # invoke super init, passing the communicator and the local dimensions as init - super().__init__(init=(sizes, comm, tmp_u.dtype)) - self._makeAttributeAndRegister( - 'nvars', 'Du', 'Dv', 'A', 'B', 'spectral', 'L', 'comm', localVars=locals(), readOnly=True - ) - - L = np.array([self.L] * self.ndim, dtype=float) - - # get local mesh - X = np.ogrid[self.fft.local_slice(False)] - N = self.fft.global_shape() - for i in range(len(N)): - X[i] = -L[i] / 2 + (X[i] * L[i] / N[i]) - self.X = [np.broadcast_to(x, self.fft.shape(False)) for x in X] - - # get local wavenumbers and Laplace operator - s = self.fft.local_slice() - N = self.fft.global_shape() - k = [np.fft.fftfreq(n, 1.0 / n).astype(int) for n in N[:-1]] - k.append(np.fft.rfftfreq(N[-1], 1.0 / N[-1]).astype(int)) - K = [ki[si] for ki, si in zip(k, s)] - Ks = np.meshgrid(*K, indexing='ij', sparse=True) - Lp = 2 * np.pi / L - for i in range(self.ndim): - Ks[i] = (Ks[i] * Lp[i]).astype(float) - K = [np.broadcast_to(k, self.fft.shape(True)) for k in Ks] - K = 
np.array(K).astype(float) - self.K2 = np.sum(K * K, 0, dtype=float) - self.Ku = -self.K2 * self.Du - self.Kv = -self.K2 * self.Dv - - # Need this for diagnostics - self.dx = self.L / nvars[0] - self.dy = self.L / nvars[1] + # prepare the array with two components + shape = (2,) + (self.init[0]) + self.iU = 0 + self.iV = 1 + self.init = (shape, self.comm, self.xp.dtype('float')) + + self._makeAttributeAndRegister('Du', 'Dv', 'A', 'B', localVars=locals(), readOnly=True) + + # prepare "Laplacians" + self.Ku = -self.Du * self.K2 + self.Kv = -self.Dv * self.K2 def eval_f(self, u, t): """ @@ -154,27 +107,28 @@ def eval_f(self, u, t): f = self.dtype_f(self.init) if self.spectral: - f.impl[..., 0] = self.Ku * u[..., 0] - f.impl[..., 1] = self.Kv * u[..., 1] + f.impl[0, ...] = self.Ku * u[0, ...] + f.impl[1, ...] = self.Kv * u[1, ...] tmpu = newDistArray(self.fft, False) tmpv = newDistArray(self.fft, False) - tmpu[:] = self.fft.backward(u[..., 0], tmpu) - tmpv[:] = self.fft.backward(u[..., 1], tmpv) + tmpu[:] = self.fft.backward(u[0, ...], tmpu) + tmpv[:] = self.fft.backward(u[1, ...], tmpv) tmpfu = -tmpu * tmpv**2 + self.A * (1 - tmpu) tmpfv = tmpu * tmpv**2 - self.B * tmpv - f.expl[..., 0] = self.fft.forward(tmpfu) - f.expl[..., 1] = self.fft.forward(tmpfv) + f.expl[0, ...] = self.fft.forward(tmpfu) + f.expl[1, ...] = self.fft.forward(tmpfv) else: - u_hat = self.fft.forward(u[..., 0]) + u_hat = self.fft.forward(u[0, ...]) lap_u_hat = self.Ku * u_hat - f.impl[..., 0] = self.fft.backward(lap_u_hat, f.impl[..., 0]) - u_hat = self.fft.forward(u[..., 1]) + f.impl[0, ...] = self.fft.backward(lap_u_hat, f.impl[0, ...]) + u_hat = self.fft.forward(u[1, ...]) lap_u_hat = self.Kv * u_hat - f.impl[..., 1] = self.fft.backward(lap_u_hat, f.impl[..., 1]) - f.expl[..., 0] = -u[..., 0] * u[..., 1] ** 2 + self.A * (1 - u[..., 0]) - f.expl[..., 1] = u[..., 0] * u[..., 1] ** 2 - self.B * u[..., 1] + f.impl[1, ...] = self.fft.backward(lap_u_hat, f.impl[1, ...]) + f.expl[0, ...] = -u[0, ...] 
* u[1, ...] ** 2 + self.A * (1 - u[0, ...]) + f.expl[1, ...] = u[0, ...] * u[1, ...] ** 2 - self.B * u[1, ...] + self.work_counters['rhs']() return f def solve_system(self, rhs, factor, u0, t): @@ -200,16 +154,16 @@ def solve_system(self, rhs, factor, u0, t): me = self.dtype_u(self.init) if self.spectral: - me[..., 0] = rhs[..., 0] / (1.0 - factor * self.Ku) - me[..., 1] = rhs[..., 1] / (1.0 - factor * self.Kv) + me[0, ...] = rhs[0, ...] / (1.0 - factor * self.Ku) + me[1, ...] = rhs[1, ...] / (1.0 - factor * self.Kv) else: - rhs_hat = self.fft.forward(rhs[..., 0]) + rhs_hat = self.fft.forward(rhs[0, ...]) rhs_hat /= 1.0 - factor * self.Ku - me[..., 0] = self.fft.backward(rhs_hat, me[..., 0]) - rhs_hat = self.fft.forward(rhs[..., 1]) + me[0, ...] = self.fft.backward(rhs_hat, me[0, ...]) + rhs_hat = self.fft.forward(rhs[1, ...]) rhs_hat /= 1.0 - factor * self.Kv - me[..., 1] = self.fft.backward(rhs_hat, me[..., 1]) + me[1, ...] = self.fft.backward(rhs_hat, me[1, ...]) return me @@ -234,19 +188,13 @@ def u_exact(self, t): # This assumes that the box is [-L/2, L/2]^2 if self.spectral: - tmp = 1.0 - np.exp(-80.0 * ((self.X[0] + 0.05) ** 2 + (self.X[1] + 0.02) ** 2)) - me[..., 0] = self.fft.forward(tmp) - tmp = np.exp(-80.0 * ((self.X[0] - 0.05) ** 2 + (self.X[1] - 0.02) ** 2)) - me[..., 1] = self.fft.forward(tmp) + tmp = 1.0 - self.xp.exp(-80.0 * ((self.X[0] + 0.05) ** 2 + (self.X[1] + 0.02) ** 2)) + me[0, ...] = self.fft.forward(tmp) + tmp = self.xp.exp(-80.0 * ((self.X[0] - 0.05) ** 2 + (self.X[1] - 0.02) ** 2)) + me[1, ...] = self.fft.forward(tmp) else: - me[..., 0] = 1.0 - np.exp(-80.0 * ((self.X[0] + 0.05) ** 2 + (self.X[1] + 0.02) ** 2)) - me[..., 1] = np.exp(-80.0 * ((self.X[0] - 0.05) ** 2 + (self.X[1] - 0.02) ** 2)) - - # tmpu = np.load('data/u_0001.npy') - # tmpv = np.load('data/v_0001.npy') - # - # me[..., 0] = self.fft.forward(tmpu) - # me[..., 1] = self.fft.forward(tmpv) + me[0, ...] 
= 1.0 - self.xp.exp(-80.0 * ((self.X[0] + 0.05) ** 2 + (self.X[1] + 0.02) ** 2)) + me[1, ...] = self.xp.exp(-80.0 * ((self.X[0] - 0.05) ** 2 + (self.X[1] - 0.02) ** 2)) return me @@ -272,10 +220,8 @@ class grayscott_imex_linear(grayscott_imex_diffusion): part is computed in an explicit way). """ - def __init__(self, nvars=None, Du=1.0, Dv=0.01, A=0.09, B=0.086, spectral=None, L=2.0, comm=MPI.COMM_WORLD): - """Initialization routine""" - nvars = (127, 127) if nvars is None else nvars - super().__init__(nvars, Du, Dv, A, B, spectral, L, comm) + def __init__(self, **kwargs): + super().__init__(**kwargs) self.Ku -= self.A self.Kv -= self.B @@ -299,27 +245,28 @@ def eval_f(self, u, t): f = self.dtype_f(self.init) if self.spectral: - f.impl[..., 0] = self.Ku * u[..., 0] - f.impl[..., 1] = self.Kv * u[..., 1] + f.impl[0, ...] = self.Ku * u[0, ...] + f.impl[1, ...] = self.Kv * u[1, ...] tmpu = newDistArray(self.fft, False) tmpv = newDistArray(self.fft, False) - tmpu[:] = self.fft.backward(u[..., 0], tmpu) - tmpv[:] = self.fft.backward(u[..., 1], tmpv) + tmpu[:] = self.fft.backward(u[0, ...], tmpu) + tmpv[:] = self.fft.backward(u[1, ...], tmpv) tmpfu = -tmpu * tmpv**2 + self.A tmpfv = tmpu * tmpv**2 - f.expl[..., 0] = self.fft.forward(tmpfu) - f.expl[..., 1] = self.fft.forward(tmpfv) + f.expl[0, ...] = self.fft.forward(tmpfu) + f.expl[1, ...] = self.fft.forward(tmpfv) else: - u_hat = self.fft.forward(u[..., 0]) + u_hat = self.fft.forward(u[0, ...]) lap_u_hat = self.Ku * u_hat - f.impl[..., 0] = self.fft.backward(lap_u_hat, f.impl[..., 0]) - u_hat = self.fft.forward(u[..., 1]) + f.impl[0, ...] = self.fft.backward(lap_u_hat, f.impl[0, ...]) + u_hat = self.fft.forward(u[1, ...]) lap_u_hat = self.Kv * u_hat - f.impl[..., 1] = self.fft.backward(lap_u_hat, f.impl[..., 1]) - f.expl[..., 0] = -u[..., 0] * u[..., 1] ** 2 + self.A - f.expl[..., 1] = u[..., 0] * u[..., 1] ** 2 + f.impl[1, ...] = self.fft.backward(lap_u_hat, f.impl[1, ...]) + f.expl[0, ...] = -u[0, ...] * u[1, ...] 
** 2 + self.A + f.expl[1, ...] = u[0, ...] * u[1, ...] ** 2 + self.work_counters['rhs']() return f @@ -387,22 +334,18 @@ class grayscott_mi_diffusion(grayscott_imex_diffusion): def __init__( self, - nvars=None, - Du=1.0, - Dv=0.01, - A=0.09, - B=0.086, - spectral=None, newton_maxiter=100, newton_tol=1e-12, - L=2.0, - comm=MPI.COMM_WORLD, + **kwargs, ): """Initialization routine""" - nvars = (127, 127) if nvars is None else nvars - super().__init__(nvars, Du, Dv, A, B, spectral, L, comm) + super().__init__(**kwargs) # This may not run in parallel yet.. assert self.comm.Get_size() == 1 + self.work_counters['newton'] = WorkCounter() + self.Ku = -self.Du * self.K2 + self.Kv = -self.Dv * self.K2 + self._makeAttributeAndRegister('newton_maxiter', 'newton_tol', localVars=locals(), readOnly=False) def eval_f(self, u, t): """ @@ -424,27 +367,28 @@ def eval_f(self, u, t): f = self.dtype_f(self.init) if self.spectral: - f.comp1[..., 0] = self.Ku * u[..., 0] - f.comp1[..., 1] = self.Kv * u[..., 1] + f.comp1[0, ...] = self.Ku * u[0, ...] + f.comp1[1, ...] = self.Kv * u[1, ...] tmpu = newDistArray(self.fft, False) tmpv = newDistArray(self.fft, False) - tmpu[:] = self.fft.backward(u[..., 0], tmpu) - tmpv[:] = self.fft.backward(u[..., 1], tmpv) + tmpu[:] = self.fft.backward(u[0, ...], tmpu) + tmpv[:] = self.fft.backward(u[1, ...], tmpv) tmpfu = -tmpu * tmpv**2 + self.A * (1 - tmpu) tmpfv = tmpu * tmpv**2 - self.B * tmpv - f.comp2[..., 0] = self.fft.forward(tmpfu) - f.comp2[..., 1] = self.fft.forward(tmpfv) + f.comp2[0, ...] = self.fft.forward(tmpfu) + f.comp2[1, ...] = self.fft.forward(tmpfv) else: - u_hat = self.fft.forward(u[..., 0]) + u_hat = self.fft.forward(u[0, ...]) lap_u_hat = self.Ku * u_hat - f.comp1[..., 0] = self.fft.backward(lap_u_hat, f.comp1[..., 0]) - u_hat = self.fft.forward(u[..., 1]) + f.comp1[0, ...] 
= self.fft.backward(lap_u_hat, f.comp1[0, ...]) + u_hat = self.fft.forward(u[1, ...]) lap_u_hat = self.Kv * u_hat - f.comp1[..., 1] = self.fft.backward(lap_u_hat, f.comp1[..., 1]) - f.comp2[..., 0] = -u[..., 0] * u[..., 1] ** 2 + self.A * (1 - u[..., 0]) - f.comp2[..., 1] = u[..., 0] * u[..., 1] ** 2 - self.B * u[..., 1] + f.comp1[1, ...] = self.fft.backward(lap_u_hat, f.comp1[1, ...]) + f.comp2[0, ...] = -u[0, ...] * u[1, ...] ** 2 + self.A * (1 - u[0, ...]) + f.comp2[1, ...] = u[0, ...] * u[1, ...] ** 2 - self.B * u[1, ...] + self.work_counters['rhs']() return f def solve_system_1(self, rhs, factor, u0, t): @@ -468,7 +412,7 @@ def solve_system_1(self, rhs, factor, u0, t): The solution as mesh. """ - me = super(grayscott_mi_diffusion, self).solve_system(rhs, factor, u0, t) + me = super().solve_system(rhs, factor, u0, t) return me def solve_system_2(self, rhs, factor, u0, t): @@ -496,18 +440,18 @@ def solve_system_2(self, rhs, factor, u0, t): if self.spectral: tmpu = newDistArray(self.fft, False) tmpv = newDistArray(self.fft, False) - tmpu[:] = self.fft.backward(u[..., 0], tmpu) - tmpv[:] = self.fft.backward(u[..., 1], tmpv) + tmpu[:] = self.fft.backward(u[0, ...], tmpu) + tmpv[:] = self.fft.backward(u[1, ...], tmpv) tmprhsu = newDistArray(self.fft, False) tmprhsv = newDistArray(self.fft, False) - tmprhsu[:] = self.fft.backward(rhs[..., 0], tmprhsu) - tmprhsv[:] = self.fft.backward(rhs[..., 1], tmprhsv) + tmprhsu[:] = self.fft.backward(rhs[0, ...], tmprhsu) + tmprhsv[:] = self.fft.backward(rhs[1, ...], tmprhsv) else: - tmpu = u[..., 0] - tmpv = u[..., 1] - tmprhsu = rhs[..., 0] - tmprhsv = rhs[..., 1] + tmpu = u[0, ...] + tmpv = u[1, ...] + tmprhsu = rhs[0, ...] + tmprhsv = rhs[1, ...] 
# start newton iteration n = 0 @@ -519,7 +463,7 @@ def solve_system_2(self, rhs, factor, u0, t): tmpgv = tmpv - tmprhsv - factor * (tmpu * tmpv**2 - self.B * tmpv) # if g is close to 0, then we are done - res = max(np.linalg.norm(tmpgu, np.inf), np.linalg.norm(tmpgv, np.inf)) + res = max(self.xp.linalg.norm(tmpgu, self.xp.inf), self.xp.linalg.norm(tmpgv, self.xp.inf)) if res < self.newton_tol: break @@ -530,17 +474,19 @@ def solve_system_2(self, rhs, factor, u0, t): dg11 = 1 - factor * (2 * tmpu * tmpv - self.B) # interleave and unravel to put into sparse matrix - dg00I = np.ravel(np.kron(dg00, np.array([1, 0]))) - dg01I = np.ravel(np.kron(dg01, np.array([1, 0]))) - dg10I = np.ravel(np.kron(dg10, np.array([1, 0]))) - dg11I = np.ravel(np.kron(dg11, np.array([0, 1]))) + dg00I = self.xp.ravel(self.xp.kron(dg00, self.xp.array([1, 0]))) + dg01I = self.xp.ravel(self.xp.kron(dg01, self.xp.array([1, 0]))) + dg10I = self.xp.ravel(self.xp.kron(dg10, self.xp.array([1, 0]))) + dg11I = self.xp.ravel(self.xp.kron(dg11, self.xp.array([0, 1]))) # put into sparse matrix dg = sp.diags(dg00I, offsets=0) + sp.diags(dg11I, offsets=0) dg += sp.diags(dg01I, offsets=1, shape=dg.shape) + sp.diags(dg10I, offsets=-1, shape=dg.shape) # interleave g terms to apply inverse to it - g = np.kron(tmpgu.flatten(), np.array([1, 0])) + np.kron(tmpgv.flatten(), np.array([0, 1])) + g = self.xp.kron(tmpgu.flatten(), self.xp.array([1, 0])) + self.xp.kron( + tmpgv.flatten(), self.xp.array([0, 1]) + ) # invert dg matrix b = sp.linalg.spsolve(dg, g) # update real space vectors @@ -549,24 +495,23 @@ def solve_system_2(self, rhs, factor, u0, t): # increase iteration count n += 1 + self.work_counters['newton']() - if np.isnan(res) and self.stop_at_nan: + if self.xp.isnan(res) and self.stop_at_nan: raise ProblemError('Newton got nan after %i iterations, aborting...' % n) - elif np.isnan(res): + elif self.xp.isnan(res): self.logger.warning('Newton got nan after %i iterations...' 
% n) if n == self.newton_maxiter: self.logger.warning('Newton did not converge after %i iterations, error is %s' % (n, res)) - # self.newton_ncalls += 1 - # self.newton_itercount += n me = self.dtype_u(self.init) if self.spectral: - me[..., 0] = self.fft.forward(tmpu) - me[..., 1] = self.fft.forward(tmpv) + me[0, ...] = self.fft.forward(tmpu) + me[1, ...] = self.fft.forward(tmpv) else: - me[..., 0] = tmpu - me[..., 1] = tmpv + me[0, ...] = tmpu + me[1, ...] = tmpv return me @@ -595,22 +540,18 @@ class grayscott_mi_linear(grayscott_imex_linear): def __init__( self, - nvars=None, - Du=1.0, - Dv=0.01, - A=0.09, - B=0.086, - spectral=None, newton_maxiter=100, newton_tol=1e-12, - L=2.0, - comm=MPI.COMM_WORLD, + **kwargs, ): """Initialization routine""" - nvars = (127, 127) if nvars is None else nvars - super().__init__(nvars, Du, Dv, A, B, spectral, L, comm) + super().__init__(**kwargs) # This may not run in parallel yet.. assert self.comm.Get_size() == 1 + self.work_counters['newton'] = WorkCounter() + self.Ku = -self.Du * self.K2 - self.A + self.Kv = -self.Dv * self.K2 - self.B + self._makeAttributeAndRegister('newton_maxiter', 'newton_tol', localVars=locals(), readOnly=False) def eval_f(self, u, t): """ @@ -632,27 +573,28 @@ def eval_f(self, u, t): f = self.dtype_f(self.init) if self.spectral: - f.comp1[..., 0] = self.Ku * u[..., 0] - f.comp1[..., 1] = self.Kv * u[..., 1] + f.comp1[0, ...] = self.Ku * u[0, ...] + f.comp1[1, ...] = self.Kv * u[1, ...] tmpu = newDistArray(self.fft, False) tmpv = newDistArray(self.fft, False) - tmpu[:] = self.fft.backward(u[..., 0], tmpu) - tmpv[:] = self.fft.backward(u[..., 1], tmpv) + tmpu[:] = self.fft.backward(u[0, ...], tmpu) + tmpv[:] = self.fft.backward(u[1, ...], tmpv) tmpfu = -tmpu * tmpv**2 + self.A tmpfv = tmpu * tmpv**2 - f.comp2[..., 0] = self.fft.forward(tmpfu) - f.comp2[..., 1] = self.fft.forward(tmpfv) + f.comp2[0, ...] = self.fft.forward(tmpfu) + f.comp2[1, ...] 
= self.fft.forward(tmpfv) else: - u_hat = self.fft.forward(u[..., 0]) + u_hat = self.fft.forward(u[0, ...]) lap_u_hat = self.Ku * u_hat - f.comp1[..., 0] = self.fft.backward(lap_u_hat, f.comp1[..., 0]) - u_hat = self.fft.forward(u[..., 1]) + f.comp1[0, ...] = self.fft.backward(lap_u_hat, f.comp1[0, ...]) + u_hat = self.fft.forward(u[1, ...]) lap_u_hat = self.Kv * u_hat - f.comp1[..., 1] = self.fft.backward(lap_u_hat, f.comp1[..., 1]) - f.comp2[..., 0] = -u[..., 0] * u[..., 1] ** 2 + self.A - f.comp2[..., 1] = u[..., 0] * u[..., 1] ** 2 + f.comp1[1, ...] = self.fft.backward(lap_u_hat, f.comp1[1, ...]) + f.comp2[0, ...] = -u[0, ...] * u[1, ...] ** 2 + self.A + f.comp2[1, ...] = u[0, ...] * u[1, ...] ** 2 + self.work_counters['rhs']() return f def solve_system_1(self, rhs, factor, u0, t): @@ -704,18 +646,18 @@ def solve_system_2(self, rhs, factor, u0, t): if self.spectral: tmpu = newDistArray(self.fft, False) tmpv = newDistArray(self.fft, False) - tmpu[:] = self.fft.backward(u[..., 0], tmpu) - tmpv[:] = self.fft.backward(u[..., 1], tmpv) + tmpu[:] = self.fft.backward(u[0, ...], tmpu) + tmpv[:] = self.fft.backward(u[1, ...], tmpv) tmprhsu = newDistArray(self.fft, False) tmprhsv = newDistArray(self.fft, False) - tmprhsu[:] = self.fft.backward(rhs[..., 0], tmprhsu) - tmprhsv[:] = self.fft.backward(rhs[..., 1], tmprhsv) + tmprhsu[:] = self.fft.backward(rhs[0, ...], tmprhsu) + tmprhsv[:] = self.fft.backward(rhs[1, ...], tmprhsv) else: - tmpu = u[..., 0] - tmpv = u[..., 1] - tmprhsu = rhs[..., 0] - tmprhsv = rhs[..., 1] + tmpu = u[0, ...] + tmpv = u[1, ...] + tmprhsu = rhs[0, ...] + tmprhsv = rhs[1, ...] 
# start newton iteration n = 0 @@ -727,7 +669,7 @@ def solve_system_2(self, rhs, factor, u0, t): tmpgv = tmpv - tmprhsv - factor * (tmpu * tmpv**2) # if g is close to 0, then we are done - res = max(np.linalg.norm(tmpgu, np.inf), np.linalg.norm(tmpgv, np.inf)) + res = max(self.xp.linalg.norm(tmpgu, self.xp.inf), self.xp.linalg.norm(tmpgv, self.xp.inf)) if res < self.newton_tol: break @@ -738,17 +680,19 @@ def solve_system_2(self, rhs, factor, u0, t): dg11 = 1 - factor * (2 * tmpu * tmpv) # interleave and unravel to put into sparse matrix - dg00I = np.ravel(np.kron(dg00, np.array([1, 0]))) - dg01I = np.ravel(np.kron(dg01, np.array([1, 0]))) - dg10I = np.ravel(np.kron(dg10, np.array([1, 0]))) - dg11I = np.ravel(np.kron(dg11, np.array([0, 1]))) + dg00I = self.xp.ravel(self.xp.kron(dg00, self.xp.array([1, 0]))) + dg01I = self.xp.ravel(self.xp.kron(dg01, self.xp.array([1, 0]))) + dg10I = self.xp.ravel(self.xp.kron(dg10, self.xp.array([1, 0]))) + dg11I = self.xp.ravel(self.xp.kron(dg11, self.xp.array([0, 1]))) # put into sparse matrix dg = sp.diags(dg00I, offsets=0) + sp.diags(dg11I, offsets=0) dg += sp.diags(dg01I, offsets=1, shape=dg.shape) + sp.diags(dg10I, offsets=-1, shape=dg.shape) # interleave g terms to apply inverse to it - g = np.kron(tmpgu.flatten(), np.array([1, 0])) + np.kron(tmpgv.flatten(), np.array([0, 1])) + g = self.xp.kron(tmpgu.flatten(), self.xp.array([1, 0])) + self.xp.kron( + tmpgv.flatten(), self.xp.array([0, 1]) + ) # invert dg matrix b = sp.linalg.spsolve(dg, g) # update real-space vectors @@ -757,22 +701,21 @@ def solve_system_2(self, rhs, factor, u0, t): # increase iteration count n += 1 + self.work_counters['newton']() - if np.isnan(res) and self.stop_at_nan: + if self.xp.isnan(res) and self.stop_at_nan: raise ProblemError('Newton got nan after %i iterations, aborting...' % n) - elif np.isnan(res): + elif self.xp.isnan(res): self.logger.warning('Newton got nan after %i iterations...' 
% n) if n == self.newton_maxiter: self.logger.warning('Newton did not converge after %i iterations, error is %s' % (n, res)) - # self.newton_ncalls += 1 - # self.newton_itercount += n me = self.dtype_u(self.init) if self.spectral: - me[..., 0] = self.fft.forward(tmpu) - me[..., 1] = self.fft.forward(tmpv) + me[0, ...] = self.fft.forward(tmpu) + me[1, ...] = self.fft.forward(tmpv) else: - me[..., 0] = tmpu - me[..., 1] = tmpv + me[0, ...] = tmpu + me[1, ...] = tmpv return me diff --git a/pySDC/implementations/problem_classes/HarmonicOscillator.py b/pySDC/implementations/problem_classes/HarmonicOscillator.py index ae62d1d650..d38ab4a096 100644 --- a/pySDC/implementations/problem_classes/HarmonicOscillator.py +++ b/pySDC/implementations/problem_classes/HarmonicOscillator.py @@ -28,6 +28,7 @@ class harmonic_oscillator(ptype): Phase of the oscillation. amp : float, optional Amplitude of the oscillation. + Source: https://beltoforion.de/en/harmonic_oscillator/ """ dtype_u = particles diff --git a/pySDC/implementations/problem_classes/Lorenz.py b/pySDC/implementations/problem_classes/Lorenz.py index 5e222b26e0..dbd5260a7e 100644 --- a/pySDC/implementations/problem_classes/Lorenz.py +++ b/pySDC/implementations/problem_classes/Lorenz.py @@ -32,9 +32,10 @@ class LorenzAttractor(ptype): .. math:: \frac{d y_3(t)}{dt} = y_1 (t) y_2 (t) - \beta y_3 (t) - with initial condition :math:`(y_1(0), y_2(0), y_3(0))^T = (1, 1, 1)^T` for :math:`t \in [0, 1]`. The problem parameters - for this problem are :math:`\sigma = 10`, :math:`\rho = 28` and :math:`\beta = 8/3`. Lorenz chose these parameters such - that the Reynolds number :math:`\rho` is slightly supercritical as to provoke instability of steady convection. + with initial condition :math:`(y_1(0), y_2(0), y_3(0))^T = (1, 1, 1)^T` (default) for :math:`t \in [0, 1]`. + The problem parameters for this problem are :math:`\sigma = 10`, :math:`\rho = 28` and :math:`\beta = 8/3`. 
+ Lorenz chose these parameters such that the Reynolds number :math:`\rho` is slightly supercritical + as to provoke instability of steady convection. Parameters ---------- @@ -44,6 +45,8 @@ class LorenzAttractor(ptype): Parameter :math:`\rho` of the problem. beta : float, optional Parameter :math:`\beta` of the problem. + u0 : tuple, optional + Initial solution :math:`u_0` of the problem. newton_tol : float, optional Tolerance for Newton for termination. newton_maxiter : int, optional @@ -58,7 +61,9 @@ class LorenzAttractor(ptype): dtype_u = mesh dtype_f = mesh - def __init__(self, sigma=10.0, rho=28.0, beta=8.0 / 3.0, newton_tol=1e-9, newton_maxiter=99, stop_at_nan=True): + def __init__( + self, sigma=10.0, rho=28.0, beta=8.0 / 3.0, u0=(1, 1, 1), newton_tol=1e-9, newton_maxiter=99, stop_at_nan=True + ): """Initialization routine""" nvars = 3 @@ -66,7 +71,7 @@ def __init__(self, sigma=10.0, rho=28.0, beta=8.0 / 3.0, newton_tol=1e-9, newton super().__init__(init=(nvars, None, np.dtype('float64'))) self._makeAttributeAndRegister('nvars', 'stop_at_nan', localVars=locals(), readOnly=True) self._makeAttributeAndRegister( - 'sigma', 'rho', 'beta', 'newton_tol', 'newton_maxiter', localVars=locals(), readOnly=False + 'sigma', 'rho', 'beta', 'u0', 'newton_tol', 'newton_maxiter', localVars=locals(), readOnly=False ) self.work_counters['newton'] = WorkCounter() self.work_counters['rhs'] = WorkCounter() @@ -225,5 +230,5 @@ def eval_rhs(t, u): me[:] = self.generate_scipy_reference_solution(eval_rhs, t, u_init, t_init) else: - me[:] = 1.0 + me[:] = self.u0 return me diff --git a/pySDC/implementations/problem_classes/NonlinearSchroedinger_MPIFFT.py b/pySDC/implementations/problem_classes/NonlinearSchroedinger_MPIFFT.py index 66c96c84ae..70c628ec1a 100644 --- a/pySDC/implementations/problem_classes/NonlinearSchroedinger_MPIFFT.py +++ b/pySDC/implementations/problem_classes/NonlinearSchroedinger_MPIFFT.py @@ -1,18 +1,14 @@ import numpy as np -from scipy.optimize import 
newton_krylov, root +from scipy.optimize import newton_krylov from scipy.optimize.nonlin import NoConvergence -import scipy.sparse as sp -from mpi4py import MPI -from mpi4py_fft import PFFT from pySDC.core.Errors import ProblemError -from pySDC.core.Problem import ptype, WorkCounter -from pySDC.implementations.datatype_classes.mesh import mesh, imex_mesh +from pySDC.core.Problem import WorkCounter +from pySDC.implementations.problem_classes.generic_MPIFFT_Laplacian import IMEX_Laplacian_MPIFFT +from pySDC.implementations.datatype_classes.mesh import mesh -from mpi4py_fft import newDistArray - -class nonlinearschroedinger_imex(ptype): +class nonlinearschroedinger_imex(IMEX_Laplacian_MPIFFT): r""" Example implementing the :math:`N`-dimensional nonlinear Schrödinger equation with periodic boundary conditions @@ -51,130 +47,17 @@ class nonlinearschroedinger_imex(ptype): Journal of Parallel and Distributed Computing (2019). """ - dtype_u = mesh - dtype_f = imex_mesh - - def __init__(self, nvars=None, spectral=False, L=2 * np.pi, c=1.0, comm=MPI.COMM_WORLD): + def __init__(self, c=1.0, **kwargs): """Initialization routine""" - - if nvars is None: - nvars = (128, 128) - - if not L == 2.0 * np.pi: - raise ProblemError(f'Setup not implemented, L has to be 2pi, got {L}') + super().__init__(L=2 * np.pi, alpha=1j, dtype='D', **kwargs) if not (c == 0.0 or c == 1.0): raise ProblemError(f'Setup not implemented, c has to be 0 or 1, got {c}') + self._makeAttributeAndRegister('c', localVars=locals(), readOnly=True) - if not (isinstance(nvars, tuple) and len(nvars) > 1): - raise ProblemError('Need at least two dimensions') - - # Creating FFT structure - self.ndim = len(nvars) - axes = tuple(range(self.ndim)) - self.fft = PFFT(comm, list(nvars), axes=axes, dtype=np.complex128, collapse=True) - - # get test data to figure out type and dimensions - tmp_u = newDistArray(self.fft, spectral) - - L = np.array([L] * self.ndim, dtype=float) - - # invoke super init, passing the communicator 
and the local dimensions as init - super(nonlinearschroedinger_imex, self).__init__(init=(tmp_u.shape, comm, tmp_u.dtype)) - self._makeAttributeAndRegister('nvars', 'spectral', 'L', 'c', 'comm', localVars=locals(), readOnly=True) - - # get local mesh - X = np.ogrid[self.fft.local_slice(False)] - N = self.fft.global_shape() - for i in range(len(N)): - X[i] = X[i] * self.L[i] / N[i] - self.X = [np.broadcast_to(x, self.fft.shape(False)) for x in X] - - # get local wavenumbers and Laplace operator - s = self.fft.local_slice() - N = self.fft.global_shape() - k = [np.fft.fftfreq(n, 1.0 / n).astype(int) for n in N] - K = [ki[si] for ki, si in zip(k, s)] - Ks = np.meshgrid(*K, indexing='ij', sparse=True) - Lp = 2 * np.pi / self.L - for i in range(self.ndim): - Ks[i] = (Ks[i] * Lp[i]).astype(float) - K = [np.broadcast_to(k, self.fft.shape(True)) for k in Ks] - K = np.array(K).astype(float) - self.K2 = np.sum(K * K, 0, dtype=float) - - # Need this for diagnostics - self.dx = self.L / nvars[0] - self.dy = self.L / nvars[1] - - # work counters - self.work_counters['rhs'] = WorkCounter() - - def eval_f(self, u, t): - """ - Routine to evaluate the right-hand side of the problem. - - Parameters - ---------- - u : dtype_u - Current values of the numerical solution. - t : float - Current time at which the numerical solution is computed. - - Returns - ------- - f : dtype_f - The right-hand side of the problem. - """ - - f = self.dtype_f(self.init) - - if self.spectral: - f.impl = -self.K2 * 1j * u - tmp = self.fft.backward(u) - tmpf = self.ndim * self.c * 2j * np.absolute(tmp) ** 2 * tmp - f.expl[:] = self.fft.forward(tmpf) - - else: - u_hat = self.fft.forward(u) - lap_u_hat = -self.K2 * 1j * u_hat - f.impl[:] = self.fft.backward(lap_u_hat, f.impl) - f.expl = self.ndim * self.c * 2j * np.absolute(u) ** 2 * u - - self.work_counters['rhs']() - return f - - def solve_system(self, rhs, factor, u0, t): - """ - Simple FFT solver for the diffusion part. 
- - Parameters - ---------- - rhs : dtype_f - Right-hand side for the linear system. - factor : float - Abbrev. for the node-to-node stepsize (or any other factor required). - u0 : dtype_u - Initial guess for the iterative solver (not used here so far). - t : float - Current time (e.g. for time-dependent BCs). - - Returns - ------- - me : dtype_u - The solution as mesh. - """ - - if self.spectral: - me = rhs / (1.0 + factor * self.K2 * 1j) - - else: - me = self.dtype_u(self.init) - rhs_hat = self.fft.forward(rhs) - rhs_hat /= 1.0 + factor * self.K2 * 1j - me[:] = self.fft.backward(rhs_hat) - - return me + def _eval_explicit_part(self, u, t, f_expl): + f_expl[:] = self.ndim * self.c * 2j * self.xp.absolute(u) ** 2 * u + return f_expl def u_exact(self, t, **kwargs): r""" @@ -198,9 +81,9 @@ def u_exact(self, t, **kwargs): def nls_exact_1D(t, x, c): ae = 1.0 / np.sqrt(2.0) * np.exp(1j * t) if c != 0: - u = ae * ((np.cosh(t) + 1j * np.sinh(t)) / (np.cosh(t) - 1.0 / np.sqrt(2.0) * np.cos(x)) - 1.0) + u = ae * ((np.cosh(t) + 1j * np.sinh(t)) / (np.cosh(t) - 1.0 / np.sqrt(2.0) * self.xp.cos(x)) - 1.0) else: - u = np.sin(x) * np.exp(-t * 1j) + u = self.xp.sin(x) * np.exp(-t * 1j) return u @@ -261,13 +144,13 @@ def eval_f(self, u, t): if self.spectral: tmp = self.fft.backward(u) - tmpf = self.ndim * self.c * 2j * np.absolute(tmp) ** 2 * tmp + tmpf = self.ndim * self.c * 2j * self.xp.absolute(tmp) ** 2 * tmp f[:] = -self.K2 * 1j * u + self.fft.forward(tmpf) else: u_hat = self.fft.forward(u) lap_u_hat = -self.K2 * 1j * u_hat - f[:] = self.fft.backward(lap_u_hat) + self.ndim * self.c * 2j * np.absolute(u) ** 2 * u + f[:] = self.fft.backward(lap_u_hat) + self.ndim * self.c * 2j * self.xp.absolute(u) ** 2 * u self.work_counters['rhs']() return f diff --git a/pySDC/implementations/problem_classes/TestEquation_0D.py b/pySDC/implementations/problem_classes/TestEquation_0D.py index 08c0bed292..602679911f 100644 --- a/pySDC/implementations/problem_classes/TestEquation_0D.py +++ 
b/pySDC/implementations/problem_classes/TestEquation_0D.py @@ -1,8 +1,11 @@ +import numpy as np +import scipy.sparse as nsp + from pySDC.core.Problem import ptype, WorkCounter +from pySDC.implementations.datatype_classes.mesh import mesh -# noinspection PyUnusedLocal -class testequation0dXPU(ptype): +class testequation0d(ptype): r""" This class implements the simple test equation of the form @@ -11,9 +14,6 @@ class testequation0dXPU(ptype): for :math:`A = diag(\lambda_1, .. ,\lambda_n)`. - It is compatible with both CPU and GPU, but requires setting class attributes to select the corresponding numerical - library. Use the classmethod `get_XPU_version` to get a runnable problem class. - Parameters ---------- lambdas : sequence of array_like, optional @@ -27,72 +27,49 @@ class testequation0dXPU(ptype): Diagonal matrix containing :math:`\lambda_1,..,\lambda_n`. """ + xp = np + xsp = nsp + dtype_u = mesh + dtype_f = mesh + @classmethod - def get_XPU_version(cls, version='CPU'): + def setup_GPU(cls): """ - Get a runnable version for either CPU or GPU by specifying this as `version`. - - Parameters - ---------- - version : str - Supply "GPU" or "CPU" to obtain the desired implementation - - Returns - ------- - pySDC.Problem - A problem class implementing the desired implementation + Switch to GPU modules """ - if version == 'CPU': - return testequation0d - elif version == 'GPU': - from pySDC.implementations.problem_classes.TestEquation_0D_GPU import testequation0dGPU + from pySDC.implementations.datatype_classes.cupy_mesh import cupy_mesh + import cupy as cp + import cupyx.scipy.sparse as csp - return testequation0dGPU - else: - from pySDC.core.Errors import ParameterError + cls.xp = cp + cls.xsp = csp + cls.dtype_u = cupy_mesh + cls.dtype_f = cupy_mesh - raise ParameterError(f'Don\'t know version {version}! 
Please choose \'CPU\' or \'GPU\'!') - - def __init__(self, lambdas=None, u0=0.0): + def __init__(self, lambdas=None, u0=0.0, useGPU=False): """Initialization routine""" + if useGPU: + self.setup_GPU() + if lambdas is None: re = self.xp.linspace(-30, 19, 50) im = self.xp.linspace(-50, 49, 50) lambdas = self.xp.array([[complex(re[i], im[j]) for i in range(len(re))] for j in range(len(im))]).reshape( (len(re) * len(im)) ) - - assert not any(isinstance(i, list) for i in lambdas), 'ERROR: expect flat list here, got %s' % lambdas - nvars = len(lambdas) - assert nvars > 0, 'ERROR: expect at least one lambda parameter here' + lambdas = self.xp.asarray(lambdas) + assert lambdas.ndim == 1, f'expect flat list here, got {lambdas}' + nvars = lambdas.size + assert nvars > 0, 'expect at least one lambda parameter here' # invoke super init, passing number of dofs, dtype_u and dtype_f super().__init__(init=(nvars, None, self.xp.dtype('complex128'))) lambdas = self.xp.array(lambdas) - self.A = self.__get_A(lambdas, self.xsp) - self._makeAttributeAndRegister('nvars', 'lambdas', 'u0', localVars=locals(), readOnly=True) + self.A = self.xsp.diags(lambdas) + self._makeAttributeAndRegister('nvars', 'lambdas', 'u0', 'useGPU', localVars=locals(), readOnly=True) self.work_counters['rhs'] = WorkCounter() - @staticmethod - def __get_A(lambdas, xsp): - """ - Helper function to assemble FD matrix A in sparse format. - - Parameters - ---------- - lambdas : sequence of array_like - List of lambda parameters. - - Returns - ------- - scipy.sparse.csc_matrix - Diagonal matrix A in CSC format. - """ - - A = xsp.diags(lambdas) - return A - def eval_f(self, u, t): """ Routine to evaluate the right-hand side of the problem. @@ -111,7 +88,8 @@ def eval_f(self, u, t): """ f = self.dtype_f(self.init) - f[:] = self.A.dot(u) + f[:] = u + f *= self.lambdas self.work_counters['rhs']() return f @@ -135,10 +113,11 @@ def solve_system(self, rhs, factor, u0, t): me : dtype_u The solution as mesh. 
""" - me = self.dtype_u(self.init) - L = self.splu(self.xsp.eye(self.nvars, format='csc') - factor * self.A) - me[:] = L.solve(rhs) + L = 1 - factor * self.lambdas + L[L == 0] = 1 # to avoid potential divisions by zeros + me[:] = rhs + me /= L return me def u_exact(self, t, u_init=None, t_init=None): @@ -166,18 +145,3 @@ def u_exact(self, t, u_init=None, t_init=None): me = self.dtype_u(self.init) me[:] = u_init * self.xp.exp((t - t_init) * self.lambdas) return me - - -class testequation0d(testequation0dXPU): - """ - CPU implementation of `testequation0dXPU` - """ - - from pySDC.implementations.datatype_classes.mesh import mesh - import numpy as xp - import scipy.sparse as xsp - from scipy.sparse.linalg import splu as _splu - - dtype_u = mesh - dtype_f = mesh - splu = staticmethod(_splu) diff --git a/pySDC/implementations/problem_classes/TestEquation_0D_GPU.py b/pySDC/implementations/problem_classes/TestEquation_0D_GPU.py deleted file mode 100644 index b4bd15f51b..0000000000 --- a/pySDC/implementations/problem_classes/TestEquation_0D_GPU.py +++ /dev/null @@ -1,16 +0,0 @@ -from pySDC.implementations.problem_classes.TestEquation_0D import testequation0dXPU - - -class testequation0dGPU(testequation0dXPU): - """ - GPU implementation of `testequation0dXPU` - """ - - from pySDC.implementations.datatype_classes.cupy_mesh import cupy_mesh - import cupy as xp - import cupyx.scipy.sparse as xsp - from cupyx.scipy.sparse.linalg import splu as _splu - - dtype_u = cupy_mesh - dtype_f = cupy_mesh - splu = staticmethod(_splu) diff --git a/pySDC/implementations/problem_classes/generic_MPIFFT_Laplacian.py b/pySDC/implementations/problem_classes/generic_MPIFFT_Laplacian.py new file mode 100644 index 0000000000..eb3b0bffa2 --- /dev/null +++ b/pySDC/implementations/problem_classes/generic_MPIFFT_Laplacian.py @@ -0,0 +1,181 @@ +import numpy as np +from mpi4py import MPI +from mpi4py_fft import PFFT + +from pySDC.core.Errors import ProblemError +from pySDC.core.Problem import ptype, 
WorkCounter +from pySDC.implementations.datatype_classes.mesh import mesh, imex_mesh + +from mpi4py_fft import newDistArray + + +class IMEX_Laplacian_MPIFFT(ptype): + r""" + Generic base class for IMEX problems using a spectral method to solve the Laplacian implicitly and a possible rest + explicitly. The FFTs are done with``mpi4py-fft`` [1]_. + + Parameters + ---------- + nvars : tuple, optional + Spatial resolution + spectral : bool, optional + If True, the solution is computed in spectral space. + L : float, optional + Denotes the period of the function to be approximated for the Fourier transform. + alpha : float, optional + Multiplicative factor before the Laplacian + comm : MPI.COMM_World + Communicator for parallelisation. + + Attributes + ---------- + fft : PFFT + Object for parallel FFT transforms. + X : mesh-grid + Grid coordinates in real space. + K2 : matrix + Laplace operator in spectral space. + + References + ---------- + .. [1] Lisandro Dalcin, Mikael Mortensen, David E. Keyes. Fast parallel multidimensional FFT using advanced MPI. + Journal of Parallel and Distributed Computing (2019). 
+ """ + + dtype_u = mesh + dtype_f = imex_mesh + + xp = np + + def __init__(self, nvars=None, spectral=False, L=2 * np.pi, alpha=1.0, comm=MPI.COMM_WORLD, dtype='d', x0=0.0): + """Initialization routine""" + + if nvars is None: + nvars = (128, 128) + + if not (isinstance(nvars, tuple) and len(nvars) > 1): + raise ProblemError('Need at least two dimensions for distributed FFTs') + + # Creating FFT structure + self.ndim = len(nvars) + axes = tuple(range(self.ndim)) + self.fft = PFFT(comm, list(nvars), axes=axes, dtype=dtype, collapse=True) + + # get test data to figure out type and dimensions + tmp_u = newDistArray(self.fft, spectral) + + L = np.array([L] * self.ndim, dtype=float) + + # invoke super init, passing the communicator and the local dimensions as init + super().__init__(init=(tmp_u.shape, comm, tmp_u.dtype)) + self._makeAttributeAndRegister( + 'nvars', 'spectral', 'L', 'alpha', 'comm', 'x0', localVars=locals(), readOnly=True + ) + + # get local mesh + X = self.xp.ogrid[self.fft.local_slice(False)] + N = self.fft.global_shape() + for i in range(len(N)): + X[i] = x0 + (X[i] * L[i] / N[i]) + self.X = [self.xp.broadcast_to(x, self.fft.shape(False)) for x in X] + + # get local wavenumbers and Laplace operator + s = self.fft.local_slice() + N = self.fft.global_shape() + k = [self.xp.fft.fftfreq(n, 1.0 / n).astype(int) for n in N] + K = [ki[si] for ki, si in zip(k, s)] + Ks = self.xp.meshgrid(*K, indexing='ij', sparse=True) + Lp = 2 * np.pi / self.L + for i in range(self.ndim): + Ks[i] = (Ks[i] * Lp[i]).astype(float) + K = [self.xp.broadcast_to(k, self.fft.shape(True)) for k in Ks] + K = self.xp.array(K).astype(float) + self.K2 = self.xp.sum(K * K, 0, dtype=float) # Laplacian in spectral space + + # Need this for diagnostics + self.dx = self.L[0] / nvars[0] + self.dy = self.L[1] / nvars[1] + + # work counters + self.work_counters['rhs'] = WorkCounter() + + def eval_f(self, u, t): + """ + Routine to evaluate the right-hand side of the problem. 
+ + Parameters + ---------- + u : dtype_u + Current values of the numerical solution. + t : float + Current time at which the numerical solution is computed. + + Returns + ------- + f : dtype_f + The right-hand side of the problem. + """ + + f = self.dtype_f(self.init) + + f.impl[:] = self._eval_Laplacian(u, f.impl) + + if self.spectral: + tmp = self.fft.backward(u) + tmp[:] = self._eval_explicit_part(tmp, t, tmp) + f.expl[:] = self.fft.forward(tmp) + + else: + f.expl[:] = self._eval_explicit_part(u, t, f.expl) + + self.work_counters['rhs']() + return f + + def _eval_Laplacian(self, u, f_impl, alpha=None): + alpha = alpha if alpha else self.alpha + if self.spectral: + f_impl[:] = -alpha * self.K2 * u + else: + u_hat = self.fft.forward(u) + lap_u_hat = -alpha * self.K2 * u_hat + f_impl[:] = self.fft.backward(lap_u_hat, f_impl) + return f_impl + + def _eval_explicit_part(self, u, t, f_expl): + return f_expl + + def solve_system(self, rhs, factor, u0, t): + """ + Simple FFT solver for the diffusion part. + + Parameters + ---------- + rhs : dtype_f + Right-hand side for the linear system. + factor : float + Abbrev. for the node-to-node stepsize (or any other factor required). + u0 : dtype_u + Initial guess for the iterative solver (not used here so far). + t : float + Current time (e.g. for time-dependent BCs). + + Returns + ------- + me : dtype_u + The solution as mesh. 
+ """ + me = self.dtype_u(self.init) + me[:] = self._invert_Laplacian(me, factor, rhs) + + return me + + def _invert_Laplacian(self, me, factor, rhs, alpha=None): + alpha = alpha if alpha else self.alpha + if self.spectral: + me[:] = rhs / (1.0 + factor * alpha * self.K2) + + else: + rhs_hat = self.fft.forward(rhs) + rhs_hat /= 1.0 + factor * alpha * self.K2 + me[:] = self.fft.backward(rhs_hat) + return me diff --git a/pySDC/implementations/problem_classes/odeScalar.py b/pySDC/implementations/problem_classes/odeScalar.py new file mode 100644 index 0000000000..7463f59f3f --- /dev/null +++ b/pySDC/implementations/problem_classes/odeScalar.py @@ -0,0 +1,213 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Implementation of scalar test problem ODEs. + + +Reference : + +Van der Houwen, P. J., & Sommeijer, B. P. (1991). Iterated Runge–Kutta methods +on parallel computers. SIAM journal on scientific and statistical computing, +12(5), 1000-1028. +""" +import numpy as np + +from pySDC.core.Errors import ProblemError +from pySDC.core.Problem import ptype, WorkCounter +from pySDC.implementations.datatype_classes.mesh import mesh + + +class ProtheroRobinson(ptype): + r""" + Implement the Prothero-Robinson problem: + + .. math:: + \frac{du}{dt} = -\frac{u-g(t)}{\epsilon} + \frac{dg}{dt}, \quad u(0) = g(0)., + + with :math:`\epsilon` a stiffness parameter, that makes the problem more stiff + the smaller it is (usual taken value is :math:`\epsilon=1e^{-3}`). + Exact solution is given by :math:`u(t)=g(t)`, and this implementation uses + :math:`g(t)=\cos(t)`. + + Implement also the non-linear form of this problem: + + .. math:: + \frac{du}{dt} = -\frac{u^3-g(t)^3}{\epsilon} + \frac{dg}{dt}, \quad u(0) = g(0). + + To use an other exact solution, one just have to derivate this class + and overload the `g` and `dg` methods. 
For instance, + to use :math:`g(t)=e^{-0.2*t}`, define and use the following class: + + >>> class MyProtheroRobinson(ProtheroRobinson): + >>> + >>> def g(self, t): + >>> return np.exp(-0.2 * t) + >>> + >>> def dg(self, t): + >>> return (-0.2) * np.exp(-0.2 * t) + + Parameters + ---------- + epsilon : float, optional + Stiffness parameter. The default is 1e-3. + nonLinear : bool, optional + Wether or not to use the non-linear form of the problem. The default is False. + newton_maxiter : int, optional + Maximum number of Newton iteration in solve_system. The default is 200. + newton_tol : float, optional + Residuum tolerance for Newton iteration in solve_system. The default is 5e-11. + stop_at_nan : bool, optional + Wheter to stop or not solve_system when getting NAN. The default is True. + + Reference + --------- + A. Prothero and A. Robinson, On the stability and accuracy of one-step methods for solving + stiff systems of ordinary differential equations, Mathematics of Computation, 28 (1974), + pp. 145–162. 
+ """ + + dtype_u = mesh + dtype_f = mesh + + def __init__(self, epsilon=1e-3, nonLinear=False, newton_maxiter=200, newton_tol=5e-11, stop_at_nan=True): + nvars = 1 + super().__init__((nvars, None, np.dtype('float64'))) + + self.f = self.f_NONLIN if nonLinear else self.f_LIN + self.jac = self.jac_NONLIN if nonLinear else self.jac_LIN + self._makeAttributeAndRegister( + 'epsilon', 'nonLinear', 'newton_maxiter', 'newton_tol', 'stop_at_nan', localVars=locals(), readOnly=True + ) + self.work_counters['newton'] = WorkCounter() + self.work_counters['rhs'] = WorkCounter() + + # ------------------------------------------------------------------------- + # g function (analytical solution), and its first derivative + # ------------------------------------------------------------------------- + def g(self, t): + return np.cos(t) + + def dg(self, t): + return -np.sin(t) + + # ------------------------------------------------------------------------- + # f(u,t) and Jacobian functions + # ------------------------------------------------------------------------- + def f(self, u, t): + raise NotImplementedError() + + def f_LIN(self, u, t): + return -self.epsilon ** (-1) * (u - self.g(t)) + self.dg(t) + + def f_NONLIN(self, u, t): + return -self.epsilon ** (-1) * (u**3 - self.g(t) ** 3) + self.dg(t) + + def jac(self, u, t): + raise NotImplementedError() + + def jac_LIN(self, u, t): + return -self.epsilon ** (-1) + + def jac_NONLIN(self, u, t): + return -self.epsilon ** (-1) * 3 * u**2 + + # ------------------------------------------------------------------------- + # pySDC required methods + # ------------------------------------------------------------------------- + def u_exact(self, t, u_init=None, t_init=None): + r""" + Routine to return initial conditions or exact solution. + + Parameters + ---------- + t : float + Time at which the exact solution is computed. + u_init : dtype_u + Initial conditions for getting the exact solution. + t_init : float + The starting time. 
+ + Returns + ------- + u : dtype_u + The exact solution. + """ + u = self.dtype_u(self.init) + u[:] = self.g(t) + return u + + def eval_f(self, u, t): + """ + Routine to evaluate the right-hand side of the problem. + + Parameters + ---------- + u : dtype_u + Current values of the numerical solution. + t : float + Current time of the numerical solution is computed (not used here). + + Returns + ------- + f : dtype_f + The right-hand side of the problem (one component). + """ + + f = self.dtype_f(self.init) + f[:] = self.f(u, t) + self.work_counters['rhs']() + return f + + def solve_system(self, rhs, dt, u0, t): + """ + Simple Newton solver for the nonlinear equation + + Parameters + ---------- + rhs : dtype_f + Right-hand side for the nonlinear system. + dt : float + Abbrev. for the node-to-node stepsize (or any other factor required). + u0 : dtype_u + Initial guess for the iterative solver. + t : float + Time of the updated solution (e.g. for time-dependent BCs). + + Returns + ------- + u : dtype_u + The solution as mesh. + """ + # create new mesh object from u0 and set initial values for iteration + u = self.dtype_u(u0) + + # start newton iteration + n, res = 0, np.inf + while n < self.newton_maxiter: + # form the function g with g(u) = 0 + g = u - dt * self.f(u, t) - rhs + + # if g is close to 0, then we are done + res = np.linalg.norm(g, np.inf) + if res < self.newton_tol or np.isnan(res): + break + + # assemble dg/du + dg = 1 - dt * self.jac(u, t) + + # newton update: u1 = u0 - g/dg + u -= dg ** (-1) * g + + # increase iteration count and work counter + n += 1 + self.work_counters['newton']() + + if np.isnan(res) and self.stop_at_nan: + raise ProblemError('Newton got nan after %i iterations, aborting...' % n) + elif np.isnan(res): # pragma: no cover + self.logger.warning('Newton got nan after %i iterations...' 
% n) + + if n == self.newton_maxiter: + raise ProblemError('Newton did not converge after %i iterations, error is %s' % (n, res)) + + return u diff --git a/pySDC/implementations/problem_classes/odeSystem.py b/pySDC/implementations/problem_classes/odeSystem.py new file mode 100644 index 0000000000..1c5127ea2d --- /dev/null +++ b/pySDC/implementations/problem_classes/odeSystem.py @@ -0,0 +1,926 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Implementation of systems test problem ODEs. + + +Reference : + +Van der Houwen, P. J., & Sommeijer, B. P. (1991). Iterated Runge–Kutta methods +on parallel computers. SIAM journal on scientific and statistical computing, +12(5), 1000-1028. +""" +import numpy as np + +from pySDC.core.Errors import ProblemError +from pySDC.core.Problem import ptype, WorkCounter +from pySDC.implementations.datatype_classes.mesh import mesh + + +class ProtheroRobinsonAutonomous(ptype): + r""" + Implement the Prothero-Robinson problem into autonomous form: + + .. math:: + \begin{eqnarray*} + \frac{du}{dt} &=& -\frac{u^3-g(v)^3}{\epsilon} + \frac{dg}{dv}, &\quad u(0) = g(0),\\ + \frac{dv}{dt} &=& 1, &\quad v(0) = 0, + \end{eqnarray*} + + with :math:`\epsilon` a stiffness parameter, that makes the problem more stiff + the smaller it is (usual taken value is :math:`\epsilon=1e^{-3}`). + Exact solution is given by :math:`u(t)=g(t),\;v(t)=t`, and this implementation uses + :math:`g(t)=\cos(t)`. + + Implement also the non-linear form of this problem: + + .. math:: + \frac{du}{dt} = -\frac{u^3-g(v)^3}{\epsilon} + \frac{dg}{dv}, \quad u(0) = g(0). + + To use an other exact solution, one just have to derivate this class + and overload the `g`, `dg` and `dg2` methods. 
For instance, + to use :math:`g(t)=e^{-0.2t}`, define and use the following class: + + >>> class MyProtheroRobinson(ProtheroRobinsonAutonomous): + >>> + >>> def g(self, t): + >>> return np.exp(-0.2 * t) + >>> + >>> def dg(self, t): + >>> return (-0.2) * np.exp(-0.2 * t) + >>> + >>> def dg2(self, t): + >>> return (-0.2) ** 2 * np.exp(-0.2 * t) + + Parameters + ---------- + epsilon : float, optional + Stiffness parameter. The default is 1e-3. + nonLinear : bool, optional + Wether or not to use the non-linear form of the problem. The default is False. + newton_maxiter : int, optional + Maximum number of Newton iteration in solve_system. The default is 200. + newton_tol : float, optional + Residuum tolerance for Newton iteration in solve_system. The default is 5e-11. + stop_at_nan : bool, optional + Wheter to stop or not solve_system when getting NAN. The default is True. + + Reference + --------- + A. Prothero and A. Robinson, On the stability and accuracy of one-step methods for solving + stiff systems of ordinary differential equations, Mathematics of Computation, 28 (1974), + pp. 145–162. 
+ """ + + dtype_u = mesh + dtype_f = mesh + + def __init__(self, epsilon=1e-3, nonLinear=False, newton_maxiter=200, newton_tol=5e-11, stop_at_nan=True): + nvars = 2 + super().__init__((nvars, None, np.dtype('float64'))) + + self.f = self.f_NONLIN if nonLinear else self.f_LIN + self.dgInv = self.dgInv_NONLIN if nonLinear else self.dgInv_LIN + self._makeAttributeAndRegister( + 'epsilon', 'nonLinear', 'newton_maxiter', 'newton_tol', 'stop_at_nan', localVars=locals(), readOnly=True + ) + self.work_counters['newton'] = WorkCounter() + self.work_counters['rhs'] = WorkCounter() + + # ------------------------------------------------------------------------- + # g function (analytical solution), and its first and second derivative + # ------------------------------------------------------------------------- + def g(self, t): + return np.cos(t) + + def dg(self, t): + return -np.sin(t) + + def dg2(self, t): + return -np.cos(t) + + # ------------------------------------------------------------------------- + # f(u,t) and Jacobian functions + # ------------------------------------------------------------------------- + def f(self, u, t): + raise NotImplementedError() + + def f_LIN(self, u, t): + return -self.epsilon ** (-1) * (u - self.g(t)) + self.dg(t) + + def f_NONLIN(self, u, t): + return -self.epsilon ** (-1) * (u**3 - self.g(t) ** 3) + self.dg(t) + + def dgInv(self, u, t): + raise NotImplementedError() + + def dgInv_LIN(self, u, t, dt): + e = self.epsilon + g1, g2 = self.dg(t), self.dg2(t) + return np.array([[1 / (dt / e + 1), (dt * g2 + dt * g1 / e) / (dt / e + 1)], [0, 1]]) + + def dgInv_NONLIN(self, u, t, dt): + e = self.epsilon + g, g1, g2 = self.g(t), self.dg(t), self.dg2(t) + return np.array( + [[1 / (3 * dt * u**2 / e + 1), (dt * g2 + 3 * dt * g**2 * g1 / e) / (3 * dt * u**2 / e + 1)], [0, 1]] + ) + + # ------------------------------------------------------------------------- + # pySDC required methods + # 
------------------------------------------------------------------------- + def u_exact(self, t, u_init=None, t_init=None): + r""" + Routine to return initial conditions or exact solutions. + + Parameters + ---------- + t : float + Time at which the exact solution is computed. + u_init : dtype_u + Initial conditions for getting the exact solution. + t_init : float + The starting time. + + Returns + ------- + u : dtype_u + The exact solution. + """ + u = self.dtype_u(self.init) + u[0] = self.g(t) + u[1] = t + return u + + def eval_f(self, u, t): + """ + Routine to evaluate the right-hand side of the problem. + + Parameters + ---------- + u : dtype_u + Current values of the numerical solution. + t : float + Current time of the numerical solution is computed (not used here). + + Returns + ------- + f : dtype_f + The right-hand side of the problem (one component). + """ + + f = self.dtype_f(self.init) + u, t = u + f[0] = self.f(u, t) + f[1] = 1 + self.work_counters['rhs']() + return f + + def solve_system(self, rhs, dt, u0, t): + """ + Simple Newton solver for the nonlinear equation + + Parameters + ---------- + rhs : dtype_f + Right-hand side for the nonlinear system. + dt : float + Abbrev. for the node-to-node stepsize (or any other factor required). + u0 : dtype_u + Initial guess for the iterative solver. + t : float + Time of the updated solution (e.g. for time-dependent BCs). + + Returns + ------- + u : dtype_u + The solution as mesh. 
+ """ + # create new mesh object from u0 and set initial values for iteration + u = self.dtype_u(u0) + + # start newton iteration + n, res = 0, np.inf + while n < self.newton_maxiter: + # evaluate RHS + f = self.dtype_u(u) + f[0] = self.f(*u) + f[1] = 1 + + # form the function g with g(u) = 0 + g = u - dt * f - rhs + + # if g is close to 0, then we are done + res = np.linalg.norm(g, np.inf) + if res < self.newton_tol or np.isnan(res): + break + + # assemble (dg/du)^{-1} + dgInv = self.dgInv(u[0], u[1], dt) + # newton update: u1 = u0 - g/dg + u -= dgInv @ g + + # increase iteration count and work counter + n += 1 + self.work_counters['newton']() + + if np.isnan(res) and self.stop_at_nan: + raise ProblemError('Newton got nan after %i iterations, aborting...' % n) + elif np.isnan(res): # pragma: no cover + self.logger.warning('Newton got nan after %i iterations...' % n) + + if n == self.newton_maxiter: + raise ProblemError('Newton did not converge after %i iterations, error is %s' % (n, res)) + + return u + + +class Kaps(ptype): + r""" + Implement the Kaps problem: + + .. math:: + \begin{eqnarray*} + \frac{du}{dt} &=& -(2+\epsilon^{-1})u + \frac{v^2}{\epsilon}, &\quad u(0) = 1,\\ + \frac{dv}{dt} &=& u - v(1+v), &\quad v(0) = 1, + \end{eqnarray*} + + with :math:`\epsilon` a stiffness parameter, that makes the problem more stiff + the smaller it is (usual taken value is :math:`\epsilon=1e^{-3}`). + Exact solution is given by :math:`u(t)=e^{-2t},\;v(t)=e^{-t}`. + + Parameters + ---------- + epsilon : float, optional + Stiffness parameter. The default is 1e-3. + newton_maxiter : int, optional + Maximum number of Newton iteration in solve_system. The default is 200. + newton_tol : float, optional + Residuum tolerance for Newton iteration in solve_system. The default is 5e-11. + stop_at_nan : bool, optional + Wheter to stop or not solve_system when getting NAN. The default is True. + + Reference + --------- + Van der Houwen, P. J., & Sommeijer, B. P. (1991). 
Iterated Runge–Kutta methods + on parallel computers. SIAM journal on scientific and statistical computing, + 12(5), 1000-1028. + """ + + dtype_u = mesh + dtype_f = mesh + + def __init__(self, epsilon=1e-3, newton_maxiter=200, newton_tol=5e-11, stop_at_nan=True): + nvars = 2 + super().__init__((nvars, None, np.dtype('float64'))) + + self._makeAttributeAndRegister( + 'epsilon', 'newton_maxiter', 'newton_tol', 'stop_at_nan', localVars=locals(), readOnly=True + ) + self.work_counters['newton'] = WorkCounter() + self.work_counters['rhs'] = WorkCounter() + + def u_exact(self, t, u_init=None, t_init=None): + r""" + Routine to return initial conditions or exact solutions. + + Parameters + ---------- + t : float + Time at which the exact solution is computed. + u_init : dtype_u + Initial conditions for getting the exact solution. + t_init : float + The starting time. + + Returns + ------- + u : dtype_u + The exact solution. + """ + u = self.dtype_u(self.init) + u[:] = [np.exp(-2 * t), np.exp(-t)] + return u + + def eval_f(self, u, t): + """ + Routine to evaluate the right-hand side of the problem. + + Parameters + ---------- + u : dtype_u + Current values of the numerical solution. + t : float + Current time of the numerical solution is computed (not used here). + + Returns + ------- + f : dtype_f + The right-hand side of the problem (one component). + """ + f = self.dtype_f(self.init) + eps = self.epsilon + x, y = u + + f[:] = [-(2 + 1 / eps) * x + y**2 / eps, x - y * (1 + y)] + self.work_counters['rhs']() + return f + + def solve_system(self, rhs, dt, u0, t): + """ + Simple Newton solver for the nonlinear equation + + Parameters + ---------- + rhs : dtype_f + Right-hand side for the nonlinear system. + dt : float + Abbrev. for the node-to-node stepsize (or any other factor required). + u0 : dtype_u + Initial guess for the iterative solver. + t : float + Current time (e.g. for time-dependent BCs). + + Returns + ------- + u : dtype_u + The solution as mesh. 
+ """ + # create new mesh object from u0 and set initial values for iteration + u = self.dtype_u(u0) + eps = self.epsilon + + # start newton iteration + n, res = 0, np.inf + while n < self.newton_maxiter: + x, y = u + f = np.array([-(2 + 1 / eps) * x + y**2 / eps, x - y * (1 + y)]) + + # form the function g with g(u) = 0 + g = u - dt * f - rhs + + # if g is close to 0, then we are done + res = np.linalg.norm(g, np.inf) + if res < self.newton_tol or np.isnan(res): + break + + # assemble (dg/du)^(-1) + prefactor = 4 * dt**2 * eps * y + 2 * dt**2 * eps + dt**2 + 2 * dt * eps * y + 3 * dt * eps + dt + eps + dgInv = ( + 1 + / prefactor + * np.array([[2 * dt * eps * y + dt * eps + eps, 2 * dt * y], [dt * eps, 2 * dt * eps + dt + eps]]) + ) + + # newton update: u1 = u0 - g/dg + u -= dgInv @ g + + # increase iteration count and work counter + n += 1 + self.work_counters['newton']() + + if np.isnan(res) and self.stop_at_nan: + raise ProblemError('Newton got nan after %i iterations, aborting...' % n) + elif np.isnan(res): # pragma: no cover + self.logger.warning('Newton got nan after %i iterations...' % n) + + if n == self.newton_maxiter: + raise ProblemError('Newton did not converge after %i iterations, error is %s' % (n, res)) + + return u + + +class ChemicalReaction3Var(ptype): + r""" + Chemical reaction with three components, modeled by the non-linear system: + + .. math:: + \frac{d{\bf u}}{dt} = + \begin{pmatrix} + 0.013+1000u_3 & 0 & 0 \\ + 0 & 2500u_3 0 \\ + 0.013 & 0 & 1000u_1 + 2500u_2 + \end{pmatrix} + {\bf u}, + + with initial solution :math:`u(0)=(0.990731920827, 1.009264413846, -0.366532612659e-5)`. + + Parameters + ---------- + newton_maxiter : int, optional + Maximum number of Newton iteration in solve_system. The default is 200. + newton_tol : float, optional + Residuum tolerance for Newton iteration in solve_system. The default is 5e-11. + stop_at_nan : bool, optional + Wheter to stop or not solve_system when getting NAN. The default is True. 
+ + Reference + --------- + Van der Houwen, P. J., & Sommeijer, B. P. (1991). Iterated Runge–Kutta methods + on parallel computers. SIAM journal on scientific and statistical computing, + 12(5), 1000-1028. + """ + + dtype_u = mesh + dtype_f = mesh + + def __init__(self, newton_maxiter=200, newton_tol=5e-11, stop_at_nan=True): + nvars = 3 + u0 = (0.990731920827, 1.009264413846, -0.366532612659e-5) + super().__init__((nvars, None, np.dtype('float64'))) + + self._makeAttributeAndRegister( + 'u0', 'newton_maxiter', 'newton_tol', 'stop_at_nan', localVars=locals(), readOnly=True + ) + self.work_counters['newton'] = WorkCounter() + self.work_counters['rhs'] = WorkCounter() + + def u_exact(self, t, u_init=None, t_init=None): + r""" + Routine to return initial conditions or to approximate exact solution using ``SciPy``. + + Parameters + ---------- + t : float + Time at which the approximated exact solution is computed. + u_init : pySDC.implementations.problem_classes.Lorenz.dtype_u + Initial conditions for getting the exact solution. + t_init : float + The starting time. + + Returns + ------- + me : dtype_u + The approximated exact solution. + """ + + me = self.dtype_u(self.init) + + if t > 0: + + def eval_rhs(t, u): + r""" + Evaluate the right hand side, but switch the arguments for ``SciPy``. + + Args: + t (float): Time + u (numpy.ndarray): Solution at time t + + Returns: + (numpy.ndarray): Right hand side evaluation + """ + return self.eval_f(u, t) + + me[:] = self.generate_scipy_reference_solution(eval_rhs, t, u_init, t_init) + else: + me[:] = self.u0 + return me + + def eval_f(self, u, t): + """ + Routine to evaluate the right-hand side of the problem. + + Parameters + ---------- + u : dtype_u + Current values of the numerical solution. + t : float + Current time of the numerical solution is computed (not used here). + + Returns + ------- + f : dtype_f + The right-hand side of the problem (one component). 
+ """ + f = self.dtype_f(self.init) + c1, c2, c3 = u + + f[:] = -np.array([0.013 * c1 + 1000 * c3 * c1, 2500 * c3 * c2, 0.013 * c1 + 1000 * c1 * c3 + 2500 * c2 * c3]) + self.work_counters['rhs']() + return f + + def solve_system(self, rhs, dt, u0, t): + """ + Simple Newton solver for the nonlinear equation + + Parameters + ---------- + rhs : dtype_f + Right-hand side for the nonlinear system. + dt : float + Abbrev. for the node-to-node stepsize (or any other factor required). + u0 : dtype_u + Initial guess for the iterative solver. + t : float + Current time (e.g. for time-dependent BCs). + + Returns + ------- + u : dtype_u + The solution as mesh. + """ + # create new mesh object from u0 and set initial values for iteration + u = self.dtype_u(u0) + + # start newton iteration + n, res = 0, np.inf + while n < self.newton_maxiter: + c1, c2, c3 = u + f = -np.array([0.013 * c1 + 1000 * c3 * c1, 2500 * c3 * c2, 0.013 * c1 + 1000 * c1 * c3 + 2500 * c2 * c3]) + + # form the function g with g(u) = 0 + g = u - dt * f - rhs + + # if g is close to 0, then we are done + res = np.linalg.norm(g, np.inf) + if res < self.newton_tol or np.isnan(res): + break + + # assemble (dg/du)^(-1) + dgInv = np.array( + [ + [ + ( + 2500000000.0 * c1 * c3**2 * dt**3 + + 32500.0 * c1 * c3 * dt**3 + + 3500000.0 * c1 * c3 * dt**2 + + 13.0 * c1 * dt**2 + + 1000.0 * c1 * dt + + 2500000.0 * c2 * c3 * dt**2 + + 32.5 * c2 * dt**2 + + 2500.0 * c2 * dt + + 2500000.0 * c3**2 * dt**2 + + 32.5 * c3 * dt**2 + + 3500.0 * c3 * dt + + 0.013 * dt + + 1.0 + ) + / ( + 2500000000.0 * c1 * c3**2 * dt**3 + + 32500.0 * c1 * c3 * dt**3 + + 3500000.0 * c1 * c3 * dt**2 + + 13.0 * c1 * dt**2 + + 1000.0 * c1 * dt + + 2500000000.0 * c2 * c3**2 * dt**3 + + 65000.0 * c2 * c3 * dt**3 + + 5000000.0 * c2 * c3 * dt**2 + + 0.4225 * c2 * dt**3 + + 65.0 * c2 * dt**2 + + 2500.0 * c2 * dt + + 2500000000.0 * c3**3 * dt**3 + + 65000.0 * c3**2 * dt**3 + + 6000000.0 * c3**2 * dt**2 + + 0.4225 * c3 * dt**3 + + 91.0 * c3 * dt**2 + + 4500.0 * 
c3 * dt + + 0.000169 * dt**2 + + 0.026 * dt + + 1.0 + ), + (2500000000.0 * c1 * c3**2 * dt**3 + 32500.0 * c1 * c3 * dt**3 + 2500000.0 * c1 * c3 * dt**2) + / ( + 2500000000.0 * c1 * c3**2 * dt**3 + + 32500.0 * c1 * c3 * dt**3 + + 3500000.0 * c1 * c3 * dt**2 + + 13.0 * c1 * dt**2 + + 1000.0 * c1 * dt + + 2500000000.0 * c2 * c3**2 * dt**3 + + 65000.0 * c2 * c3 * dt**3 + + 5000000.0 * c2 * c3 * dt**2 + + 0.4225 * c2 * dt**3 + + 65.0 * c2 * dt**2 + + 2500.0 * c2 * dt + + 2500000000.0 * c3**3 * dt**3 + + 65000.0 * c3**2 * dt**3 + + 6000000.0 * c3**2 * dt**2 + + 0.4225 * c3 * dt**3 + + 91.0 * c3 * dt**2 + + 4500.0 * c3 * dt + + 0.000169 * dt**2 + + 0.026 * dt + + 1.0 + ), + ( + -2500000000.0 * c1 * c3**2 * dt**3 + - 32500.0 * c1 * c3 * dt**3 + - 3500000.0 * c1 * c3 * dt**2 + - 13.0 * c1 * dt**2 + - 1000.0 * c1 * dt + ) + / ( + 2500000000.0 * c1 * c3**2 * dt**3 + + 32500.0 * c1 * c3 * dt**3 + + 3500000.0 * c1 * c3 * dt**2 + + 13.0 * c1 * dt**2 + + 1000.0 * c1 * dt + + 2500000000.0 * c2 * c3**2 * dt**3 + + 65000.0 * c2 * c3 * dt**3 + + 5000000.0 * c2 * c3 * dt**2 + + 0.4225 * c2 * dt**3 + + 65.0 * c2 * dt**2 + + 2500.0 * c2 * dt + + 2500000000.0 * c3**3 * dt**3 + + 65000.0 * c3**2 * dt**3 + + 6000000.0 * c3**2 * dt**2 + + 0.4225 * c3 * dt**3 + + 91.0 * c3 * dt**2 + + 4500.0 * c3 * dt + + 0.000169 * dt**2 + + 0.026 * dt + + 1.0 + ), + ], + [ + (6250000000.0 * c2 * c3 * dt**2 + 81250.0 * c2 * dt**2) + / ( + 6250000000.0 * c1 * c3 * dt**2 + + 2500000.0 * c1 * dt + + 6250000000.0 * c2 * c3 * dt**2 + + 81250.0 * c2 * dt**2 + + 6250000.0 * c2 * dt + + 6250000000.0 * c3**2 * dt**2 + + 81250.0 * c3 * dt**2 + + 8750000.0 * c3 * dt + + 32.5 * dt + + 2500.0 + ), + ( + 2500000.0 * c1 * dt + + 6250000000.0 * c2 * c3 * dt**2 + + 81250.0 * c2 * dt**2 + + 6250000.0 * c2 * dt + + 2500000.0 * c3 * dt + + 32.5 * dt + + 2500.0 + ) + / ( + 6250000000.0 * c1 * c3 * dt**2 + + 2500000.0 * c1 * dt + + 6250000000.0 * c2 * c3 * dt**2 + + 81250.0 * c2 * dt**2 + + 6250000.0 * c2 * dt + + 6250000000.0 * 
c3**2 * dt**2 + + 81250.0 * c3 * dt**2 + + 8750000.0 * c3 * dt + + 32.5 * dt + + 2500.0 + ), + (-6250000000.0 * c2 * c3 * dt**2 - 81250.0 * c2 * dt**2 - 6250000.0 * c2 * dt) + / ( + 6250000000.0 * c1 * c3 * dt**2 + + 2500000.0 * c1 * dt + + 6250000000.0 * c2 * c3 * dt**2 + + 81250.0 * c2 * dt**2 + + 6250000.0 * c2 * dt + + 6250000000.0 * c3**2 * dt**2 + + 81250.0 * c3 * dt**2 + + 8750000.0 * c3 * dt + + 32.5 * dt + + 2500.0 + ), + ], + [ + (-2500000.0 * c3**2 * dt**2 - 32.5 * c3 * dt**2 - 1000.0 * c3 * dt - 0.013 * dt) + / ( + 2500000.0 * c1 * c3 * dt**2 + + 1000.0 * c1 * dt + + 2500000.0 * c2 * c3 * dt**2 + + 32.5 * c2 * dt**2 + + 2500.0 * c2 * dt + + 2500000.0 * c3**2 * dt**2 + + 32.5 * c3 * dt**2 + + 3500.0 * c3 * dt + + 0.013 * dt + + 1.0 + ), + (-2500000.0 * c3**2 * dt**2 - 32.5 * c3 * dt**2 - 2500.0 * c3 * dt) + / ( + 2500000.0 * c1 * c3 * dt**2 + + 1000.0 * c1 * dt + + 2500000.0 * c2 * c3 * dt**2 + + 32.5 * c2 * dt**2 + + 2500.0 * c2 * dt + + 2500000.0 * c3**2 * dt**2 + + 32.5 * c3 * dt**2 + + 3500.0 * c3 * dt + + 0.013 * dt + + 1.0 + ), + (2500000.0 * c3**2 * dt**2 + 32.5 * c3 * dt**2 + 3500.0 * c3 * dt + 0.013 * dt + 1.0) + / ( + 2500000.0 * c1 * c3 * dt**2 + + 1000.0 * c1 * dt + + 2500000.0 * c2 * c3 * dt**2 + + 32.5 * c2 * dt**2 + + 2500.0 * c2 * dt + + 2500000.0 * c3**2 * dt**2 + + 32.5 * c3 * dt**2 + + 3500.0 * c3 * dt + + 0.013 * dt + + 1.0 + ), + ], + ] + ) + + # newton update: u1 = u0 - g/dg + u -= dgInv @ g + + # increase iteration count and work counter + n += 1 + self.work_counters['newton']() + + if np.isnan(res) and self.stop_at_nan: + raise ProblemError('Newton got nan after %i iterations, aborting...' % n) + elif np.isnan(res): # pragma: no cover + self.logger.warning('Newton got nan after %i iterations...' 
% n) + + if n == self.newton_maxiter: + raise ProblemError('Newton did not converge after %i iterations, error is %s' % (n, res)) + + return u + + +class JacobiElliptic(ptype): + r""" + Implement the Jacobi Elliptic non-linear problem: + + .. math:: + \begin{eqnarray*} + \frac{du}{dt} &=& vw, &\quad u(0) = 0, \\ + \frac{dv}{dt} &=& -uw, &\quad v(0) = 1, \\ + \frac{dw}{dt} &=& -0.51uv, &\quad w(0) = 1. + \end{eqnarray*} + + Parameters + ---------- + newton_maxiter : int, optional + Maximum number of Newton iteration in solve_system. The default is 200. + newton_tol : float, optional + Residuum tolerance for Newton iteration in solve_system. The default is 5e-11. + stop_at_nan : bool, optional + Wheter to stop or not solve_system when getting NAN. The default is True. + + Reference + --------- + Van Der Houwen, P. J., Sommeijer, B. P., & Van Der Veen, W. A. (1995). + Parallel iteration across the steps of high-order Runge-Kutta methods for + nonstiff initial value problems. Journal of computational and applied + mathematics, 60(3), 309-329. + """ + + dtype_u = mesh + dtype_f = mesh + + def __init__(self, newton_maxiter=200, newton_tol=5e-11, stop_at_nan=True): + nvars = 3 + u0 = (0.0, 1.0, 1.0) + super().__init__((nvars, None, np.dtype('float64'))) + + self._makeAttributeAndRegister( + 'u0', 'newton_maxiter', 'newton_tol', 'stop_at_nan', localVars=locals(), readOnly=True + ) + self.work_counters['newton'] = WorkCounter() + self.work_counters['rhs'] = WorkCounter() + + def u_exact(self, t, u_init=None, t_init=None): + r""" + Routine to return initial conditions or to approximate exact solution using ``SciPy``. + + Parameters + ---------- + t : float + Time at which the approximated exact solution is computed. + u_init : pySDC.implementations.problem_classes.Lorenz.dtype_u + Initial conditions for getting the exact solution. + t_init : float + The starting time. + + Returns + ------- + me : dtype_u + The approximated exact solution. 
+ """ + + me = self.dtype_u(self.init) + + if t > 0: + + def eval_rhs(t, u): + r""" + Evaluate the right hand side, but switch the arguments for ``SciPy``. + + Args: + t (float): Time + u (numpy.ndarray): Solution at time t + + Returns: + (numpy.ndarray): Right hand side evaluation + """ + return self.eval_f(u, t) + + me[:] = self.generate_scipy_reference_solution(eval_rhs, t, u_init, t_init) + else: + me[:] = self.u0 + return me + + def eval_f(self, u, t): + """ + Routine to evaluate the right-hand side of the problem. + + Parameters + ---------- + u : dtype_u + Current values of the numerical solution. + t : float + Current time of the numerical solution is computed (not used here). + + Returns + ------- + f : dtype_f + The right-hand side of the problem (one component). + """ + f = self.dtype_f(self.init) + u1, u2, u3 = u + + f[:] = np.array([u2 * u3, -u1 * u3, -0.51 * u1 * u2]) + self.work_counters['rhs']() + return f + + def solve_system(self, rhs, dt, u0, t): + """ + Simple Newton solver for the nonlinear equation + + Parameters + ---------- + rhs : dtype_f + Right-hand side for the nonlinear system. + dt : float + Abbrev. for the node-to-node stepsize (or any other factor required). + u0 : dtype_u + Initial guess for the iterative solver. + t : float + Current time (e.g. for time-dependent BCs). + + Returns + ------- + u : dtype_u + The solution as mesh. 
+ """ + # create new mesh object from u0 and set initial values for iteration + u = self.dtype_u(u0) + + # start newton iteration + n, res = 0, np.inf + while n < self.newton_maxiter: + u1, u2, u3 = u + f = np.array([u2 * u3, -u1 * u3, -0.51 * u1 * u2]) + + # form the function g with g(u) = 0 + g = u - dt * f - rhs + + # if g is close to 0, then we are done + res = np.linalg.norm(g, np.inf) + if res < self.newton_tol or np.isnan(res): + break + + # assemble (dg/du)^(-1) + dgInv = np.array( + [ + [ + 0.51 * dt**2 * u1**2 - 1.0, + 0.51 * dt**2 * u1 * u2 - 1.0 * dt * u3, + 1.0 * dt**2 * u1 * u3 - 1.0 * dt * u2, + ], + [ + -0.51 * dt**2 * u1 * u2 + 1.0 * dt * u3, + -0.51 * dt**2 * u2**2 - 1.0, + 1.0 * dt**2 * u2 * u3 + 1.0 * dt * u1, + ], + [ + -0.51 * dt**2 * u1 * u3 + 0.51 * dt * u2, + 0.51 * dt**2 * u2 * u3 + 0.51 * dt * u1, + -1.0 * dt**2 * u3**2 - 1.0, + ], + ] + ) + dgInv /= ( + 1.02 * dt**3 * u1 * u2 * u3 + 0.51 * dt**2 * u1**2 - 0.51 * dt**2 * u2**2 - 1.0 * dt**2 * u3**2 - 1.0 + ) + + # newton update: u1 = u0 - g/dg + u -= dgInv @ g + + # increase iteration count and work counter + n += 1 + self.work_counters['newton']() + + if np.isnan(res) and self.stop_at_nan: + raise ProblemError('Newton got nan after %i iterations, aborting...' % n) + elif np.isnan(res): # pragma: no cover + self.logger.warning('Newton got nan after %i iterations...' 
% n) + + if n == self.newton_maxiter: + raise ProblemError('Newton did not converge after %i iterations, error is %s' % (n, res)) + + return u diff --git a/pySDC/implementations/sweeper_classes/Runge_Kutta.py b/pySDC/implementations/sweeper_classes/Runge_Kutta.py index f25ff74e09..29c6219d7b 100644 --- a/pySDC/implementations/sweeper_classes/Runge_Kutta.py +++ b/pySDC/implementations/sweeper_classes/Runge_Kutta.py @@ -278,14 +278,14 @@ def update_nodes(self): # implicit solve with prefactor stemming from the diagonal of Qd, use previous stage as initial guess if self.coll.implicit: lvl.u[m + 1][:] = prob.solve_system( - rhs, lvl.dt * self.QI[m + 1, m + 1], lvl.u[m], lvl.time + lvl.dt * self.coll.nodes[m] + rhs, lvl.dt * self.QI[m + 1, m + 1], lvl.u[m], lvl.time + lvl.dt * self.coll.nodes[m + 1] ) else: lvl.u[m + 1][:] = rhs[:] # update function values (we don't usually need to evaluate the RHS at the solution of the step) if m < M - self.coll.num_solution_stages or self.params.eval_rhs_at_right_boundary: - lvl.f[m + 1] = prob.eval_f(lvl.u[m + 1], lvl.time + lvl.dt * self.coll.nodes[m]) + lvl.f[m + 1] = prob.eval_f(lvl.u[m + 1], lvl.time + lvl.dt * self.coll.nodes[m + 1]) # indicate presence of new values at this level lvl.status.updated = True @@ -434,12 +434,12 @@ def update_nodes(self): # implicit solve with prefactor stemming from the diagonal of Qd, use previous stage as initial guess lvl.u[m + 1][:] = prob.solve_system( - rhs, lvl.dt * self.QI[m + 1, m + 1], lvl.u[m], lvl.time + lvl.dt * self.coll.nodes[m] + rhs, lvl.dt * self.QI[m + 1, m + 1], lvl.u[m], lvl.time + lvl.dt * self.coll.nodes[m + 1] ) # update function values (we don't usually need to evaluate the RHS at the solution of the step) if m < M - self.coll.num_solution_stages or self.params.eval_rhs_at_right_boundary: - lvl.f[m + 1] = prob.eval_f(lvl.u[m + 1], lvl.time + lvl.dt * self.coll.nodes[m]) + lvl.f[m + 1] = prob.eval_f(lvl.u[m + 1], lvl.time + lvl.dt * self.coll.nodes[m + 1]) # indicate 
presence of new values at this level lvl.status.updated = True @@ -470,7 +470,7 @@ class BackwardEuler(RungeKutta): A-stable first order method. """ - nodes = np.array([0.0]) + nodes = np.array([1.0]) weights = np.array([1.0]) matrix = np.array( [ @@ -592,7 +592,7 @@ def get_update_order(cls): class ESDIRK53(RungeKutta): """ - A-stable embedded RK pair of orders 5 and 3. + A-stable embedded RK pair of orders 5 and 3, ESDIRK5(3)6L[2]SA. Taken from [here](https://ntrs.nasa.gov/citations/20160005923) """ @@ -655,6 +655,71 @@ def get_update_order(cls): return 4 +class ESDIRK43(RungeKutta): + """ + A-stable embedded RK pair of orders 4 and 3, ESDIRK4(3)6L[2]SA. + Taken from [here](https://ntrs.nasa.gov/citations/20160005923) + """ + + s2 = 2**0.5 + + nodes = np.array([0, 1 / 2, (2 - 2**0.5) / 4, 5 / 8, 26 / 25, 1.0]) + matrix = np.zeros((6, 6)) + matrix[1, :2] = [1 / 4, 1 / 4] + matrix[2, :3] = [ + (1 - 2**0.5) / 8, + (1 - 2**0.5) / 8, + 1 / 4, + ] + matrix[3, :4] = [ + (5 - 7 * s2) / 64, + (5 - 7 * s2) / 64, + 7 * (1 + s2) / 32, + 1 / 4, + ] + matrix[4, :5] = [ + (-13796 - 54539 * s2) / 125000, + (-13796 - 54539 * s2) / 125000, + (506605 + 132109 * s2) / 437500, + 166 * (-97 + 376 * s2) / 109375, + 1 / 4, + ] + matrix[5, :] = [ + (1181 - 987 * s2) / 13782, + (1181 - 987 * s2) / 13782, + 47 * (-267 + 1783 * s2) / 273343, + -16 * (-22922 + 3525 * s2) / 571953, + -15625 * (97 + 376 * s2) / 90749876, + 1 / 4, + ] + + weights = np.array( + [ + [ + (1181 - 987 * s2) / 13782, + (1181 - 987 * s2) / 13782, + 47 * (-267 + 1783 * s2) / 273343, + -16 * (-22922 + 3525 * s2) / 571953, + -15625 * (97 + 376 * s2) / 90749876, + 1 / 4, + ], + [ + -480923228411.0 / 4982971448372, + -480923228411.0 / 4982971448372, + 6709447293961.0 / 12833189095359, + 3513175791894.0 / 6748737351361.0, + -498863281070.0 / 6042575550617.0, + 2077005547802.0 / 8945017530137.0, + ], + ] + ) + ButcherTableauClass = ButcherTableauEmbedded + + @classmethod + def get_update_order(cls): + return 4 + + class 
ARK548L2SAERK(RungeKutta): """ Explicit part of the ARK54 scheme. diff --git a/pySDC/implementations/sweeper_classes/generic_implicit.py b/pySDC/implementations/sweeper_classes/generic_implicit.py index e93060aba5..f64bf62db6 100644 --- a/pySDC/implementations/sweeper_classes/generic_implicit.py +++ b/pySDC/implementations/sweeper_classes/generic_implicit.py @@ -65,6 +65,15 @@ def update_nodes(self): # get number of collocation nodes for easier access M = self.coll.num_nodes + # update the MIN-SR-FLEX preconditioner + if self.params.QI.startswith('MIN-SR-FLEX'): + k = L.status.sweep + if k > M: + self.params.QI = "MIN-SR-S" + else: + self.params.QI = 'MIN-SR-FLEX' + str(k) + self.QI = self.get_Qdelta_implicit(self.coll, qd_type=self.params.QI) + # gather all terms which are known already (e.g. from the previous iteration) # this corresponds to u0 + QF(u^k) - QdF(u^k) + tau @@ -89,9 +98,11 @@ def update_nodes(self): rhs += L.dt * self.QI[m + 1, j] * L.f[j] # implicit solve with prefactor stemming from the diagonal of Qd - L.u[m + 1] = P.solve_system( - rhs, L.dt * self.QI[m + 1, m + 1], L.u[m + 1], L.time + L.dt * self.coll.nodes[m] - ) + alpha = L.dt * self.QI[m + 1, m + 1] + if alpha == 0: + L.u[m + 1] = rhs + else: + L.u[m + 1] = P.solve_system(rhs, alpha, L.u[m + 1], L.time + L.dt * self.coll.nodes[m]) # update function values L.f[m + 1] = P.eval_f(L.u[m + 1], L.time + L.dt * self.coll.nodes[m]) diff --git a/pySDC/implementations/sweeper_classes/generic_implicit_MPI.py b/pySDC/implementations/sweeper_classes/generic_implicit_MPI.py index 1533a1129a..7deaa8a511 100644 --- a/pySDC/implementations/sweeper_classes/generic_implicit_MPI.py +++ b/pySDC/implementations/sweeper_classes/generic_implicit_MPI.py @@ -1,7 +1,7 @@ from mpi4py import MPI from pySDC.implementations.sweeper_classes.generic_implicit import generic_implicit -from pySDC.core.Sweeper import sweeper +from pySDC.core.Sweeper import sweeper, ParameterError import logging @@ -85,9 +85,6 @@ def 
compute_residual(self, stage=None): L.status.residual = 0.0 if L.status.residual is None else L.status.residual return None - # check if there are new values (e.g. from a sweep) - # assert L.status.updated - # compute the residual for each node # build QF(u) @@ -130,12 +127,22 @@ def predict(self): # evaluate RHS at left point L.f[0] = P.eval_f(L.u[0], L.time) + m = self.rank + if self.params.initial_guess == 'spread': - L.u[self.rank + 1] = P.dtype_u(L.u[0]) - L.f[self.rank + 1] = P.eval_f(L.u[self.rank + 1], L.time + L.dt * self.coll.nodes[self.rank]) + # copy u[0] to all collocation nodes, evaluate RHS + L.u[m + 1] = P.dtype_u(L.u[0]) + L.f[m + 1] = P.eval_f(L.u[m + 1], L.time + L.dt * self.coll.nodes[m]) + elif self.params.initial_guess == 'copy': + # copy u[0] and RHS evaluation to all collocation nodes + L.u[m + 1] = P.dtype_u(L.u[0]) + L.f[m + 1] = P.dtype_f(L.f[0]) + elif self.params.initial_guess == 'zero': + # zeros solution for u and RHS + L.u[m + 1] = P.dtype_u(init=P.init, val=0.0) + L.f[m + 1] = P.dtype_f(init=P.init, val=0.0) else: - L.u[self.rank + 1] = P.dtype_u(init=P.init, val=0.0) - L.f[self.rank + 1] = P.dtype_f(init=P.init, val=0.0) + raise ParameterError(f'initial_guess option {self.params.initial_guess} not implemented') # indicate that this level is now ready for sweeps L.status.unlocked = True diff --git a/pySDC/playgrounds/FEniCSx/HeatEquation_1D_FEniCSx_matrix_forced.py b/pySDC/playgrounds/FEniCSx/HeatEquation_1D_FEniCSx_matrix_forced.py new file mode 100644 index 0000000000..c8ed02ff3c --- /dev/null +++ b/pySDC/playgrounds/FEniCSx/HeatEquation_1D_FEniCSx_matrix_forced.py @@ -0,0 +1,245 @@ +import logging + +import matplotlib.pyplot as plt +from mpi4py import MPI +from petsc4py import PETSc +import dolfinx as dfx +import ufl +from matplotlib import pyplot as plt + +import numpy as np + +from pySDC.core.Errors import ParameterError +from pySDC.core.Problem import ptype +from pySDC.implementations.datatype_classes.mesh import mesh, 
imex_mesh + +# noinspection PyUnusedLocal +class fenicsx_heat(ptype): + """ + Example implementing the forced 1D heat equation with Dirichlet-0 BC in [0,1] + + Attributes: + V: function space + M: mass matrix for FEM + K: stiffness matrix incl. diffusion coefficient (and correct sign) + g: forcing term + bc: boundary conditions + """ + + def __init__(self, problem_params, dtype_u=mesh, dtype_f=imex_mesh): + """ + Initialization routine + + Args: + problem_params (dict): custom parameters for the example + dtype_u: FEniCS mesh data type (will be passed to parent class) + dtype_f: FEniCS mesh data data type with implicit and explicit parts (will be passed to parent class) + """ + + # define the Dirichlet boundary + # def Boundary(x, on_boundary): + # return on_boundary + + if 'comm' not in problem_params: + problem_params['comm'] = MPI.COMM_WORLD + + # these parameters will be used later, so assert their existence + essential_keys = ['nelems', 't0', 'family', 'order', 'refinements', 'nu', 'comm'] + for key in essential_keys: + if key not in problem_params: + msg = 'need %s to instantiate problem, only got %s' % (key, str(problem_params.keys())) + raise ParameterError(msg) + + # Define mesh + domain = dfx.mesh.create_interval(problem_params['comm'], nx=problem_params['nelems'], points=np.array([0, 1])) + self.V = dfx.fem.FunctionSpace(domain, (problem_params['family'], problem_params['order'])) + self.x = ufl.SpatialCoordinate(domain) + tmp = dfx.fem.Function(self.V) + nx = len(tmp.x.array) + + # invoke super init, passing number of dofs, dtype_u and dtype_f + super(fenicsx_heat, self).__init__((nx, problem_params['comm'], np.dtype('float64')), dtype_u, dtype_f, problem_params) + + # Create boundary condition + fdim = domain.topology.dim - 1 + boundary_facets = dfx.mesh.locate_entities_boundary( + domain, fdim, lambda x: np.full(x.shape[1], True, dtype=bool)) + self.bc = dfx.fem.dirichletbc(PETSc.ScalarType(0), dfx.fem.locate_dofs_topological(self.V, fdim, 
boundary_facets), self.V) + + # Stiffness term (Laplace) and mass term + u = ufl.TrialFunction(self.V) + v = ufl.TestFunction(self.V) + + a_K = -1.0 * ufl.dot(ufl.grad(u), self.params.nu * ufl.grad(v)) * ufl.dx + a_M = u * v * ufl.dx + + self.K = dfx.fem.petsc.assemble_matrix(dfx.fem.form(a_K), bcs=[self.bc]) + self.K.assemble() + + self.M = dfx.fem.petsc.assemble_matrix(dfx.fem.form(a_M), bcs=[self.bc]) + self.M.assemble() + + # set forcing term + self.g = dfx.fem.Function(self.V) + t = self.params.t0 + self.g.interpolate(lambda x: -np.sin(2 * np.pi*x[0]) * (np.sin(t) - 4 * self.params.nu*np.pi*np.pi*np.cos(t))) + + self.tmp_u = dfx.fem.Function(self.V) + self.tmp_f = dfx.fem.Function(self.V) + + self.solver = PETSc.KSP().create(domain.comm) + self.solver.setType(PETSc.KSP.Type.PREONLY) + self.solver.getPC().setType(PETSc.PC.Type.LU) + + @staticmethod + def convert_to_fenicsx_vector(input, output): + output.x.array[:] = input[:] + + @staticmethod + def convert_from_fenicsx_vector(input, output): + output[:] = input.x.array[:] + + def solve_system(self, rhs, factor, u0, t): + """ + Dolfin's linear solver for (M-dtA)u = rhs + + Args: + rhs (dtype_f): right-hand side for the nonlinear system + factor (float): abbrev. 
for the node-to-node stepsize (or any other factor required) + u0 (dtype_u_: initial guess for the iterative solver (not used here so far) + t (float): current time + + Returns: + dtype_u: solution as mesh + """ + + self.convert_to_fenicsx_vector(input=u0, output=self.tmp_u) + self.convert_to_fenicsx_vector(input=rhs, output=self.tmp_f) + b = dfx.fem.Function(self.V) + self.M.mult(self.tmp_f.vector, b.vector) + + dfx.fem.petsc.set_bc(b.vector, [self.bc]) + self.solver.setOperators(self.M - factor * self.K) + self.solver.solve(b.vector, self.tmp_u.vector) + # tmp_u.x.scatter_forward() + + u = self.dtype_u(self.init) + self.convert_from_fenicsx_vector(input=self.tmp_u, output=u) + + return u + + def apply_mass_matrix(self, u): + + self.convert_to_fenicsx_vector(input=u, output=self.tmp_u) + self.M.mult(self.tmp_u.vector, self.tmp_f.vector) + uM = self.dtype_u(self.init) + self.convert_from_fenicsx_vector(input=self.tmp_f, output=uM) + return uM + + def eval_f(self, u, t): + """ + Routine to evaluate both parts of the RHS + + Args: + u (dtype_u): current values + t (float): current time + + Returns: + dtype_f: the RHS divided into two parts + """ + + f = self.dtype_f(self.init) + b = dfx.fem.Function(self.V) + + self.convert_to_fenicsx_vector(input=u, output=self.tmp_u) + self.K.mult(self.tmp_u.vector, b.vector) + + + self.solver.setOperators(self.M) + self.solver.solve(b.vector, self.tmp_f.vector) + self.convert_from_fenicsx_vector(input=self.tmp_f, output=f.impl) + + self.g.interpolate(lambda x: -np.sin(2 * np.pi * x[0]) * (np.sin(t) - self.params.nu * np.pi * np.pi * 4 * np.cos(t))) + # self.M.mult(self.g.vector, self.tmp_f.vector) + # self.convert_from_fenicsx_vector(input=self.tmp_f, output=f.expl) + self.convert_from_fenicsx_vector(input=self.g, output=f.expl) + + return f + + def u_exact(self, t): + """ + Routine to compute the exact solution at time t + + Args: + t (float): current time + + Returns: + dtype_u: exact solution + """ + + u0 = 
dfx.fem.Function(self.V) + u0.interpolate(lambda x: np.sin(2 * np.pi * x[0]) * np.cos(t)) + + me = self.dtype_u(self.init) + self.convert_from_fenicsx_vector(input=u0, output=me) + + return me + + +#noinspection PyUnusedLocal +class fenicsx_heat_mass(fenicsx_heat): + """ + Example implementing the forced 1D heat equation with Dirichlet-0 BC in [0,1], expects mass matrix sweeper + + """ + + def solve_system(self, rhs, factor, u0, t): + """ + Dolfin's linear solver for (M-dtA)u = rhs + + Args: + rhs (dtype_f): right-hand side for the nonlinear system + factor (float): abbrev. for the node-to-node stepsize (or any other factor required) + u0 (dtype_u_: initial guess for the iterative solver (not used here so far) + t (float): current time + + Returns: + dtype_u: solution as mesh + """ + + self.convert_to_fenicsx_vector(input=u0, output=self.tmp_u) + self.convert_to_fenicsx_vector(input=rhs, output=self.tmp_f) + + dfx.fem.petsc.set_bc(self.tmp_f.vector, [self.bc]) + self.solver.setOperators(self.M - factor * self.K) + self.solver.solve(self.tmp_f.vector, self.tmp_u.vector) + # tmp_u.x.scatter_forward() + + u = self.dtype_u(self.init) + self.convert_from_fenicsx_vector(input=self.tmp_u, output=u) + + return u + + def eval_f(self, u, t): + """ + Routine to evaluate both parts of the RHS + + Args: + u (dtype_u): current values + t (float): current time + + Returns: + dtype_f: the RHS divided into two parts + """ + + f = self.dtype_f(self.init) + + self.convert_to_fenicsx_vector(input=u, output=self.tmp_u) + self.K.mult(self.tmp_u.vector, self.tmp_f.vector) + self.convert_from_fenicsx_vector(input=self.tmp_f, output=f.impl) + + self.g.interpolate(lambda x: -np.sin(2 * np.pi * x[0]) * (np.sin(t) - self.params.nu * np.pi * np.pi * 4 * np.cos(t))) + self.M.mult(self.g.vector, self.tmp_f.vector) + self.convert_from_fenicsx_vector(input=self.tmp_f, output=f.expl) + + return f diff --git a/pySDC/playgrounds/FEniCSx/HeatEquation_1D_FEniCSx_matrix_forced_2.py 
b/pySDC/playgrounds/FEniCSx/HeatEquation_1D_FEniCSx_matrix_forced_2.py new file mode 100644 index 0000000000..8532928a92 --- /dev/null +++ b/pySDC/playgrounds/FEniCSx/HeatEquation_1D_FEniCSx_matrix_forced_2.py @@ -0,0 +1,272 @@ +import logging + +from mpi4py import MPI +from petsc4py import PETSc +import dolfinx as dfx +import ufl +import numpy as np + +from pySDC.core.Problem import ptype +from pySDC.implementations.datatype_classes.mesh import mesh, imex_mesh + + +# noinspection PyUnusedLocal +class fenicsx_heat_mass(ptype): + r""" + Example implementing the forced one-dimensional heat equation with Dirichlet boundary conditions + + .. math:: + \frac{d u}{d t} = \nu \frac{d^2 u}{d x^2} + f + + for :math:`x \in \Omega:=[0,1]`, where the forcing term :math:`f` is defined by + + .. math:: + f(x, t) = -\sin(2 \pi x) (\sin(t) - 4 \nu \pi^2 \cos(t)). + + For initial conditions with constant c and + + .. math:: + u(x, 0) = \sin(2 \pi x) + c + + the exact solution of the problem is given by + + .. math:: + u(x, t) = \sin(2 \pi x)\cos(t) + c. + + In this class the problem is implemented in the way that the spatial part is solved using ``FEniCS`` [1]_. Hence, the problem + is reformulated to the *weak formulation* + + .. math:: + \int_\Omega u_t v\,dx = - \nu \int_\Omega \nabla u \nabla v\,dx + \int_\Omega f v\,dx. + + The part containing the forcing term is treated explicitly, where it is interpolated in the function space. + The other part will be treated in an implicit way. + + Parameters + ---------- + nelems : int, optional + Spatial resolution, i.e., numbers of degrees of freedom in space. + t0 : float, optional + Starting time. + family : str, optional + Indicates the family of elements used to create the function space + for the trial and test functions. The default is ``'CG'``, which are the class + of Continuous Galerkin, a *synonym* for the Lagrange family of elements, see [2]_. + order : int, optional + Defines the order of the elements in the function space. 
+ refinements : int, optional + Denotes the refinement of the mesh. ``refinements=2`` refines the mesh by factor :math:`2`. + nu : float, optional + Diffusion coefficient :math:`\nu`. + c : float, optional + Constant for the Dirichlet boundary condition :math:`c` + + Attributes + ---------- + V : FunctionSpace + Defines the function space of the trial and test functions. + M : scalar, vector, matrix or higher rank tensor + Denotes the expression :math:`\int_\Omega u v\,dx`. + K : scalar, vector, matrix or higher rank tensor + Denotes the expression :math:`- \nu \int_\Omega \nabla u \nabla v\,dx`. + g : Expression + The forcing term :math:`f` in the heat equation. + bc : DirichletBC + Denotes the Dirichlet boundary conditions. + + References + ---------- + .. [1] The FEniCS Project Version 1.5. M. S. Alnaes, J. Blechta, J. Hake, A. Johansson, B. Kehlet, A. Logg, + C. Richardson, J. Ring, M. E. Rognes, G. N. Wells. Archive of Numerical Software (2015). + .. [2] Automated Solution of Differential Equations by the Finite Element Method. A. Logg, K.-A. Mardal, G. N. + Wells and others. Springer (2012). 
+ """ + + dtype_u = mesh + dtype_f = imex_mesh + + def __init__(self, nelems=128, t0=0.0, family='CG', order=4, refinements=1, nu=0.1, c=0.0, comm=MPI.COMM_WORLD): + """Initialization routine""" + + # Define mesh + domain = dfx.mesh.create_interval(comm, nx=nelems, points=np.array([0, 1])) + self.V = dfx.fem.FunctionSpace(domain, (family, order)) + self.x = ufl.SpatialCoordinate(domain) + tmp = dfx.fem.Function(self.V) + nx = len(tmp.x.array) + + # invoke super init, passing number of dofs, dtype_u and dtype_f + super().__init__(init=(nx, None, np.dtype('float64'))) + self._makeAttributeAndRegister( + 'nelems', 't0', 'family', 'order', 'refinements', 'nu', 'c', localVars=locals(), readOnly=True + ) + + # Create boundary condition + fdim = domain.topology.dim - 1 + boundary_facets = dfx.mesh.locate_entities_boundary( + domain, fdim, lambda x: np.full(x.shape[1], True, dtype=bool)) + self.bc = dfx.fem.dirichletbc(PETSc.ScalarType(self.c), + dfx.fem.locate_dofs_topological(self.V, fdim, boundary_facets), self.V) + self.bc_hom = dfx.fem.dirichletbc(PETSc.ScalarType(0), + dfx.fem.locate_dofs_topological(self.V, fdim, boundary_facets), self.V) + self.fix_bc_for_residual = True + + # Stiffness term (Laplace) and mass term + u = ufl.TrialFunction(self.V) + v = ufl.TestFunction(self.V) + + a_K = -1.0 * ufl.dot(ufl.grad(u), self.nu * ufl.grad(v)) * ufl.dx + a_M = u * v * ufl.dx + + self.K = dfx.fem.petsc.assemble_matrix(dfx.fem.form(a_K), bcs=[self.bc]) + self.K.assemble() + + self.M = dfx.fem.petsc.assemble_matrix(dfx.fem.form(a_M), bcs=[self.bc]) + self.M.assemble() + + # set forcing term + self.g = dfx.fem.Function(self.V) + t = self.t0 + self.g.interpolate( + lambda x: -np.sin(2 * np.pi * x[0]) * (np.sin(t) - 4 * self.nu * np.pi * np.pi * np.cos(t))) + + self.tmp_u = dfx.fem.Function(self.V) + self.tmp_f = dfx.fem.Function(self.V) + + self.solver = PETSc.KSP().create(domain.comm) + self.solver.setType(PETSc.KSP.Type.PREONLY) + 
self.solver.getPC().setType(PETSc.PC.Type.LU) + + @staticmethod + def convert_to_fenicsx_vector(input, output): + output.x.array[:] = input[:] + + @staticmethod + def convert_from_fenicsx_vector(input, output): + output[:] = input.x.array[:] + + def solve_system(self, rhs, factor, u0, t): + r""" + Dolfin's linear solver for :math:`(M - factor \cdot A) \vec{u} = \vec{rhs}`. + + Parameters + ---------- + rhs : dtype_f + Right-hand side for the nonlinear system. + factor : float + Abbrev. for the node-to-node stepsize (or any other factor required). + u0 : dtype_u + Initial guess for the iterative solver (not used here so far). + t : float + Current time. + + Returns + ------- + u : dtype_u + Solution. + """ + + self.convert_to_fenicsx_vector(input=u0, output=self.tmp_u) + self.convert_to_fenicsx_vector(input=rhs, output=self.tmp_f) + # T = self.M - factor * self.K + # dofs, _ = self.bc.dof_indices() + # T.zeroRowsLocal(dofs, diag=1) + dfx.fem.petsc.set_bc(self.tmp_f.vector, [self.bc]) + self.solver.setOperators(self.M - factor * self.K) + self.solver.solve(self.tmp_f.vector, self.tmp_u.vector) + # dfx.fem.petsc.set_bc(self.tmp_u.vector, [self.bc]) + + u = self.dtype_u(self.init) + self.convert_from_fenicsx_vector(input=self.tmp_u, output=u) + + return u + + def eval_f(self, u, t): + """ + Routine to evaluate both parts of the right-hand side of the problem. + + Parameters + ---------- + u : dtype_u + Current values of the numerical solution. + t : float + Current time at which the numerical solution is computed. + + Returns + ------- + f : dtype_f + The right-hand side divided into two parts. 
+ """ + + f = self.dtype_f(self.init) + + self.convert_to_fenicsx_vector(input=u, output=self.tmp_u) + self.K.mult(self.tmp_u.vector, self.tmp_f.vector) + self.convert_from_fenicsx_vector(input=self.tmp_f, output=f.impl) + + self.g.interpolate( + lambda x: -np.sin(2 * np.pi * x[0]) * (np.sin(t) - self.nu * np.pi * np.pi * 4 * np.cos(t))) + self.M.mult(self.g.vector, self.tmp_f.vector) + self.convert_from_fenicsx_vector(input=self.tmp_f, output=f.expl) + + return f + + def apply_mass_matrix(self, u): + r""" + Routine to apply mass matrix. + + Parameters + ---------- + u : dtype_u + Current values of the numerical solution. + + Returns + ------- + me : dtype_u + The product :math:`M \vec{u}`. + """ + + self.convert_to_fenicsx_vector(input=u, output=self.tmp_u) + self.M.mult(self.tmp_u.vector, self.tmp_f.vector) + uM = self.dtype_u(self.init) + self.convert_from_fenicsx_vector(input=self.tmp_f, output=uM) + return uM + + + def u_exact(self, t): + r""" + Routine to compute the exact solution at time :math:`t`. + + Parameters + ---------- + t : float + Time of the exact solution. + + Returns + ------- + me : dtype_u + Exact solution. 
+ """ + + u0 = dfx.fem.Function(self.V) + u0.interpolate(lambda x: np.sin(2 * np.pi * x[0]) * np.cos(t) + self.c) + + me = self.dtype_u(self.init) + self.convert_from_fenicsx_vector(input=u0, output=me) + + return me + + def fix_residual(self, res): + """ + Applies homogeneous Dirichlet boundary conditions to the residual + + Parameters + ---------- + res : dtype_u + Residual + """ + self.convert_to_fenicsx_vector(input=res, output=self.tmp_u) + dfx.fem.petsc.set_bc(self.tmp_u.vector, [self.bc_hom]) + self.tmp_u.x.scatter_forward() + self.convert_from_fenicsx_vector(input=self.tmp_u, output=res) + return None diff --git a/pySDC/playgrounds/FEniCSx/HookClass_FEniCS_output.py b/pySDC/playgrounds/FEniCSx/HookClass_FEniCS_output.py new file mode 100644 index 0000000000..0b820c8641 --- /dev/null +++ b/pySDC/playgrounds/FEniCSx/HookClass_FEniCS_output.py @@ -0,0 +1,79 @@ +# import dolfin as df + +from pySDC.core.Hooks import hooks + +# file = df.File('output1d/grayscott.pvd') # dirty, but this has to be unique (and not per step or level) + + +class fenics_output(hooks): + """ + Hook class to add output to FEniCS runs + """ + + # def pre_run(self, step, level_number): + # """ + # Overwrite default routine called before time-loop starts + # + # Args: + # step: the current step + # level_number: the current level number + # """ + # super(fenics_output, self).pre_run(step, level_number) + # + # # some abbreviations + # L = step.levels[level_number] + # + # v = L.u[0].values + # v.rename('func', 'label') + # + # file << v + + def post_iteration(self, step, level_number): + + super(fenics_output, self).post_iteration(step, level_number) + + # some abbreviations + L = step.levels[level_number] + + uex = L.prob.u_exact(L.time + L.dt) + + err = abs(uex - L.u[-1]) / abs(uex) + + self.add_to_stats( + process=step.status.slot, + time=L.time, + level=L.level_index, + iter=step.status.iter, + sweep=L.status.sweep, + type='error', + value=err, + ) + + self.add_to_stats( + 
process=step.status.slot, + time=L.time, + level=L.level_index, + iter=step.status.iter, + sweep=L.status.sweep, + type='residual', + value=L.status.residual / abs(L.u[0]), + ) + + # def post_step(self, step, level_number): + # """ + # Default routine called after each iteration + # Args: + # step: the current step + # level_number: the current level number + # """ + # + # super(fenics_output, self).post_step(step, level_number) + # + # # some abbreviations + # L = step.levels[level_number] + # + # # u1,u2 = df.split(L.uend.values) + # v = L.uend.values + # v.rename('func', 'label') + # + # file << v diff --git a/pySDC/playgrounds/FEniCSx/__init__.py b/pySDC/playgrounds/FEniCSx/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/pySDC/playgrounds/FEniCSx/heat_equation_M.py b/pySDC/playgrounds/FEniCSx/heat_equation_M.py new file mode 100644 index 0000000000..6e0446570a --- /dev/null +++ b/pySDC/playgrounds/FEniCSx/heat_equation_M.py @@ -0,0 +1,209 @@ +import numpy as np + + +from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI +# from pySDC.implementations.problem_classes.HeatEquation_1D_FEniCS_matrix_forced import fenics_heat_mass +from pySDC.implementations.sweeper_classes.imex_1st_order_mass import imex_1st_order_mass +# from pySDC.implementations.transfer_classes.TransferFenicsMesh import mesh_to_mesh_fenics +from pySDC.playgrounds.FEniCSx.HeatEquation_1D_FEniCSx_matrix_forced_2 import fenicsx_heat_mass +from pySDC.implementations.sweeper_classes.imex_1st_order import imex_1st_order + +from pySDC.playgrounds.FEniCSx.HookClass_FEniCS_output import fenics_output + +from pySDC.helpers.stats_helper import get_sorted +import pySDC.helpers.plot_helper as plt_helper + + +def run_simulation(ml=None, mass=None): + + t0 = 0 + dt = 0.2 + Tend = 0.2 + + # initialize level parameters + level_params = dict() + level_params['restol'] = 1e-10 + level_params['dt'] = dt + + # initialize step parameters + step_params = 
dict() + step_params['maxiter'] = 20 + + # initialize sweeper parameters + sweeper_params = dict() + sweeper_params['quad_type'] = 'RADAU-RIGHT' + sweeper_params['num_nodes'] = [3] + + problem_params = dict() + problem_params['nu'] = 0.1 + problem_params['t0'] = t0 # ugly, but necessary to set up ProblemClass + problem_params['nelems'] = [128] + problem_params['family'] = 'CG' + problem_params['order'] = [4] + problem_params['c'] = [1.0] + if ml: + problem_params['refinements'] = [1, 0] + else: + problem_params['refinements'] = [1] + + # initialize controller parameters + controller_params = dict() + controller_params['logger_level'] = 30 + controller_params['hook_class'] = fenics_output + + # Fill description dictionary for easy hierarchy creation + description = dict() + if mass: + description['problem_class'] = fenicsx_heat_mass + description['sweeper_class'] = imex_1st_order_mass + # description['base_transfer_class'] = base_transfer_mass + else: + description['problem_class'] = fenicsx_heat + description['sweeper_class'] = imex_1st_order + description['problem_params'] = problem_params + description['sweeper_params'] = sweeper_params + description['level_params'] = level_params + description['step_params'] = step_params + # description['space_transfer_class'] = mesh_to_mesh_fenics + + # quickly generate block of steps + controller = controller_nonMPI(num_procs=1, controller_params=controller_params, description=description) + + # get initial values on finest level + P = controller.MS[0].levels[0].prob + uinit = P.u_exact(t0) + + # call main function to get things done... 
+ uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend) + + errors = get_sorted(stats, type='error', sortby='iter') + residuals = get_sorted(stats, type='residual', sortby='iter') + print(errors) + return errors, residuals + + +def visualize(): + + errors_sdc_M = np.load('errors_sdc_M.npy') + errors_sdc_noM = np.load('errors_sdc_noM.npy') + errors_mlsdc_M = np.load('errors_mlsdc_M.npy') + errors_mlsdc_noM = np.load('errors_mlsdc_noM.npy') + + plt_helper.setup_mpl() + + plt_helper.newfig(240, 1, ratio=0.8) + + plt_helper.plt.semilogy( + [err[0] for err in errors_sdc_noM], + [err[1] for err in errors_sdc_noM], + lw=2, + marker='s', + markersize=6, + color='darkblue', + label='SDC without M', + ) + + plt_helper.plt.xlim([0, 11]) + plt_helper.plt.ylim([6e-09, 2e-03]) + plt_helper.plt.xlabel('iteration') + plt_helper.plt.ylabel('error') + plt_helper.plt.legend() + plt_helper.plt.grid() + + plt_helper.savefig('error_SDC_noM_CG_4') + + plt_helper.newfig(240, 1, ratio=0.8) + + plt_helper.plt.semilogy( + [err[0] for err in errors_sdc_noM], + [err[1] for err in errors_sdc_noM], + lw=2, + color='darkblue', + marker='s', + markersize=6, + label='SDC without M', + ) + plt_helper.plt.semilogy( + [err[0] for err in errors_sdc_M], + [err[1] for err in errors_sdc_M], + lw=2, + marker='o', + markersize=6, + color='red', + label='SDC with M', + ) + + plt_helper.plt.xlim([0, 11]) + plt_helper.plt.ylim([6e-09, 2e-03]) + plt_helper.plt.xlabel('iteration') + plt_helper.plt.ylabel('error') + plt_helper.plt.legend() + plt_helper.plt.grid() + + plt_helper.savefig('error_SDC_M_CG_4') + + plt_helper.newfig(240, 1, ratio=0.8) + + plt_helper.plt.semilogy( + [err[0] for err in errors_mlsdc_noM], + [err[1] for err in errors_mlsdc_noM], + lw=2, + marker='s', + markersize=6, + color='darkblue', + label='MLSDC without M', + ) + + plt_helper.plt.xlim([0, 11]) + plt_helper.plt.ylim([6e-09, 2e-03]) + plt_helper.plt.xlabel('iteration') + plt_helper.plt.ylabel('error') + plt_helper.plt.legend() + 
plt_helper.plt.grid() + + plt_helper.savefig('error_MLSDC_noM_CG_4') + + plt_helper.newfig(240, 1, ratio=0.8) + + plt_helper.plt.semilogy( + [err[0] for err in errors_mlsdc_noM], + [err[1] for err in errors_mlsdc_noM], + lw=2, + color='darkblue', + marker='s', + markersize=6, + label='MLSDC without M', + ) + plt_helper.plt.semilogy( + [err[0] for err in errors_mlsdc_M], + [err[1] for err in errors_mlsdc_M], + lw=2, + marker='o', + markersize=6, + color='red', + label='MLSDC with M', + ) + + plt_helper.plt.xlim([0, 11]) + plt_helper.plt.ylim([6e-09, 2e-03]) + plt_helper.plt.xlabel('iteration') + plt_helper.plt.ylabel('error') + plt_helper.plt.legend() + plt_helper.plt.grid() + + plt_helper.savefig('error_MLSDC_M_CG_4') + + +if __name__ == "__main__": + + # errors_sdc_noM, _ = run_simulation(ml=False, mass=False) + errors_sdc_M, _ = run_simulation(ml=False, mass=True) + # errors_mlsdc_noM, _ = run_simulation(ml=True, mass=False) + # errors_mlsdc_M, _ = run_simulation(ml=True, mass=True) + # + # np.save('errors_sdc_M.npy', errors_sdc_M) + # np.save('errors_sdc_noM.npy', errors_sdc_noM) + # np.save('errors_mlsdc_M.npy', errors_mlsdc_M) + # np.save('errors_mlsdc_noM.npy', errors_mlsdc_noM) + + # visualize() diff --git a/pySDC/playgrounds/FEniCSx/heat_equation_raw.py b/pySDC/playgrounds/FEniCSx/heat_equation_raw.py new file mode 100644 index 0000000000..2957074f8d --- /dev/null +++ b/pySDC/playgrounds/FEniCSx/heat_equation_raw.py @@ -0,0 +1,108 @@ +import numpy as np + +from mpi4py import MPI +from petsc4py import PETSc +import ufl +import pyvista + +from dolfinx import fem, mesh, plot + + +def plot_function(t, uh): + """ + Create a figure of the concentration uh warped visualized in 3D at timet step t. 
+ """ + p = pyvista.Plotter() + # Update point values on pyvista grid + grid.point_data[f"u({t})"] = uh.x.array.real + # Warp mesh by point values + warped = grid.warp_by_scalar(f"u({t})", factor=1.5) + + # Add mesh to plotter and visualize in notebook or save as figure + actor = p.add_mesh(warped) + if not pyvista.OFF_SCREEN: + p.show() + else: + pyvista.start_xvfb() + figure_as_array = p.screenshot(f"diffusion_{t:.2f}.png") + # Clear plotter for next plot + p.remove_actor(actor) + +# Define temporal parameters +t = 0 # Start time +T = 2.0 # Final time +num_steps = 61 +dt = T / num_steps # time step size + +# Define mesh +nx, ny = 50, 50 +domain = mesh.create_rectangle(MPI.COMM_WORLD, [np.array([-2, -2]), np.array([2, 2])], + [nx, ny], mesh.CellType.triangle) +V = fem.FunctionSpace(domain, ("CG", 1)) + +# Create initial condition +def initial_condition(x, a=5): + return np.exp(-a*(x[0]**2+x[1]**2)) +u_n = fem.Function(V) +u_n.name = "u_n" +u_n.interpolate(initial_condition) + +# Create boundary condition +fdim = domain.topology.dim - 1 +boundary_facets = mesh.locate_entities_boundary( + domain, fdim, lambda x: np.full(x.shape[1], True, dtype=bool)) +bc = fem.dirichletbc(PETSc.ScalarType(0), fem.locate_dofs_topological(V, fdim, boundary_facets), V) + +# Define solution variable, and interpolate initial solution for visualization in Paraview +uh = fem.Function(V) +uh.name = "uh" +uh.interpolate(initial_condition) + +u, v = ufl.TrialFunction(V), ufl.TestFunction(V) +f = fem.Constant(domain, PETSc.ScalarType(0)) +a = u * v * ufl.dx + dt*ufl.dot(ufl.grad(u), ufl.grad(v)) * ufl.dx +L = (u_n + dt * f) * v * ufl.dx + +bilinear_form = fem.form(a) +linear_form = fem.form(L) + +A = fem.petsc.assemble_matrix(bilinear_form, bcs=[bc]) +A.assemble() +b = fem.petsc.create_vector(linear_form) + +solver = PETSc.KSP().create(domain.comm) +solver.setOperators(A) +solver.setType(PETSc.KSP.Type.PREONLY) +solver.getPC().setType(PETSc.PC.Type.LU) + +pyvista.set_jupyter_backend("ipygany") 
+ +grid = pyvista.UnstructuredGrid(*plot.create_vtk_mesh(V)) + +# plot_function(0, uh) + +for i in range(num_steps): + t += dt + + # Update the right hand side reusing the initial vector + with b.localForm() as loc_b: + loc_b.set(0) + fem.petsc.assemble_vector(b, linear_form) + + # Apply Dirichlet boundary condition to the vector + fem.petsc.apply_lifting(b, [bilinear_form], [[bc]]) + b.ghostUpdate(addv=PETSc.InsertMode.ADD_VALUES, mode=PETSc.ScatterMode.REVERSE) + fem.petsc.set_bc(b, [bc]) + + # Solve linear problem + solver.solve(b, uh.vector) + uh.x.scatter_forward() + + # Update solution at previous time step (u_n) + u_n.x.array[:] = uh.x.array + + # # Plot every 15th time step + # if i % 15 == 0: + # plot_function(t, uh) + +# plot_function(t, uh) \ No newline at end of file diff --git a/pySDC/playgrounds/FEniCSx/playground.py b/pySDC/playgrounds/FEniCSx/playground.py new file mode 100644 index 0000000000..b62516caf5 --- /dev/null +++ b/pySDC/playgrounds/FEniCSx/playground.py @@ -0,0 +1,43 @@ +from dolfinx import fem, mesh +from mpi4py import MPI +import numpy as np + + + +def initial_condition(x, a=5): + return np.exp(-a*(x[0]**2+x[1]**2)) + + +comm = MPI.COMM_WORLD +comm_fenics = comm.Split(color=comm.Get_rank()) +print(comm_fenics.Get_size()) +# Define mesh +nx, ny = 50, 50 +domain = mesh.create_rectangle(comm_fenics, [np.array([-2, -2]), np.array([2, 2])], + [nx, ny], mesh.CellType.triangle) +V = fem.FunctionSpace(domain, ("CG", 4)) +nxc, nyc = 50, 50 +domainc = mesh.create_rectangle(comm_fenics, [np.array([-2, -2]), np.array([2, 2])], + [nxc, nyc], mesh.CellType.triangle) +Vc = fem.FunctionSpace(domain, ("CG", 1)) ## WORKS +Vc = fem.FunctionSpace(domainc, ("CG", 1)) ## DOES NOT WORK + +u = fem.Function(V) +uc = fem.Function(Vc) + +u.interpolate(lambda x: initial_condition(x,a=5)) + +uc.interpolate(u) + + +# w_n = fem.Function(V) +# w_n.name = "w_n" +# w_n.x.array[:] = 99 +# +# print(np.mean(w_n.x.array)) +# +# if comm.Get_size() == 2: +# if comm.Get_rank() 
== 0: +# comm.send(u_n.x.array, dest=1) +# else: +# v_n.x.array[:] = comm.recv(source=0) diff --git a/pySDC/playgrounds/ML_initial_guess/README.md b/pySDC/playgrounds/ML_initial_guess/README.md new file mode 100644 index 0000000000..617578512a --- /dev/null +++ b/pySDC/playgrounds/ML_initial_guess/README.md @@ -0,0 +1,19 @@ +Machine learning initial guesses for SDC +---------------------------------------- + +Most linear solves in SDC are performed in the first iteration. Afterwards, SDC providing good initial guesses is actually one of its strengths. +To get a better initial guess for "free", and to stay hip, we want to do this with machine learning. + +This playground is very much work in progress! +The first thing we did was to build a simple datatype for PyTorch that we can use in pySDC. Keep in mind that it is very inefficient and I don't think it works with MPI yet. But it's good enough for counting iterations. Once we have a proof of concept, we should refine this. +Then, we setup a simple heat equation with this datatype in `heat.py`. +The crucial new function is `ML_predict`, which loads an already trained model and evaluates it. +This, in turn, is called during `predict` in the sweeper. (See `sweeper.py`) +But we need to train the model, of course. This is done in `ml_heat.py`. + +How to move on with this project: +================================= +The first thing you might want to do is to fix the neural network that solves the heat equation. Our first try was too simplistic. +The next thing would be to not predict the solution at a single node, but at all collocation nodes simultaneously. Maybe, actually start with this. +If you get a proof of concept, you can clean up the datatype, such that it is even fast. +You can do a "physics-informed" learning process of predicting the entire collocation solution by means of the residual. This is very generic, actually. 
diff --git a/pySDC/playgrounds/ML_initial_guess/heat.py b/pySDC/playgrounds/ML_initial_guess/heat.py new file mode 100644 index 0000000000..a6661b922a --- /dev/null +++ b/pySDC/playgrounds/ML_initial_guess/heat.py @@ -0,0 +1,262 @@ +import numpy as np +import scipy.sparse as sp +from scipy.sparse.linalg import gmres, spsolve, cg +import torch +from pySDC.core.Errors import ProblemError +from pySDC.core.Problem import ptype, WorkCounter +from pySDC.helpers import problem_helper +from pySDC.implementations.datatype_classes.mesh import mesh +from pySDC.playgrounds.ML_initial_guess.tensor import Tensor +from pySDC.playgrounds.ML_initial_guess.sweeper import GenericImplicitML_IG +from pySDC.tutorial.step_1.A_spatial_problem_setup import run_accuracy_check +from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI +from pySDC.playgrounds.ML_initial_guess.ml_heat import HeatEquationModel + + +class Heat1DFDTensor(ptype): + """ + Very simple 1-dimensional finite differences implementation of a heat equation using the pySDC-PyTorch interface. + Still includes some mess. 
+ """ + + dtype_u = Tensor + dtype_f = Tensor + + def __init__( + self, + nvars=256, + nu=1.0, + freq=4, + stencil_type='center', + order=2, + lintol=1e-12, + liniter=10000, + solver_type='direct', + bc='periodic', + bcParams=None, + ): + # make sure parameters have the correct types + if not type(nvars) in [int, tuple]: + raise ProblemError('nvars should be either tuple or int') + if not type(freq) in [int, tuple]: + raise ProblemError('freq should be either tuple or int') + + ndim = 1 + + # eventually extend freq to other dimension + if type(freq) is int: + freq = (freq,) * ndim + if len(freq) != ndim: + raise ProblemError(f'len(freq)={len(freq)}, different to ndim={ndim}') + + # check values for freq and nvars + for f in freq: + if ndim == 1 and f == -1: + # use Gaussian initial solution in 1D + bc = 'periodic' + break + if f % 2 != 0 and bc == 'periodic': + raise ProblemError('need even number of frequencies due to periodic BCs') + + # invoke super init, passing number of dofs + super().__init__(init=(torch.empty(size=(nvars,), dtype=torch.double), None, np.dtype('float64'))) + + dx, xvalues = problem_helper.get_1d_grid(size=nvars, bc=bc, left_boundary=0.0, right_boundary=1.0) + + self.A_, _ = problem_helper.get_finite_difference_matrix( + derivative=2, + order=order, + stencil_type=stencil_type, + dx=dx, + size=nvars, + dim=ndim, + bc=bc, + ) + self.A_ *= nu + self.A = torch.tensor(self.A_.todense()) + + self.xvalues = torch.tensor(xvalues, dtype=torch.double) + self.Id = torch.tensor((sp.eye(nvars, format='csc')).todense()) + + # store attribute and register them as parameters + self._makeAttributeAndRegister('nvars', 'stencil_type', 'order', 'bc', 'nu', localVars=locals(), readOnly=True) + self._makeAttributeAndRegister('freq', 'lintol', 'liniter', 'solver_type', localVars=locals()) + + if self.solver_type != 'direct': + self.work_counters[self.solver_type] = WorkCounter() + + @property + def ndim(self): + """Number of dimensions of the spatial problem""" + 
return 1 + + @property + def dx(self): + """Size of the mesh (in all dimensions)""" + return self.xvalues[1] - self.xvalues[0] + + @property + def grids(self): + """ND grids associated to the problem""" + x = self.xvalues + if self.ndim == 1: + return x + if self.ndim == 2: + return x[None, :], x[:, None] + if self.ndim == 3: + return x[None, :, None], x[:, None, None], x[None, None, :] + + def eval_f(self, u, t): + """ + Routine to evaluate the right-hand side of the problem. + + Parameters + ---------- + u : dtype_u + Current values. + t : float + Current time. + + Returns + ------- + f : dtype_f + Values of the right-hand side of the problem. + """ + f = self.f_init + f[:] = torch.matmul(self.A, u) + return f + + def ML_predict(self, u0, t0, dt): + """ + Predict the solution at t0+dt given initial conditions u0 + """ + # read in model + model = HeatEquationModel(self) + model.load_state_dict(torch.load('heat_equation_model.pth')) + model.eval() + + # evaluate model + predicted_state = model(u0, t0, dt) + sol = self.u_init + sol[:] = predicted_state.double()[:] + return sol + + def solve_system(self, rhs, factor, u0, t): + r""" + Simple linear solver for :math:`(I-factor\cdot A)\vec{u}=\vec{rhs}`. + + Parameters + ---------- + rhs : dtype_f + Right-hand side for the linear system. + factor : float + Abbrev. for the local stepsize (or any other factor required). + u0 : dtype_u + Initial guess for the iterative solver. + t : float + Current time (e.g. for time-dependent BCs). + + Returns + ------- + sol : dtype_u + The solution of the linear solver. 
+ """ + solver_type, Id, A, nvars, sol = ( + self.solver_type, + self.Id, + self.A, + self.nvars, + self.u_init, + ) + + if solver_type == 'direct': + sol[:] = torch.linalg.solve(Id - factor * A, rhs.flatten()).reshape(nvars) + # TODO: implement torch equivalent of cg + # elif solver_type == 'CG': + # sol[:] = cg( + # Id - factor * A, + # rhs.flatten(), + # x0=u0.flatten(), + # tol=lintol, + # maxiter=liniter, + # atol=0, + # callback=self.work_counters[solver_type], + # )[0].reshape(nvars) + else: + raise ValueError(f'solver type "{solver_type}" not known!') + + return sol + + def u_exact(self, t, **kwargs): + r""" + Routine to compute the exact solution at time :math:`t`. + + Parameters + ---------- + t : float + Time of the exact solution. + + Returns + ------- + sol : dtype_u + The exact solution. + """ + if 'u_init' in kwargs.keys() or 't_init' in kwargs.keys(): + self.logger.warning( + f'{type(self).__name__} uses an analytic exact solution from t=0. If you try to compute the local error, you will get the global error instead!' 
+ ) + + ndim, freq, nu, dx, sol = self.ndim, self.freq, self.nu, self.dx, self.u_init + + if ndim == 1: + x = self.grids + rho = (2.0 - 2.0 * torch.cos(np.pi * freq[0] * dx)) / dx**2 + if freq[0] > 0: + sol[:] = torch.sin(np.pi * freq[0] * x) * torch.exp(-t * nu * rho) + else: + raise NotImplementedError + + return sol + + +def main(): + """ + A simple test program to setup a full step instance + """ + dt = 1e-2 + + level_params = dict() + level_params['restol'] = 1e-10 + level_params['dt'] = dt + + sweeper_params = dict() + sweeper_params['quad_type'] = 'RADAU-RIGHT' + sweeper_params['num_nodes'] = 3 + sweeper_params['QI'] = 'LU' + sweeper_params['initial_guess'] = 'NN' + + problem_params = dict() + + step_params = dict() + step_params['maxiter'] = 20 + + description = dict() + description['problem_class'] = Heat1DFDTensor + description['problem_params'] = problem_params + description['sweeper_class'] = GenericImplicitML_IG + description['sweeper_params'] = sweeper_params + description['level_params'] = level_params + description['step_params'] = step_params + + controller = controller_nonMPI(num_procs=1, controller_params={'logger_level': 20}, description=description) + + P = controller.MS[0].levels[0].prob + + uinit = P.u_exact(0) + uend, _ = controller.run(u0=uinit, t0=0, Tend=dt) + u_exact = P.u_exact(dt) + print("error ", torch.abs(u_exact - uend).max()) + + +if __name__ == "__main__": + main() diff --git a/pySDC/playgrounds/ML_initial_guess/ml_heat.py b/pySDC/playgrounds/ML_initial_guess/ml_heat.py new file mode 100644 index 0000000000..c3286869f0 --- /dev/null +++ b/pySDC/playgrounds/ML_initial_guess/ml_heat.py @@ -0,0 +1,130 @@ +import torch +import torch.nn as nn +import torch.optim as optim +import matplotlib.pyplot as plt +import numpy as np + + +class Train_pySDC: + """ + Interface between PyTorch and pySDC for training models. + + Attributes: + - problem: An instantiated problem from pySDC that allows evaluating the exact solution. 
+ This should have the same parameters as the problem you run in pySDC later. + - model: A PyTorch model with some neural network to train, specific to the problem + """ + + def __init__(self, problem, model, use_exact=True): + self.problem = problem + self.model = model + self.use_exact = use_exact # use exact solution in problem class or backward Euler solution + + self.model.train(True) + + def generate_initial_condition(self, t): + return self.problem.u_exact(t) + + def generate_target_condition(self, initial_condition, t, dt): + if self.use_exact: + return self.problem.u_exact(t + dt) + else: + return self.problem.solve_system(initial_condition, dt, initial_condition, t) + + def train_model(self, initial_condition=None, t=None, dt=None, num_epochs=1000, lr=0.001): + model = self.model + + criterion = nn.MSELoss() + optimizer = optim.Adam(model.parameters(), lr=lr) + + # setup initial and target conditions + t = torch.rand(1) if t is None else t + dt = torch.rand(1) if dt is None else dt + initial_condition = self.generate_initial_condition(t) if initial_condition is None else initial_condition + target_condition = self.generate_target_condition(initial_condition, t, dt) + + # do the training + for epoch in range(num_epochs): + predicted_state = model(initial_condition, t, dt) + loss = criterion(predicted_state.float(), target_condition.float()) + + optimizer.zero_grad() + loss.backward() + optimizer.step() + + if (epoch + 1) % 100 == 0 or True: + print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.4f}') + + def plot(self, initial_condition=None, t=None, dt=None): + t = torch.rand(1) if t is None else t + dt = torch.rand(1) if dt is None else dt + initial_condition = self.generate_initial_condition(t) if initial_condition is None else initial_condition + target = self.generate_target_condition(initial_condition, t, dt) + model_prediction = self.model(initial_condition, t, dt) + + fig, ax = plt.subplots() + ax.plot(self.problem.xvalues, 
initial_condition, label='ic') + ax.plot(self.problem.xvalues, target, label='target') + ax.plot(self.problem.xvalues, model_prediction.detach().numpy(), label='model') + ax.set_title(f't={t:.2e}, dt={dt:.2e}') + ax.legend() + + +class HeatEquationModel(nn.Module): + """ + Very simple model to learn the heat equation. Beware! It's too simple. + Some machine learning expert please fix this! + """ + + def __init__(self, problem, hidden_size=64): + self.input_size = problem.nvars * 3 + self.output_size = problem.nvars + + super().__init__() + + self.fc1 = nn.Linear(self.input_size, hidden_size) + self.relu = nn.ReLU() + self.fc2 = nn.Linear(hidden_size, self.output_size) + + # Initialize weights (example) + nn.init.xavier_uniform_(self.fc1.weight) + nn.init.xavier_uniform_(self.fc2.weight) + + def forward(self, x, t, dt): + # prepare individual tensors + x = x.float() + _t = torch.ones_like(x) * t + _dt = torch.ones_like(x) * dt + + # Concatenate t and dt with the input x + _x = torch.cat((x, _t, _dt), dim=0) + + _x = self.fc1(_x) + _x = self.relu(_x) + _x = self.fc2(_x) + return _x + + +def train_at_collocation_nodes(): + """ + For the first proof of concept, we want to train the model specifically to the collocation nodes we use in SDC. + If successful, the initial guess would already be the exact solution and we would need no SDC iterations. + Alas, this neural network is too simple... We need **you** to fix it! 
+ """ + collocation_nodes = np.array([0.15505102572168285, 1, 0.6449489742783183]) * 1e-2 + + from pySDC.playgrounds.ML_initial_guess.heat import Heat1DFDTensor + + prob = Heat1DFDTensor() + model = HeatEquationModel(prob) + trainer = Train_pySDC(prob, model, use_exact=True) + for dt in collocation_nodes: + trainer.train_model(num_epochs=50, t=0, dt=dt) + for dt in collocation_nodes: + trainer.plot(t=0, dt=dt) + torch.save(model.state_dict(), 'heat_equation_model.pth') + plt.show() + + +if __name__ == '__main__': + train_at_collocation_nodes() diff --git a/pySDC/playgrounds/ML_initial_guess/sweeper.py b/pySDC/playgrounds/ML_initial_guess/sweeper.py new file mode 100644 index 0000000000..df30979f76 --- /dev/null +++ b/pySDC/playgrounds/ML_initial_guess/sweeper.py @@ -0,0 +1,24 @@ +from pySDC.implementations.sweeper_classes.generic_implicit import generic_implicit + + +class GenericImplicitML_IG(generic_implicit): + def predict(self): + """ + Initialise node with machine learning initial guess + """ + if self.params.initial_guess != 'NN': + return super().predict() + + L = self.level + P = L.prob + + # evaluate RHS at left point + L.f[0] = P.eval_f(L.u[0], L.time) + + for m in range(1, self.coll.num_nodes + 1): + L.u[m] = P.ML_predict(L.u[0], L.time, L.dt * self.coll.nodes[m - 1]) + L.f[m] = P.eval_f(L.u[m], L.time + L.dt * self.coll.nodes[m - 1]) + + # indicate that this level is now ready for sweeps + L.status.unlocked = True + L.status.updated = True diff --git a/pySDC/playgrounds/ML_initial_guess/tensor.py b/pySDC/playgrounds/ML_initial_guess/tensor.py new file mode 100644 index 0000000000..c28c321213 --- /dev/null +++ b/pySDC/playgrounds/ML_initial_guess/tensor.py @@ -0,0 +1,131 @@ +import numpy as np +import torch + +from pySDC.core.Errors import DataError + +try: + # TODO : mpi4py cannot be imported before dolfin when using fenics mesh + # see https://github.com/Parallel-in-Time/pySDC/pull/285#discussion_r1145850590 + # This should be dealt with at some point 
+ from mpi4py import MPI +except ImportError: + MPI = None + + +class Tensor(torch.Tensor): + """ + Wrapper for PyTorch tensor. + Be aware that this is totally WIP! Should be fine to count iterations, but desperately needs cleaning up if this project goes much further! + + TODO: Have to update `torch/multiprocessing/reductions.py` in order to share this datatype across processes. + + Attributes: + _comm: MPI communicator or None + """ + + @staticmethod + def __new__(cls, init, val=0.0, *args, **kwargs): + """ + Instantiates new datatype. This ensures that even when manipulating data, the result is still a mesh. + + Args: + init: either another mesh or a tuple containing the dimensions, the communicator and the dtype + val: value to initialize + + Returns: + obj of type mesh + + """ + if isinstance(init, Tensor): + obj = super().__new__(cls, init) + obj[:] = init[:] + obj._comm = init._comm + elif ( + isinstance(init, tuple) + # and (init[1] is None or isinstance(init[1], MPI.Intracomm)) + # and isinstance(init[2], np.dtype) + ): + obj = super().__new__(cls, init[0].clone()) + obj.fill_(val) + obj._comm = init[1] + else: + raise NotImplementedError(type(init)) + return obj + + @property + def comm(self): + """ + Getter for the communicator + """ + return self._comm + + def __array_finalize__(self, obj): + """ + Finalizing the datatype. Without this, new datatypes do not 'inherit' the communicator. 
+ """ + if obj is None: + return + self._comm = getattr(obj, '_comm', None) + + def __abs__(self): + """ + Overloading the abs operator + + Returns: + float: absolute maximum of all mesh values + """ + # take absolute values of the mesh values + local_absval = float(torch.amax(torch.abs(self))) + + if self.comm is not None: + if self.comm.Get_size() > 1: + global_absval = 0.0 + global_absval = max(self.comm.allreduce(sendobj=local_absval, op=MPI.MAX), global_absval) + else: + global_absval = local_absval + else: + global_absval = local_absval + + return float(global_absval) + + def isend(self, dest=None, tag=None, comm=None): + """ + Routine for sending data forward in time (non-blocking) + + Args: + dest (int): target rank + tag (int): communication tag + comm: communicator + + Returns: + request handle + """ + return comm.Issend(self[:], dest=dest, tag=tag) + + def irecv(self, source=None, tag=None, comm=None): + """ + Routine for receiving in time + + Args: + source (int): source rank + tag (int): communication tag + comm: communicator + + Returns: + None + """ + return comm.Irecv(self[:], source=source, tag=tag) + + def bcast(self, root=None, comm=None): + """ + Routine for broadcasting values + + Args: + root (int): process with value to broadcast + comm: communicator + + Returns: + broadcasted values + """ + comm.Bcast(self[:], root=root) + return self diff --git a/pySDC/projects/DAE/misc/DAEMesh.py b/pySDC/projects/DAE/misc/DAEMesh.py new file mode 100644 index 0000000000..cf10e949c3 --- /dev/null +++ b/pySDC/projects/DAE/misc/DAEMesh.py @@ -0,0 +1,12 @@ +from pySDC.implementations.datatype_classes.mesh import MultiComponentMesh + + +class DAEMesh(MultiComponentMesh): + r""" + Datatype for DAE problems. The solution of the problem can be splitted in the differential part + and in an algebraic part. + + This data type can be used for the solution of the problem itself as well as for its derivative. 
+ """ + + components = ['diff', 'alg'] diff --git a/pySDC/projects/DAE/misc/HookClass_DAE.py b/pySDC/projects/DAE/misc/HookClass_DAE.py index 4ce9f12e57..df6d08e25f 100644 --- a/pySDC/projects/DAE/misc/HookClass_DAE.py +++ b/pySDC/projects/DAE/misc/HookClass_DAE.py @@ -1,77 +1,75 @@ from pySDC.core.Hooks import hooks -class approx_solution_hook(hooks): +class LogGlobalErrorPostStepDifferentialVariable(hooks): """ - Hook class to add the approximate solution to the output generated by the sweeper after each time step + Hook class to log the error to the output generated by the sweeper after + each time step. """ - def __init__(self): - """ - Initialization routine for the custom hook - """ - super(approx_solution_hook, self).__init__() - def post_step(self, step, level_number): - """ - Default routine called after each step - Args: - step: the current step - level_number: the current level number + r""" + Default routine called after each step. + + Parameters + ---------- + step : pySDC.core.Step + Current step. + level_number : pySDC.core.level + Current level number. """ - super(approx_solution_hook, self).post_step(step, level_number) + super().post_step(step, level_number) # some abbreviations L = step.levels[level_number] + P = L.prob # TODO: is it really necessary to recompute the end point? Hasn't this been done already? 
L.sweep.compute_end_point() + # compute and save errors + # Note that the component from which the error is measured is specified here + upde = P.u_exact(step.time + step.dt) + e_global_differential = abs(upde.diff - L.uend.diff) + self.add_to_stats( process=step.status.slot, time=L.time + L.dt, level=L.level_index, iter=step.status.iter, sweep=L.status.sweep, - type='approx_solution', - value=L.uend, + type='e_global_differential_post_step', + value=e_global_differential, ) -class error_hook(hooks): +class LogGlobalErrorPostStepAlgebraicVariable(hooks): """ - Hook class to add the approximate solution to the output generated by the sweeper after each time step + Logs the global error in the algebraic variable """ - def __init__(self): - """ - Initialization routine for the custom hook - """ - super(error_hook, self).__init__() - def post_step(self, step, level_number): - """ - Default routine called after each step - Args: - step: the current step - level_number: the current level number + r""" + Default routine called after each step. + + Parameters + ---------- + step : pySDC.core.Step + Current step. + level_number : pySDC.core.level + Current level number. """ - super(error_hook, self).post_step(step, level_number) + super().post_step(step, level_number) - # some abbreviations L = step.levels[level_number] P = L.prob - # TODO: is it really necessary to recompute the end point? Hasn't this been done already? 
L.sweep.compute_end_point() - # compute and save errors - # Note that the component from which the error is measured is specified here upde = P.u_exact(step.time + step.dt) - err = abs(upde[0] - L.uend[0]) - # err = abs(upde[4] - L.uend[4]) + e_global_algebraic = abs(upde.alg - L.uend.alg) self.add_to_stats( process=step.status.slot, @@ -79,6 +77,6 @@ def post_step(self, step, level_number): level=L.level_index, iter=step.status.iter, sweep=L.status.sweep, - type='error_post_step', - value=err, + type='e_global_algebraic_post_step', + value=e_global_algebraic, ) diff --git a/pySDC/projects/DAE/misc/ProblemDAE.py b/pySDC/projects/DAE/misc/ProblemDAE.py index 08cc16a3a7..931d466b79 100644 --- a/pySDC/projects/DAE/misc/ProblemDAE.py +++ b/pySDC/projects/DAE/misc/ProblemDAE.py @@ -2,7 +2,7 @@ from scipy.optimize import root from pySDC.core.Problem import ptype, WorkCounter -from pySDC.implementations.datatype_classes.mesh import mesh +from pySDC.projects.DAE.misc.DAEMesh import DAEMesh class ptype_dae(ptype): @@ -25,8 +25,8 @@ class ptype_dae(ptype): in work_counters['rhs'] """ - dtype_u = mesh - dtype_f = mesh + dtype_u = DAEMesh + dtype_f = DAEMesh def __init__(self, nvars, newton_tol): """Initialization routine""" @@ -54,14 +54,18 @@ def solve_system(self, impl_sys, u0, t): me : dtype_u Numerical solution. 
""" - me = self.dtype_u(self.init) + + def implSysFlatten(unknowns, **kwargs): + sys = impl_sys(unknowns.reshape(me.shape).view(type(u0)), **kwargs) + return sys.flatten() + opt = root( - impl_sys, + implSysFlatten, u0, method='hybr', tol=self.newton_tol, ) - me[:] = opt.x + me[:] = opt.x.reshape(me.shape) self.work_counters['newton'].niter += opt.nfev return me diff --git a/pySDC/projects/DAE/problems/DiscontinuousTestDAE.py b/pySDC/projects/DAE/problems/DiscontinuousTestDAE.py index 8df106a828..3b676d8cec 100644 --- a/pySDC/projects/DAE/problems/DiscontinuousTestDAE.py +++ b/pySDC/projects/DAE/problems/DiscontinuousTestDAE.py @@ -1,5 +1,6 @@ import numpy as np +from pySDC.core.Problem import WorkCounter from pySDC.projects.DAE.misc.ProblemDAE import ptype_dae @@ -57,14 +58,12 @@ class DiscontinuousTestDAE(ptype_dae): def __init__(self, newton_tol=1e-12): """Initialization routine""" - nvars = 2 - super().__init__(nvars, newton_tol) - self._makeAttributeAndRegister('nvars', localVars=locals(), readOnly=True) - self._makeAttributeAndRegister('newton_tol', localVars=locals()) + super().__init__(nvars=2, newton_tol=newton_tol) self.t_switch_exact = np.arccosh(50) self.t_switch = None self.nswitches = 0 + self.work_counters['rhs'] = WorkCounter() def eval_f(self, u, du, t): r""" @@ -85,8 +84,8 @@ def eval_f(self, u, du, t): The right-hand side of f (contains two components). 
""" - y, z = u[0], u[1] - dy = du[0] + y, z = u.diff[0], u.alg[0] + dy = du.diff[0] t_switch = np.inf if self.t_switch is None else self.t_switch @@ -94,15 +93,12 @@ def eval_f(self, u, du, t): f = self.dtype_f(self.init) if h >= 0 or t >= t_switch: - f[:] = ( - dy, - y**2 - z**2 - 1, - ) + f.diff[0] = dy + f.alg[0] = y**2 - z**2 - 1 else: - f[:] = ( - dy - z, - y**2 - z**2 - 1, - ) + f.diff[0] = dy - z + f.alg[0] = y**2 - z**2 - 1 + self.work_counters['rhs']() return f def u_exact(self, t, **kwargs): @@ -125,9 +121,11 @@ def u_exact(self, t, **kwargs): me = self.dtype_u(self.init) if t <= self.t_switch_exact: - me[:] = (np.cosh(t), np.sinh(t)) + me.diff[0] = np.cosh(t) + me.alg[0] = np.sinh(t) else: - me[:] = (np.cosh(self.t_switch_exact), np.sinh(self.t_switch_exact)) + me.diff[0] = np.cosh(self.t_switch_exact) + me.alg[0] = np.sinh(self.t_switch_exact) return me def get_switching_info(self, u, t): @@ -162,14 +160,14 @@ def get_switching_info(self, u, t): m_guess = -100 for m in range(1, len(u)): - h_prev_node = 2 * u[m - 1][0] - 100 - h_curr_node = 2 * u[m][0] - 100 + h_prev_node = 2 * u[m - 1].diff[0] - 100 + h_curr_node = 2 * u[m].diff[0] - 100 if h_prev_node < 0 and h_curr_node >= 0: switch_detected = True m_guess = m - 1 break - state_function = [2 * u[m][0] - 100 for m in range(len(u))] + state_function = [2 * u[m].diff[0] - 100 for m in range(len(u))] return switch_detected, m_guess, state_function def count_switches(self): diff --git a/pySDC/projects/DAE/problems/WSCC9BusSystem.py b/pySDC/projects/DAE/problems/WSCC9BusSystem.py index ad472dc636..3867f0443a 100644 --- a/pySDC/projects/DAE/problems/WSCC9BusSystem.py +++ b/pySDC/projects/DAE/problems/WSCC9BusSystem.py @@ -1,6 +1,5 @@ import numpy as np from pySDC.projects.DAE.misc.ProblemDAE import ptype_dae -from pySDC.implementations.datatype_classes.mesh import mesh from pySDC.core.Errors import ParameterError @@ -763,16 +762,12 @@ class WSCC9BusSystem(ptype_dae): for Power Systems Research and Education. 
IEEE Transactions on Power Systems. Vol. 26, No. 1, pp. 12–19 (2011). """ - dtype_u = mesh - dtype_f = mesh - - def __init__(self, nvars=None, newton_tol=1e-10, m=3, n=9): + def __init__(self, newton_tol=1e-10): """Initialization routine""" - + m, n = 3, 9 nvars = 11 * m + 2 * m + 2 * n # invoke super init, passing number of dofs - super().__init__(nvars, newton_tol) - self._makeAttributeAndRegister('nvars', 'newton_tol', localVars=locals(), readOnly=True) + super().__init__(nvars=nvars, newton_tol=newton_tol) self._makeAttributeAndRegister('m', 'n', localVars=locals()) self.mpc = WSCC9Bus() @@ -993,19 +988,31 @@ def eval_f(self, u, du, t): The right-hand side of f (contains two components). """ - dEqp, dSi1d, dEdp = du[0 : self.m], du[self.m : 2 * self.m], du[2 * self.m : 3 * self.m] - dSi2q, dDelta = du[3 * self.m : 4 * self.m], du[4 * self.m : 5 * self.m] - dw, dEfd, dRF = du[5 * self.m : 6 * self.m], du[6 * self.m : 7 * self.m], du[7 * self.m : 8 * self.m] - dVR, dTM, dPSV = du[8 * self.m : 9 * self.m], du[9 * self.m : 10 * self.m], du[10 * self.m : 11 * self.m] + dEqp, dSi1d, dEdp = du.diff[0 : self.m], du.diff[self.m : 2 * self.m], du.diff[2 * self.m : 3 * self.m] + dSi2q, dDelta = du.diff[3 * self.m : 4 * self.m], du.diff[4 * self.m : 5 * self.m] + dw, dEfd, dRF = ( + du.diff[5 * self.m : 6 * self.m], + du.diff[6 * self.m : 7 * self.m], + du.diff[7 * self.m : 8 * self.m], + ) + dVR, dTM, dPSV = ( + du.diff[8 * self.m : 9 * self.m], + du.diff[9 * self.m : 10 * self.m], + du.diff[10 * self.m : 11 * self.m], + ) - Eqp, Si1d, Edp = u[0 : self.m], u[self.m : 2 * self.m], u[2 * self.m : 3 * self.m] - Si2q, Delta = u[3 * self.m : 4 * self.m], u[4 * self.m : 5 * self.m] - w, Efd, RF = u[5 * self.m : 6 * self.m], u[6 * self.m : 7 * self.m], u[7 * self.m : 8 * self.m] - VR, TM, PSV = u[8 * self.m : 9 * self.m], u[9 * self.m : 10 * self.m], u[10 * self.m : 11 * self.m] + Eqp, Si1d, Edp = u.diff[0 : self.m], u.diff[self.m : 2 * self.m], u.diff[2 * self.m : 3 * self.m] + 
Si2q, Delta = u.diff[3 * self.m : 4 * self.m], u.diff[4 * self.m : 5 * self.m] + w, Efd, RF = u.diff[5 * self.m : 6 * self.m], u.diff[6 * self.m : 7 * self.m], u.diff[7 * self.m : 8 * self.m] + VR, TM, PSV = ( + u.diff[8 * self.m : 9 * self.m], + u.diff[9 * self.m : 10 * self.m], + u.diff[10 * self.m : 11 * self.m], + ) - Id, Iq = u[11 * self.m : 11 * self.m + self.m], u[11 * self.m + self.m : 11 * self.m + 2 * self.m] - V = u[11 * self.m + 2 * self.m : 11 * self.m + 2 * self.m + self.n] - TH = u[11 * self.m + 2 * self.m + self.n : 11 * self.m + 2 * self.m + 2 * self.n] + Id, Iq = u.alg[0 : self.m], u.alg[self.m : 2 * self.m] + V = u.alg[2 * self.m : 2 * self.m + self.n] + TH = u.alg[2 * self.m + self.n : 2 * self.m + 2 * self.n] # line outage disturbance: if t >= 0.05: @@ -1137,7 +1144,8 @@ def eval_f(self, u, du, t): eqs.append(-QL2[self.m : self.n] - sum4) # (17) eqs_flatten = [item for sublist in eqs for item in sublist] - f[:] = eqs_flatten + f.diff[: 11 * self.m] = eqs_flatten[0 : 11 * self.m] + f.alg[: 2 * self.n + 2 * self.m] = eqs_flatten[11 * self.m :] return f def u_exact(self, t): @@ -1157,24 +1165,24 @@ def u_exact(self, t): assert t == 0, 'ERROR: u_exact only valid for t=0' me = self.dtype_u(self.init) - me[0 : self.m] = self.Eqp0 - me[self.m : 2 * self.m] = self.Si1d0 - me[2 * self.m : 3 * self.m] = self.Edp0 - me[3 * self.m : 4 * self.m] = self.Si2q0 - me[4 * self.m : 5 * self.m] = self.D0 - me[5 * self.m : 6 * self.m] = self.ws_vector - me[6 * self.m : 7 * self.m] = self.Efd0 - me[7 * self.m : 8 * self.m] = self.RF0 - me[8 * self.m : 9 * self.m] = self.VR0 - me[9 * self.m : 10 * self.m] = self.TM0 - me[10 * self.m : 11 * self.m] = self.PSV0 - me[11 * self.m : 11 * self.m + self.m] = self.Id0 - me[11 * self.m + self.m : 11 * self.m + 2 * self.m] = self.Iq0 - me[11 * self.m + 2 * self.m : 11 * self.m + 2 * self.m + self.n] = self.V0 - me[11 * self.m + 2 * self.m + self.n : 11 * self.m + 2 * self.m + 2 * self.n] = self.TH0 + me.diff[0 : self.m] = 
self.Eqp0 + me.diff[self.m : 2 * self.m] = self.Si1d0 + me.diff[2 * self.m : 3 * self.m] = self.Edp0 + me.diff[3 * self.m : 4 * self.m] = self.Si2q0 + me.diff[4 * self.m : 5 * self.m] = self.D0 + me.diff[5 * self.m : 6 * self.m] = self.ws_vector + me.diff[6 * self.m : 7 * self.m] = self.Efd0 + me.diff[7 * self.m : 8 * self.m] = self.RF0 + me.diff[8 * self.m : 9 * self.m] = self.VR0 + me.diff[9 * self.m : 10 * self.m] = self.TM0 + me.diff[10 * self.m : 11 * self.m] = self.PSV0 + me.alg[0 : self.m] = self.Id0 + me.alg[self.m : 2 * self.m] = self.Iq0 + me.alg[2 * self.m : 2 * self.m + self.n] = self.V0 + me.alg[2 * self.m + self.n : 2 * self.m + 2 * self.n] = self.TH0 return me - def get_switching_info(self, u, t, du=None): + def get_switching_info(self, u, t): r""" Provides information about the state function of the problem. When the state function changes its sign, typically an event occurs. So the check for an event should be done in the way that the state function @@ -1208,14 +1216,14 @@ def get_switching_info(self, u, t, du=None): switch_detected = False m_guess = -100 for m in range(1, len(u)): - h_prev_node = u[m - 1][10 * self.m] - self.psv_max - h_curr_node = u[m][10 * self.m] - self.psv_max + h_prev_node = u[m - 1].diff[10 * self.m] - self.psv_max + h_curr_node = u[m].diff[10 * self.m] - self.psv_max if h_prev_node < 0 and h_curr_node >= 0: switch_detected = True m_guess = m - 1 break - state_function = [u[m][10 * self.m] - self.psv_max for m in range(len(u))] + state_function = [u[m].diff[10 * self.m] - self.psv_max for m in range(len(u))] return switch_detected, m_guess, state_function def count_switches(self): diff --git a/pySDC/projects/DAE/problems/simple_DAE.py b/pySDC/projects/DAE/problems/simple_DAE.py index 343128624a..31be74e809 100644 --- a/pySDC/projects/DAE/problems/simple_DAE.py +++ b/pySDC/projects/DAE/problems/simple_DAE.py @@ -3,6 +3,7 @@ from scipy.interpolate import interp1d from pySDC.projects.DAE.misc.ProblemDAE import ptype_dae +from 
pySDC.implementations.datatype_classes.mesh import mesh class pendulum_2d(ptype_dae): @@ -11,15 +12,22 @@ class pendulum_2d(ptype_dae): The DAE system is given by the equations .. math:: - x' = u, + \frac{dp}{dt} = u, .. math:: - \frac{d}{dt} \frac{\partial}{\partial u} L = \frac{\partial L}{\partial x} + f + G^{T} \lambda, + \frac{dq}{dt} = v, .. math:: - 0 = \phi. + m\frac{du}{dt} = -p \lambda, - The pendulum is used in most introductory literature on DAEs, for example on page 8 of [1]_. + .. math:: + m\frac{dv}{dt} = -q \lambda - g, + + .. math:: + 0 = p^2 + q^2 - l^2 + + for :math:`l=1` and :math:`m=1`. The pendulum is used in most introductory literature on DAEs, for example on page 8 + of [1]_. Parameters ---------- @@ -39,9 +47,9 @@ class pendulum_2d(ptype_dae): Lect. Notes Math. (1989). """ - def __init__(self, nvars, newton_tol): + def __init__(self, newton_tol): """Initialization routine""" - super().__init__(nvars, newton_tol) + super().__init__(nvars=5, newton_tol=newton_tol) # load reference solution # data file must be generated and stored under misc/data and self.t_end = t[-1] # data = np.load(r'pySDC/projects/DAE/misc/data/pendulum.npy') @@ -72,7 +80,13 @@ def eval_f(self, u, du, t): # The last element of u is a Lagrange multiplier. 
Not sure if this needs to be time dependent, but must model the # weight somehow f = self.dtype_f(self.init) - f[:] = (du[0] - u[2], du[1] - u[3], du[2] + u[4] * u[0], du[3] + u[4] * u[1] + g, u[0] ** 2 + u[1] ** 2 - 1) + f.diff[:4] = ( + du.diff[0] - u.diff[2], + du.diff[1] - u.diff[3], + du.diff[2] + u.alg[0] * u.diff[0], + du.diff[3] + u.alg[0] * u.diff[1] + g, + ) + f.alg[0] = u.diff[0] ** 2 + u.diff[1] ** 2 - 1 self.work_counters['rhs']() return f @@ -92,12 +106,16 @@ def u_exact(self, t): """ me = self.dtype_u(self.init) if t == 0: - me[:] = (-1, 0, 0, 0, 0) + me.diff[:4] = (-1, 0, 0, 0) + me.alg[0] = 0 elif t < self.t_end: - me[:] = self.u_ref(t) + u_ref = self.u_ref(t) + me.diff[:4] = u_ref[:4] + me.alg[0] = u_ref[5] else: self.logger.warning("Requested time exceeds domain of the reference solution. Returning zero.") - me[:] = (0, 0, 0, 0, 0) + me.diff[:4] = (0, 0, 0, 0) + me.alg[0] = 0 return me @@ -139,6 +157,10 @@ class simple_dae_1(ptype_dae): equations. Society for Industrial and Applied Mathematics (1998). """ + def __init__(self, newton_tol=1e-10): + """Initialization routine""" + super().__init__(nvars=3, newton_tol=newton_tol) + def eval_f(self, u, du, t): r""" Routine to evaluate the implicit representation of the problem, i.e., :math:`F(u, u', t)`. @@ -160,11 +182,12 @@ def eval_f(self, u, du, t): # Smooth index-2 DAE pg. 
267 Ascher and Petzold (also the first example in KDC Minion paper) a = 10.0 f = self.dtype_f(self.init) - f[:] = ( - -du[0] + (a - 1 / (2 - t)) * u[0] + (2 - t) * a * u[2] + np.exp(t) * (3 - t) / (2 - t), - -du[1] + (1 - a) / (t - 2) * u[0] - u[1] + (a - 1) * u[2] + 2 * np.exp(t), - (t + 2) * u[0] + (t**2 - 4) * u[1] - (t**2 + t - 2) * np.exp(t), + + f.diff[:2] = ( + -du.diff[0] + (a - 1 / (2 - t)) * u.diff[0] + (2 - t) * a * u.alg[0] + (3 - t) / (2 - t) * np.exp(t), + -du.diff[1] + (1 - a) / (t - 2) * u.diff[0] - u.diff[1] + (a - 1) * u.alg[0] + 2 * np.exp(t), ) + f.alg[0] = (t + 2) * u.diff[0] + (t**2 - 4) * u.diff[1] - (t**2 + t - 2) * np.exp(t) self.work_counters['rhs']() return f @@ -183,7 +206,8 @@ def u_exact(self, t): The reference solution as mesh object containing three components. """ me = self.dtype_u(self.init) - me[:] = (np.exp(t), np.exp(t), -np.exp(t) / (2 - t)) + me.diff[:2] = (np.exp(t), np.exp(t)) + me.alg[0] = -np.exp(t) / (2 - t) return me @@ -193,10 +217,10 @@ class problematic_f(ptype_dae): numerically solvable for certain choices of the parameter :math:`\eta`. The DAE system is given by .. math:: - y (t) + \eta t z (t) = f(t), + \frac{d y(t)}{dt} + \eta t \frac{d z(t)}{dt} + (1 + \eta) z (t) = g (t). .. math:: - \frac{d y(t)}{dt} + \eta t \frac{d z(t)}{dt} + (1 + \eta) z (t) = g (t). + y (t) + \eta t z (t) = f(t), See, for example, page 264 of [1]_. @@ -218,9 +242,12 @@ class problematic_f(ptype_dae): equations. Society for Industrial and Applied Mathematics (1998). 
""" - def __init__(self, nvars, newton_tol, eta=1): + dtype_u = mesh + dtype_f = mesh + + def __init__(self, newton_tol, eta=1): """Initialization routine""" - super().__init__(nvars, newton_tol) + super().__init__(nvars=2, newton_tol=newton_tol) self._makeAttributeAndRegister('eta', localVars=locals()) def eval_f(self, u, du, t): diff --git a/pySDC/projects/DAE/problems/synchronous_machine.py b/pySDC/projects/DAE/problems/synchronous_machine.py index 5510f0dd46..9042325303 100644 --- a/pySDC/projects/DAE/problems/synchronous_machine.py +++ b/pySDC/projects/DAE/problems/synchronous_machine.py @@ -149,8 +149,8 @@ class synchronous_machine_infinite_bus(ptype_dae): .. [1] P. Kundur, N. J. Balu, M. G. Lauby. Power system stability and control. The EPRI power system series (1994). """ - def __init__(self, nvars, newton_tol): - super(synchronous_machine_infinite_bus, self).__init__(nvars, newton_tol) + def __init__(self, newton_tol): + super().__init__(nvars=14, newton_tol=newton_tol) # load reference solution # data file must be generated and stored under misc/data and self.t_end = t[-1] # data = np.load(r'pySDC/projects/DAE/misc/data/synch_gen.npy') @@ -217,16 +217,22 @@ def eval_f(self, u, du, t): # extract variables for readability # algebraic components - psi_d, psi_q, psi_F, psi_D, psi_Q1, psi_Q2 = u[0], u[1], u[2], u[3], u[4], u[5] - i_d, i_q, i_F, i_D, i_Q1, i_Q2 = u[6], u[7], u[8], u[9], u[10], u[11] - delta_r = u[12] - omega_m = u[13] + psi_d, psi_q, psi_F, psi_D, psi_Q1, psi_Q2 = u.diff[0], u.diff[1], u.diff[2], u.diff[3], u.diff[4], u.diff[5] + delta_r, omega_m = u.diff[6], u.diff[7] + i_d, i_q, i_F, i_D, i_Q1, i_Q2 = u.alg[0], u.alg[1], u.alg[2], u.alg[3], u.alg[4], u.alg[5] # differential components # these result directly from the voltage equations, introduced e.g. pg. 
145 Krause - dpsi_d, dpsi_q, dpsi_F, dpsi_D, dpsi_Q1, dpsi_Q2 = du[0], du[1], du[2], du[3], du[4], du[5] - ddelta_r = du[12] - domega_m = du[13] + dpsi_d, dpsi_q, dpsi_F, dpsi_D, dpsi_Q1, dpsi_Q2 = ( + du.diff[0], + du.diff[1], + du.diff[2], + du.diff[3], + du.diff[4], + du.diff[5], + ) + ddelta_r, domega_m = du.diff[6], du.diff[7] + # Network current I_Re = i_d * np.sin(delta_r) + i_q * np.cos(delta_r) I_Im = -i_d * np.cos(delta_r) + i_q * np.sin(delta_r) @@ -237,9 +243,7 @@ def eval_f(self, u, du, t): v_d = np.real(V_comp) * np.sin(delta_r) - np.imag(V_comp) * np.cos(delta_r) v_q = np.real(V_comp) * np.cos(delta_r) + np.imag(V_comp) * np.sin(delta_r) - # algebraic variables are i_d, i_q, i_F, i_D, i_Q1, i_Q2, il_d, il_q - f[:] = ( - # differential generator + f.diff[:8] = ( -dpsi_d + self.omega_b * (v_d - self.R_s * i_d + omega_m * psi_q), -dpsi_q + self.omega_b * (v_q - self.R_s * i_q - omega_m * psi_d), -dpsi_F + self.omega_b * (self.v_F - self.R_F * i_F), @@ -249,7 +253,8 @@ def eval_f(self, u, du, t): -ddelta_r + self.omega_b * (omega_m - 1), -domega_m + 1 / (2 * self.H_) * (self.T_m - (psi_q * i_d - psi_d * i_q) - self.K_D * self.omega_b * (omega_m - 1)), - # algebraic generator + ) + f.alg[:6] = ( -psi_d + self.L_d * i_d + self.L_md * i_F + self.L_md * i_D, -psi_q + self.L_q * i_q + self.L_mq * i_Q1 + self.L_mq * i_Q2, -psi_F + self.L_md * i_d + self.L_F * i_F + self.L_md * i_D, @@ -296,12 +301,16 @@ def u_exact(self, t): omega_b = 2 * np.pi * 60 omega_m = omega_0 / omega_b # = omega_r since pf = 2 i.e. two pole machine - me[:] = (psi_d, psi_q, psi_F, psi_D, psi_Q1, psi_Q2, i_d, i_q, i_F, i_D, i_Q1, i_Q2, delta_r, omega_m) + me.diff[:8] = (psi_d, psi_q, psi_F, psi_D, psi_Q1, psi_Q2, delta_r, omega_m) + me.alg[:6] = (i_d, i_q, i_F, i_D, i_Q1, i_Q2) elif t < self.t_end: - me[:] = self.u_ref(t) + u_ref = self.u_ref(t) + me.diff[:8] = u_ref[:8] + me.alg[:6] = u_ref[8:] else: self.logger.warning("Requested time exceeds domain of the reference solution. 
Returning zero.") - me[:] = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) + me.diff[:8] = (0, 0, 0, 0, 0, 0, 0, 0) + me.alg[:6] = (0, 0, 0, 0, 0, 0) return me diff --git a/pySDC/projects/DAE/problems/transistor_amplifier.py b/pySDC/projects/DAE/problems/transistor_amplifier.py index ae2adeb9cc..bc0bb9cf88 100644 --- a/pySDC/projects/DAE/problems/transistor_amplifier.py +++ b/pySDC/projects/DAE/problems/transistor_amplifier.py @@ -3,6 +3,7 @@ from scipy.interpolate import interp1d from pySDC.projects.DAE.misc.ProblemDAE import ptype_dae +from pySDC.implementations.datatype_classes.mesh import mesh # Helper function @@ -12,7 +13,7 @@ def _transistor(u_in): class one_transistor_amplifier(ptype_dae): r""" - The one transistor amplifier example from pg. 404 in [1]_. The problem is an index-1 differential-algebraic equation + The one transistor amplifier example from pg. 377 in [1]_. The problem is an index-1 differential-algebraic equation (DAE) having the equations .. math:: @@ -61,8 +62,11 @@ class one_transistor_amplifier(ptype_dae): Springer (2009). """ - def __init__(self, nvars, newton_tol): - super().__init__(nvars, newton_tol) + dtype_u = mesh + dtype_f = mesh + + def __init__(self, newton_tol): + super().__init__(nvars=5, newton_tol=newton_tol) # load reference solution # data file must be generated and stored under misc/data and self.t_end = t[-1] # data = np.load(r'pySDC/projects/DAE/misc/data/one_trans_amp.npy') @@ -195,8 +199,11 @@ class two_transistor_amplifier(ptype_dae): Lect. Notes Math. (1989). 
""" - def __init__(self, nvars, newton_tol): - super().__init__(nvars, newton_tol) + dtype_u = mesh + dtype_f = mesh + + def __init__(self, newton_tol): + super().__init__(nvars=8, newton_tol=newton_tol) # load reference solution # data file must be generated and stored under misc/data and self.t_end = t[-1] # data = np.load(r'pySDC/projects/DAE/misc/data/two_trans_amp.npy') diff --git a/pySDC/projects/DAE/run/fully_implicit_dae_playground.py b/pySDC/projects/DAE/run/fully_implicit_dae_playground.py index e4f0918855..72c50311b5 100644 --- a/pySDC/projects/DAE/run/fully_implicit_dae_playground.py +++ b/pySDC/projects/DAE/run/fully_implicit_dae_playground.py @@ -5,9 +5,9 @@ from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI from pySDC.projects.DAE.problems.simple_DAE import problematic_f from pySDC.projects.DAE.sweepers.fully_implicit_DAE import fully_implicit_DAE -from pySDC.projects.DAE.misc.HookClass_DAE import approx_solution_hook -from pySDC.projects.DAE.misc.HookClass_DAE import error_hook +from pySDC.implementations.hooks.log_errors import LogGlobalErrorPostStep from pySDC.helpers.stats_helper import get_sorted +from pySDC.implementations.hooks.log_solution import LogSolution def main(): @@ -26,8 +26,7 @@ def main(): # initialize problem parameters problem_params = dict() - problem_params['newton_tol'] = 1e-12 # tollerance for implicit solver - problem_params['nvars'] = 2 + problem_params['newton_tol'] = 1e-12 # initialize step parameters step_params = dict() @@ -36,7 +35,7 @@ def main(): # initialize controller parameters controller_params = dict() controller_params['logger_level'] = 30 - controller_params['hook_class'] = [approx_solution_hook, error_hook] + controller_params['hook_class'] = [LogSolution, LogGlobalErrorPostStep] # Fill description dictionary for easy hierarchy creation description = dict() @@ -64,15 +63,15 @@ def main(): uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend) # check error - err = 
get_sorted(stats, type='error_post_step', sortby='time') + err = get_sorted(stats, type='e_global_post_step', sortby='time') err = np.linalg.norm([err[i][1] for i in range(len(err))], np.inf) print(f"Error is {err}") assert np.isclose(err, 0.0, atol=1e-4), "Error too large." # store results - sol = get_sorted(stats, type='approx_solution_hook', sortby='time') + sol = get_sorted(stats, type='u', sortby='time') sol_dt = np.array([sol[i][0] for i in range(len(sol))]) - sol_data = np.array([[sol[j][1][i] for j in range(len(sol))] for i in range(problem_params['nvars'])]) + sol_data = np.array([[sol[j][1][i] for j in range(len(sol))] for i in range(P.nvars)]) data = dict() data['dt'] = sol_dt diff --git a/pySDC/projects/DAE/run/run_convergence_test.py b/pySDC/projects/DAE/run/run_convergence_test.py index 3229d4f7f7..b8a23671fc 100644 --- a/pySDC/projects/DAE/run/run_convergence_test.py +++ b/pySDC/projects/DAE/run/run_convergence_test.py @@ -5,7 +5,7 @@ from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI from pySDC.projects.DAE.problems.simple_DAE import simple_dae_1 from pySDC.projects.DAE.sweepers.fully_implicit_DAE import fully_implicit_DAE -from pySDC.projects.DAE.misc.HookClass_DAE import error_hook +from pySDC.projects.DAE.misc.HookClass_DAE import LogGlobalErrorPostStepDifferentialVariable from pySDC.helpers.stats_helper import get_sorted from pySDC.helpers.stats_helper import filter_stats @@ -25,7 +25,6 @@ def setup(): # This comes as read-in for the problem class problem_params = dict() problem_params['newton_tol'] = 1e-3 # tollerance for implicit solver - problem_params['nvars'] = 3 # This comes as read-in for the step class step_params = dict() @@ -34,7 +33,7 @@ def setup(): # initialize controller parameters controller_params = dict() controller_params['logger_level'] = 30 - controller_params['hook_class'] = error_hook + controller_params['hook_class'] = LogGlobalErrorPostStepDifferentialVariable # Fill description 
dictionary for easy hierarchy creation description = dict() @@ -90,7 +89,7 @@ def run(description, controller_params, run_params): uend, stats = controller.run(u0=uinit, t0=run_params['t0'], Tend=run_params['tend']) # compute exact solution and compare - err = get_sorted(stats, type='error_post_step', sortby='time') + err = get_sorted(stats, type='e_global_differential_post_step', sortby='time') niter = filter_stats(stats, type='niter') conv_data[qd_type][num_nodes]['error'][j] = np.linalg.norm([err[j][1] for j in range(len(err))], np.inf) diff --git a/pySDC/projects/DAE/run/run_iteration_test.py b/pySDC/projects/DAE/run/run_iteration_test.py index 986895b044..bf6c0d1fdb 100644 --- a/pySDC/projects/DAE/run/run_iteration_test.py +++ b/pySDC/projects/DAE/run/run_iteration_test.py @@ -6,7 +6,7 @@ from pySDC.projects.DAE.problems.simple_DAE import simple_dae_1 from pySDC.projects.DAE.problems.transistor_amplifier import one_transistor_amplifier from pySDC.projects.DAE.sweepers.fully_implicit_DAE import fully_implicit_DAE -from pySDC.projects.DAE.misc.HookClass_DAE import error_hook +from pySDC.projects.DAE.misc.HookClass_DAE import LogGlobalErrorPostStepDifferentialVariable from pySDC.helpers.stats_helper import get_sorted from pySDC.helpers.stats_helper import filter_stats @@ -32,7 +32,6 @@ def setup(): # Absolute termination tollerance for implicit solver # Exactly how this is used can be adjusted in update_nodes() in the fully implicit sweeper problem_params['newton_tol'] = 1e-7 - problem_params['nvars'] = 3 # This comes as read-in for the step class step_params = dict() @@ -40,7 +39,7 @@ def setup(): # initialize controller parameters controller_params = dict() controller_params['logger_level'] = 30 - controller_params['hook_class'] = error_hook + controller_params['hook_class'] = LogGlobalErrorPostStepDifferentialVariable # Fill description dictionary for easy hierarchy creation description = dict() @@ -98,7 +97,7 @@ def run(description, controller_params, 
run_params): uend, stats = controller.run(u0=uinit, t0=run_params['t0'], Tend=run_params['tend']) # compute exact solution and compare - err = get_sorted(stats, type='error_post_step', sortby='time') + err = get_sorted(stats, type='e_global_differential_post_step', sortby='time') residual = get_sorted(stats, type='residual_post_step', sortby='time') niter = filter_stats(stats, type='niter') diff --git a/pySDC/projects/DAE/run/synchronous_machine_playground.py b/pySDC/projects/DAE/run/synchronous_machine_playground.py index 32e8b5a4fc..f658966224 100644 --- a/pySDC/projects/DAE/run/synchronous_machine_playground.py +++ b/pySDC/projects/DAE/run/synchronous_machine_playground.py @@ -6,10 +6,10 @@ from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI from pySDC.projects.DAE.problems.synchronous_machine import synchronous_machine_infinite_bus from pySDC.projects.DAE.sweepers.fully_implicit_DAE import fully_implicit_DAE -from pySDC.projects.DAE.misc.HookClass_DAE import approx_solution_hook -from pySDC.projects.DAE.misc.HookClass_DAE import error_hook +from pySDC.projects.DAE.misc.HookClass_DAE import LogGlobalErrorPostStepDifferentialVariable from pySDC.helpers.stats_helper import get_sorted from pySDC.helpers.stats_helper import filter_stats +from pySDC.implementations.hooks.log_solution import LogSolution def main(): @@ -29,8 +29,7 @@ def main(): # initialize problem parameters problem_params = dict() - problem_params['newton_tol'] = 1e-3 # tollerance for implicit solver - problem_params['nvars'] = 14 + problem_params['newton_tol'] = 1e-3 # initialize step parameters step_params = dict() @@ -39,7 +38,7 @@ def main(): # initialize controller parameters controller_params = dict() controller_params['logger_level'] = 30 - controller_params['hook_class'] = [error_hook, approx_solution_hook] + controller_params['hook_class'] = [LogGlobalErrorPostStepDifferentialVariable, LogSolution] # Fill description dictionary for easy hierarchy 
creation description = dict() @@ -67,33 +66,41 @@ def main(): uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend) # check error (only available if reference solution was provided) - # err = get_sorted(stats, type='error_post_step', sortby='time') + # err = get_sorted(stats, type='e_global_differential_post_step', sortby='time') # err = np.linalg.norm([err[i][1] for i in range(len(err))], np.inf) # print(f"Error is {err}") - uend_ref = [ + uend_ref = P.dtype_u(P.init) + uend_ref.diff[:8] = ( 8.30823565e-01, -4.02584174e-01, 1.16966755e00, 9.47592808e-01, -3.68076863e-01, -3.87492326e-01, + 3.10281509e-01, + 9.94039645e-01, + ) + uend_ref.alg[:6] = ( -7.77837831e-01, -1.67347611e-01, 1.34810867e00, 5.46223705e-04, 1.29690691e-02, -8.00823474e-02, - 3.10281509e-01, - 9.94039645e-01, - ] - err = np.linalg.norm(uend - uend_ref, np.inf) + ) + err = abs(uend.diff - uend_ref.diff) assert np.isclose(err, 0, atol=1e-4), "Error too large." # store results - sol = get_sorted(stats, type='approx_solution', sortby='time') + sol = get_sorted(stats, type='u', sortby='time') sol_dt = np.array([sol[i][0] for i in range(len(sol))]) - sol_data = np.array([[sol[j][1][i] for j in range(len(sol))] for i in range(problem_params['nvars'])]) + sol_data = np.array( + [ + [(sol[j][1].diff[id], sol[j][1].alg[ia]) for j in range(len(sol))] + for id, ia in zip(range(len(uend.diff)), range(len(uend.alg))) + ] + ) niter = filter_stats(stats, type='niter') niter = np.fromiter(niter.values(), int) diff --git a/pySDC/projects/DAE/sweepers/SemiImplicitDAE.py b/pySDC/projects/DAE/sweepers/SemiImplicitDAE.py new file mode 100644 index 0000000000..ede17ea686 --- /dev/null +++ b/pySDC/projects/DAE/sweepers/SemiImplicitDAE.py @@ -0,0 +1,190 @@ +from pySDC.core.Errors import ParameterError +from pySDC.projects.DAE.sweepers.fully_implicit_DAE import fully_implicit_DAE + + +class SemiImplicitDAE(fully_implicit_DAE): + r""" + Custom sweeper class to implement SDC for solving semi-explicit DAEs of the form 
+ + .. math:: + u' = f(u, z, t), + + .. math:: + 0 = g(u, z, t) + + with :math:`u(t), u'(t) \in\mathbb{R}^{N_d}` the differential variables and their derivatives, + algebraic variables :math:`z(t) \in\mathbb{R}^{N_a}`, :math:`f(u, z, t) \in \mathbb{R}^{N_d}`, + and :math:`g(u, z, t) \in \mathbb{R}^{N_a}`. :math:`N = N_d + N_a` is the dimension of the whole + system of DAEs. + + It solves a collocation problem of the form + + .. math:: + U = f(\vec{U}_0 + \Delta t (\mathbf{Q} \otimes \mathbf{I}_{n_d}) \vec{U}, \vec{z}, \tau), + + .. math:: + 0 = g(\vec{U}_0 + \Delta t (\mathbf{Q} \otimes \mathbf{I}_{n_d}) \vec{U}, \vec{z}, \tau), + + where + + - :math:`\tau=(\tau_1,..,\tau_M) \in \mathbb{R}^M` the vector of collocation nodes, + - :math:`\vec{U}_0 = (u_0,..,u_0) \in \mathbb{R}^{MN_d}` the vector of initial condition spread to each node, + - spectral integration matrix :math:`\mathbf{Q} \in \mathbb{R}^{M \times M}`, + - :math:`\vec{U}=(U_1,..,U_M) \in \mathbb{R}^{MN_d}` the vector of unknown derivatives of differential variables + :math:`U_m \approx U(\tau_m) = u'(\tau_m) \in \mathbb{R}^{N_d}`, + - :math:`\vec{z}=(z_1,..,z_M) \in \mathbb{R}^{MN_a}` the vector of unknown algebraic variables + :math:`z_m \approx z(\tau_m) \in \mathbb{R}^{N_a}`, + - and identity matrix :math:`\mathbf{I}_{N_d} \in \mathbb{R}^{N_d \times N_d}`. + + This sweeper treats the differential and the algebraic variables differently by only integrating the differential + components. Solving the nonlinear system, :math:`{U,z}` are the unknowns. + + The sweeper implementation is based on the ideas mentioned in the KDC publication [1]_. + + Parameters + ---------- + params : dict + Parameters passed to the sweeper. + + Attributes + ---------- + QI : np.2darray + Implicit Euler integration matrix. + + References + ---------- + .. [1] J. Huang, J. Jun, M. L. Minion. Arbitrary order Krylov deferred correction methods for differential algebraic + equation. J. Comput. Phys. Vol. 221 No. 2 (2007). 
+ + Note + ---- + The right-hand side of the problem DAE classes using this sweeper has to be implemented exactly in the way the + semi-explicit DAE is defined. Define :math:`\vec{x}=(y, z)^T`, :math:`F(\vec{x})=(f(\vec{x}), g(\vec{x}))`, and the + matrix + + .. math:: + A = \begin{matrix} + I & 0 \\ + 0 & 0 + \end{matrix} + + then, the problem can be reformulated as + + .. math:: + A\vec{x}' = F(\vec{x}). + + Then, setting :math:`F_{new}(\vec{x}, \vec{x}') = A\vec{x}' - F(\vec{x})` defines a DAE of fully-implicit form + + .. math:: + 0 = F_{new}(\vec{x}, \vec{x}'). + + Hence, the method ``eval_f`` of problem DAE classes of semi-explicit form implements the right-hand side in the way of + returning :math:`F(\vec{x})`, whereas ``eval_f`` of problem classes of fully-implicit form returns the right-hand side + :math:`F_{new}(\vec{x}, \vec{x}')`. + """ + + def __init__(self, params): + """Initialization routine""" + + if 'QI' not in params: + params['QI'] = 'IE' + + # call parent's initialization routine + super().__init__(params) + + msg = f"Quadrature type {self.params.quad_type} is not implemented yet. Use 'RADAU-RIGHT' instead!" + if self.coll.left_is_node: + raise ParameterError(msg) + + self.QI = self.get_Qdelta_implicit(coll=self.coll, qd_type=self.params.QI) + + def integrate(self): + r""" + Returns the solution by integrating its gradient (fundamental theorem of calculus) at each collocation node. + ``level.f`` stores the gradient of solution ``level.u``. + + Returns + ------- + me : list of lists + Integral of the gradient at each collocation node. 
+ """ + + # get current level and problem description + L = self.level + P = L.prob + M = self.coll.num_nodes + + me = [] + for m in range(1, M + 1): + # new instance of dtype_u, initialize values with 0 + me.append(P.dtype_u(P.init, val=0.0)) + for j in range(1, M + 1): + me[-1].diff[:] += L.dt * self.coll.Qmat[m, j] * L.f[j].diff[:] + + return me + + def update_nodes(self): + r""" + Updates the values of solution ``u`` and their gradient stored in ``f``. + """ + + L = self.level + P = L.prob + + # only if the level has been touched before + assert L.status.unlocked + M = self.coll.num_nodes + + integral = self.integrate() + # build the rest of the known solution u_0 + del_t(Q - Q_del)U_k + for m in range(1, M + 1): + for j in range(1, m + 1): + integral[m - 1].diff[:] -= L.dt * self.QI[m, j] * L.f[j].diff[:] + integral[m - 1].diff[:] += L.u[0].diff + + # do the sweep + for m in range(1, M + 1): + u_approx = P.dtype_u(integral[m - 1]) + for j in range(1, m): + u_approx.diff[:] += L.dt * self.QI[m, j] * L.f[j].diff[:] + + def implSystem(unknowns): + """ + Build implicit system to solve in order to find the unknowns. + + Parameters + ---------- + unknowns : dtype_u + Unknowns of the system. + + Returns + ------- + sys : + System to be solved as implicit function. 
+ """ + + unknowns_mesh = P.dtype_f(unknowns) + + local_u_approx = P.dtype_u(u_approx) + local_u_approx.diff[:] += L.dt * self.QI[m, m] * unknowns_mesh.diff[:] + local_u_approx.alg[:] = unknowns_mesh.alg[:] + + sys = P.eval_f(local_u_approx, unknowns_mesh, L.time + L.dt * self.coll.nodes[m - 1]) + return sys + + u0 = P.dtype_u(P.init) + u0.diff[:], u0.alg[:] = L.f[m].diff[:], L.u[m].alg[:] + u_new = P.solve_system(implSystem, u0, L.time + L.dt * self.coll.nodes[m - 1]) + # ---- update U' and z ---- + L.f[m].diff[:] = u_new.diff[:] + L.u[m].alg[:] = u_new.alg[:] + + # Update solution approximation + integral = self.integrate() + for m in range(M): + L.u[m + 1].diff[:] = L.u[0].diff[:] + integral[m].diff[:] + + # indicate presence of new values at this level + L.status.updated = True + + return None diff --git a/pySDC/projects/DAE/sweepers/fully_implicit_DAE.py b/pySDC/projects/DAE/sweepers/fully_implicit_DAE.py index aab811ffb9..d5ce36e1b8 100644 --- a/pySDC/projects/DAE/sweepers/fully_implicit_DAE.py +++ b/pySDC/projects/DAE/sweepers/fully_implicit_DAE.py @@ -3,6 +3,7 @@ from pySDC.core.Errors import ParameterError from pySDC.implementations.sweeper_classes.generic_implicit import generic_implicit +from pySDC.projects.DAE.misc.DAEMesh import DAEMesh class fully_implicit_DAE(generic_implicit): @@ -46,18 +47,13 @@ class fully_implicit_DAE(generic_implicit): """ def __init__(self, params): - """ - Initialization routine for the custom sweeper - - Args: - params: parameters for the sweeper - """ + """Initialization routine""" if 'QI' not in params: params['QI'] = 'IE' # call parent's initialization routine - super(fully_implicit_DAE, self).__init__(params) + super().__init__(params) msg = f"Quadrature type {self.params.quad_type} is not implemented yet. Use 'RADAU-RIGHT' instead!" if self.coll.left_is_node: @@ -71,34 +67,26 @@ def update_nodes(self): preconditioned Richardson iteration in **"ordinary"** SDC. 
""" - # get current level and problem description L = self.level - # in the fully implicit case L.prob.eval_f() evaluates the function F(u, u', t) P = L.prob # only if the level has been touched before assert L.status.unlocked - # get number of collocation nodes for easier access M = self.coll.num_nodes - u_0 = L.u[0] # get QU^k where U = u' - # note that for multidimensional functions the required Kronecker product is achieved since - # e.g. L.f[j] is a mesh object and multiplication with a number distributes over the mesh integral = self.integrate() # build the rest of the known solution u_0 + del_t(Q - Q_del)U_k for m in range(1, M + 1): for j in range(1, M + 1): integral[m - 1] -= L.dt * self.QI[m, j] * L.f[j] - # add initial value - integral[m - 1] += u_0 + integral[m - 1] += L.u[0] # do the sweep for m in range(1, M + 1): - # build implicit function, consisting of the known values from above and new values from previous nodes (at k+1) - u_approx = P.dtype_u(integral[m - 1]) # add the known components from current sweep del_t*Q_del*U_k+1 + u_approx = P.dtype_u(integral[m - 1]) for j in range(1, m): u_approx += L.dt * self.QI[m, j] * L.f[j] @@ -118,11 +106,10 @@ def implSystem(params): System to be solved as implicit function. 
""" - params_mesh = P.dtype_f(P.init) - params_mesh[:] = params + params_mesh = P.dtype_f(params) # build parameters to pass to implicit function - local_u_approx = u_approx + local_u_approx = P.dtype_f(u_approx) # note that derivatives of algebraic variables are taken into account here too # these do not directly affect the output of eval_f but rather indirectly via QI @@ -131,22 +118,14 @@ def implSystem(params): sys = P.eval_f(local_u_approx, params_mesh, L.time + L.dt * self.coll.nodes[m - 1]) return sys - # get U_k+1 - # note: not using solve_system here because this solve step is the same for any problem - # See link for how different methods use the default tol parameter - # https://github.com/scipy/scipy/blob/8a6f1a0621542f059a532953661cd43b8167fce0/scipy/optimize/_root.py#L220 - # options['xtol'] = P.params.newton_tol - # options['eps'] = 1e-16 - - u_new = P.solve_system(implSystem, L.f[m], L.time + L.dt * self.coll.nodes[m - 1]) - # update gradient (recall L.f is being used to store the gradient) - L.f[m][:] = u_new + L.f[m] = P.solve_system(implSystem, L.f[m], L.time + L.dt * self.coll.nodes[m - 1]) # Update solution approximation integral = self.integrate() for m in range(M): - L.u[m + 1] = u_0 + integral[m] + L.u[m + 1] = L.u[0] + integral[m] + # indicate presence of new values at this level L.status.updated = True @@ -213,11 +192,7 @@ def compute_residual(self, stage=None): L.status.residual = 0.0 if L.status.residual is None else L.status.residual return None - # check if there are new values (e.g. 
from a sweep) - # assert L.status.updated - # compute the residual for each node - res_norm = [] for m in range(self.coll.num_nodes): # use abs function from data type here @@ -242,3 +217,18 @@ def compute_residual(self, stage=None): L.status.updated = False return None + + def compute_end_point(self): + """ + Compute u at the right point of the interval + + The value uend computed here is a full evaluation of the Picard formulation unless do_full_update==False + + Returns: + None + """ + + if not self.coll.right_is_node or self.params.do_coll_update: + raise NotImplementedError() + + super().compute_end_point() diff --git a/pySDC/projects/Monodomain/README.rst b/pySDC/projects/Monodomain/README.rst new file mode 100644 index 0000000000..fd0e4d344d --- /dev/null +++ b/pySDC/projects/Monodomain/README.rst @@ -0,0 +1,94 @@ +Exponential SDC for the Monodomain Equation in Cardiac Electrophysiology +============================================================================== +This project implements the exponential spectral deferred correction (ESDC) method for the monodomain equation in cardiac electrophysiology. +The method proposed here is an adaptation of the `ESDC method proposed by T. Buvoli `_ to the monodomain equation. +In particular, the implicit-explicit Rush-Larsen method is used as correction scheme. Moreover, not all model components have exponential terms, therefore the resulting method is an hybrid between ESDC and the standard SDC method. + +Monodomain equation +------------------- +The monodomain equation models the electrical activity in the heart. It is a reaction-diffusion equation coupled with an ordinary differential equation for the ionic model and is given by + +.. 
math:: + \begin{align} + \chi (C_m\frac{\partial V}{\partial t}+I_{ion}(V,z_E,z_e, t)) &= \nabla \cdot (D \nabla V) & \quad \text{in } &\Omega \times (0,T), \\ + \frac{\partial z_E}{\partial t} &= g_E(V,z_E,z_e) & \quad \text{in } &\Omega \times (0,T), \\ + \frac{\partial z_e}{\partial t} &= \Lambda_e(V)(z_e-z_{e,\infty}(V)) & \quad \text{in } &\Omega \times (0,T), \\ + \end{align} + +plus the boundary conditions, where :math:`V(t,x)\in\mathbb{R}` is the transmembrane potential and :math:`z_E(t,x)\in\mathbb{R}^n`, :math:`z_e(t,x)\in\mathbb{R}^m` are the ionic model state variables. +The ionic model right-hand side :math:`g_E` is a general nonlinear term, while :math:`\Lambda_e` is a diagonal matrix. The typical range for the number of unknowns :math:`N=1+n+m` is :math:`N\in [4,50]` and depends on the ionic model of choice. + +Spatial discretization yields a system of ODEs which can be written in compact form as + +.. math:: + \mathbf y'=f_I(\mathbf y)+f_E(\mathbf y)+f_e(\mathbf y), + +where :math:`\mathbf y(t)\in\mathbb{R}^{M N}` is the vector of unknowns and :math:`M` the number of mesh nodes. +Concerning the right-hand sides, :math:`f_I` is a linear term for the discrete diffusion, :math:`f_E` is a nonlinear but non-stiff term for :math:`I_{ion},g_E`, and :math:`f_e` is a severely stiff term for :math:`\Lambda_e(V)(z_e-z_{e,\infty}(V))`. + +The standard (serial) way of integrating the monodomain equation is by using a splitting method, where :math:`f_I` is integrated implicitly, :math:`f_E` explicitly, and :math:`f_e` using the exponential Euler method (which is inexpensive due to the diagonal structure of :math:`\Lambda_e`). We denote this method as IMEXEXP. + +The ESDC method for the monodomain equation +------------------------------------------- +A possible way to parallelize the integration of the monodomain equation is by employing the SDC method in combination with the IMEXEXP approach for the correction scheme (preconditioner). 
+However, this approach is unstable due to the severe stiffness of :math:`f_e`. +Therefore we propose a hybrid method, where we employ SDC for the :math:`f_I,f_E` terms and ESDC for the :math:`f_e` term. For the correction scheme we still use the IMEXEXP method. +The resulting method can be seen as a particular case of ESDC and will be denoted by ESDC in the next figures, for simplicity. + +Running the code +---------------- +Due to their complexity, ionic models are coded in C++ and wrapped to Python. Therefore, before running any example you need to compile the ionic models by running the following command in the root folder: + +.. code-block:: + + export IONIC_MODELS_PATH=pySDC/projects/Monodomain/problem_classes/ionicmodels/cpp + c++ -O3 -Wall -shared -std=c++11 -fPIC -fvisibility=hidden $(python3 -m pybind11 --includes) ${IONIC_MODELS_PATH}/bindings_definitions.cpp -o ${IONIC_MODELS_PATH}/ionicmodels$(python3-config --extension-suffix) + +Then an example can be run: + +.. code-block:: + + cd pySDC/projects/Monodomain/run_scripts + mpirun -n 4 python run_MonodomainODE_cli.py --dt 0.05 --end_time 0.2 --num_nodes 6,3 --domain_name cube_1D --refinements 0 --ionic_model_name TTP --truly_time_parallel --n_time_ranks 4 + +Stability +--------- +We display here the stability domain of the ESDC and SDC methods, both with IMEXEXP as correction scheme, applied to the test problem + +.. math:: + y'=\lambda_I y+\lambda_E y+\lambda_e y, + +with :math:`\lambda_I,\lambda_E,\lambda_e` representing :math:`f_I,f_E,f_e`, respectively. +We fix :math:`\lambda_E=-1` and vary the stiff terms :math:`\lambda_I,\lambda_e` only. We see that the ESDC method is stable for all tested values of :math:`\lambda_I,\lambda_e`, while SDC is not. + +.. image:: ../../../data/stability_domain_IMEXEXP_EXPRK.png + :scale: 60 % +.. image:: ../../../data/stability_domain_IMEXEXP.png + :scale: 60 % + +Convergence +----------- +Here we verify convergence of the ESDC method for the monodomain equation. 
+We fix the number of collocation nodes to :math:`m=6` and perform a convergence experiment fixing the number of sweeps to either :math:`k=3` or :math:`k=6`. +We use the ten Tusscher-Panfilov ionic model, which is employed in practical applications. +We see that we gain one order of accuracy per sweep, as expected. + +.. image:: ../../../data/convergence_ESDC_fixed_iter.png + :scale: 100 % + + +Iterations +---------- +Here we consider three methods: + +* ESDC: with :math:`m=6` collocation nodes. +* MLESDC: This is a multilevel version of ESDC with :math:`m=6` collocation nodes on the fine level and :math:`m=3` nodes on the coarse level. +* PFASST: Combination of the PFASST parallelization method with MLESDC, using 24 processors. + +We display the number of iterations required by each method to reach a given tolerance and the residual at convergence. As ionic model we use again the ten Tusscher-Panfilov model. +We see that PFASST requires a reasonably small number of iterations, comparable to the serial counterparts ESDC and MLESDC. + +.. image:: ../../../data/niter_VS_time.png + :scale: 100 % +.. 
image:: ../../../data/res_VS_time.png + :scale: 100 % \ No newline at end of file diff --git a/pySDC/projects/Monodomain/datatype_classes/my_mesh.py b/pySDC/projects/Monodomain/datatype_classes/my_mesh.py new file mode 100644 index 0000000000..d07079bea0 --- /dev/null +++ b/pySDC/projects/Monodomain/datatype_classes/my_mesh.py @@ -0,0 +1,5 @@ +from pySDC.implementations.datatype_classes.mesh import MultiComponentMesh + + +class imexexp_mesh(MultiComponentMesh): + components = ['impl', 'expl', 'exp'] diff --git a/pySDC/projects/Monodomain/etc/environment-monodomain.yml b/pySDC/projects/Monodomain/etc/environment-monodomain.yml new file mode 100644 index 0000000000..3fe8c09c3a --- /dev/null +++ b/pySDC/projects/Monodomain/etc/environment-monodomain.yml @@ -0,0 +1,24 @@ +name: pySDC_monodomain +channels: + - conda-forge + - defaults +dependencies: + - python + - numpy + - scipy>=0.17.1 + - matplotlib>=3.0 + - sympy>=1.0 + - numba>=0.35 + - dill>=0.2.6 + - pytest + - pytest-benchmark + - pytest-timeout + - pytest-order + - coverage[toml] + - sphinx + - numdifftools + - pybind11 + - mpi4py + - mpich + - tqdm + - pymp-pypi diff --git a/pySDC/projects/Monodomain/hooks/HookClass_pde.py b/pySDC/projects/Monodomain/hooks/HookClass_pde.py new file mode 100644 index 0000000000..e4940ee153 --- /dev/null +++ b/pySDC/projects/Monodomain/hooks/HookClass_pde.py @@ -0,0 +1,34 @@ +from pySDC.core.Hooks import hooks + + +class pde_hook(hooks): + """ + Hook class to write the solution to file. + """ + + def __init__(self): + super(pde_hook, self).__init__() + + def pre_run(self, step, level_number): + """ + Overwrite default routine called before time-loop starts + It calls the default routine and then writes the initial value to file. 
+ """ + super(pde_hook, self).pre_run(step, level_number) + + L = step.levels[level_number] + P = L.prob + if level_number == 0 and L.time == P.t0: + P.write_solution(L.u[0], P.t0) + + def post_step(self, step, level_number): + """ + Overwrite default routine called after each step. + It calls the default routine and then writes the solution to file. + """ + super(pde_hook, self).post_step(step, level_number) + + if level_number == 0: + L = step.levels[level_number] + P = L.prob + P.write_solution(L.uend, L.time + L.dt) diff --git a/pySDC/projects/Monodomain/hooks/HookClass_post_iter_info.py b/pySDC/projects/Monodomain/hooks/HookClass_post_iter_info.py new file mode 100644 index 0000000000..6cbb98100a --- /dev/null +++ b/pySDC/projects/Monodomain/hooks/HookClass_post_iter_info.py @@ -0,0 +1,34 @@ +import time +from pySDC.core.Hooks import hooks + + +class post_iter_info_hook(hooks): + """ + Hook class to write additional iteration information to the command line. + It is used to print the final residual, after u[0] has been updated with the new value from the previous step. + This residual is the one used to check the convergence of the iteration and when running in parallel is different from + the one printed at IT_FINE. + """ + + def __init__(self): + super(post_iter_info_hook, self).__init__() + + def post_iteration(self, step, level_number): + """ + Overwrite default routine called after each iteration. + It calls the default routine and then writes the residual to the command line. + We call this the residual at IT_END. 
+ """ + super().post_iteration(step, level_number) + self.__t1_iteration = time.perf_counter() + + L = step.levels[level_number] + + self.logger.info( + "Process %2i on time %8.6f at stage %15s: ----------- Iteration: %2i --------------- " "residual: %12.8e", + step.status.slot, + L.time, + "IT_END", + step.status.iter, + L.status.residual, + ) diff --git a/pySDC/projects/Monodomain/problem_classes/MonodomainODE.py b/pySDC/projects/Monodomain/problem_classes/MonodomainODE.py new file mode 100644 index 0000000000..ff22af7f18 --- /dev/null +++ b/pySDC/projects/Monodomain/problem_classes/MonodomainODE.py @@ -0,0 +1,408 @@ +from pathlib import Path +import logging +import numpy as np +from pySDC.core.Problem import ptype +from pySDC.implementations.datatype_classes.mesh import mesh +from pySDC.projects.Monodomain.datatype_classes.my_mesh import imexexp_mesh +from pySDC.projects.Monodomain.problem_classes.space_discretizazions.Parabolic_DCT import Parabolic_DCT +import pySDC.projects.Monodomain.problem_classes.ionicmodels.cpp as ionicmodels + + +class MonodomainODE(ptype): + """ + A class for the discretization of the Monodomain equation. The Monodomain equation is a parabolic PDE composed of + a reaction-diffusion equation coupled with an ODE system. The unknowns are the potential V and the ionic model variables (g_1,...,g_N). + The reaction-diffusion equation is discretized in another class, where any spatial discretization can be used. + The ODE system is the ionic model, which doesn't need spatial discretization, being a system of ODEs. + + + + Attributes: + ----------- + parabolic: The parabolic problem class used to discretize the reaction-diffusion equation + ionic_model: The ionic model used to discretize the ODE system. This is a wrapper around the actual ionic model, which is written in C++. + size: The number of variables in the ionic model + vector_type: The type of vector used to store a single unknown (e.g. V). 
This data type depends on spatial discretization, hence on the parabolic class. + dtype_u: The type of vector used to store all the unknowns (V,g_1,...,g_N). This is a vector of vector_type. + dtype_f: The type of vector used to store the right-hand side of the ODE system stemming from the monodomain equation. This is a vector of vector_type. + output_folder: The folder where the solution is written to file + t0: The initial simulation time. This is 0.0 by default but can be changed by the user in order to skip the initial stimulus. + Tend: The duration of the simulation. + output_V_only: If True, only the potential V is written to file. If False, all the ionic model variables are written to file. + read_init_val: If True, the initial value is read from file. If False, the initial value is at equilibrium. + """ + + dtype_u = mesh + dtype_f = mesh + + def __init__(self, **problem_params): + self.logger = logging.getLogger("step") + + self.parabolic = Parabolic_DCT(**problem_params) + + self.define_ionic_model(problem_params["ionic_model_name"]) + + self.init = ((self.size, *self.parabolic.init[0]), self.parabolic.init[1], self.parabolic.init[2]) + + # invoke super init + super(MonodomainODE, self).__init__(self.init) + # store all problem params dictionary values as attributes + self._makeAttributeAndRegister(*problem_params.keys(), localVars=problem_params, readOnly=True) + + self.define_stimulus() + + # initial and end time + self.t0 = 0.0 + self.Tend = 50.0 if self.end_time < 0.0 else self.end_time + + # init output stuff + self.output_folder = ( + Path(self.output_root) + / Path(self.parabolic.domain_name) + / Path(self.parabolic.mesh_name) + / Path(self.ionic_model_name) + ) + self.parabolic.init_output(self.output_folder) + + def init_exp_extruded(self, new_dim_shape): + # The info needed to initialize a new vector of size (M,N) where M is the number of variables in the + # ionic model with exponential terms and N is the number of dofs in the mesh. 
+ # The vector is further extruded to additional dimensions with shape new_dim_shape. + return ((*new_dim_shape, len(self.rhs_exp_indeces), self.init[0][1]), self.init[1], self.init[2]) + + def write_solution(self, uh, t): + # write solution to file, only the potential V=uh[0], not the ionic model variables + self.parabolic.write_solution(uh[0], t) + + def write_reference_solution(self, uh, all=False): + # write solution to file, only the potential V=uh[0] or all variables if all=True + self.parabolic.write_reference_solution(uh, list(range(self.size)) if all else [0]) + + def read_reference_solution(self, uh, ref_file_name, all=False): + # read solution from file, only the potential V=uh[0] or all variables if all=True + # returns true if read was successful, false else + return self.parabolic.read_reference_solution(uh, list(range(self.size)) if all else [0], ref_file_name) + + def initial_value(self): + # Create initial value. Every variable is constant in space + u0 = self.dtype_u(self.init) + init_vals = self.ionic_model.initial_values() + for i in range(self.size): + u0[i][:] = init_vals[i] + + # overwrite the initial value with solution from file if desired + if self.read_init_val: + read_ok = self.read_reference_solution(u0, self.init_val_name, True) + assert read_ok, "ERROR: Could not read initial value from file." 
+ + return u0 + + def compute_errors(self, uh): + """ + Compute L2 error of uh[0] (potential V) + Args: + uh (VectorOfVectors): solution as vector of vectors + + Returns: + computed (bool): if error computation was successful + error (float): L2 error + rel_error (float): relative L2 error + """ + ref_sol_V = self.dtype_u(init=self.init, val=0.0) + read_ok = self.read_reference_solution(ref_sol_V, self.ref_sol, False) + if read_ok: + error_L2, rel_error_L2 = self.parabolic.compute_errors(uh[0], ref_sol_V[0]) + + print(f"L2-errors: {error_L2}") + print(f"Relative L2-errors: {rel_error_L2}") + + return True, error_L2, rel_error_L2 + else: + return False, 0.0, 0.0 + + def define_ionic_model(self, ionic_model_name): + self.scale_Iion = 0.01 # used to convert currents in uA/cm^2 to uA/mm^2 + # scale_im is applied to the rhs of the ionic model, so that the rhs is in units of mV/ms + self.scale_im = self.scale_Iion / self.parabolic.Cm + + if ionic_model_name in ["HodgkinHuxley", "HH"]: + self.ionic_model = ionicmodels.HodgkinHuxley(self.scale_im) + elif ionic_model_name in ["Courtemanche1998", "CRN"]: + self.ionic_model = ionicmodels.Courtemanche1998(self.scale_im) + elif ionic_model_name in ["TenTusscher2006_epi", "TTP"]: + self.ionic_model = ionicmodels.TenTusscher2006_epi(self.scale_im) + elif ionic_model_name in ["TTP_S", "TTP_SMOOTH"]: + self.ionic_model = ionicmodels.TenTusscher2006_epi_smooth(self.scale_im) + elif ionic_model_name in ["BiStable", "BS"]: + self.ionic_model = ionicmodels.BiStable(self.scale_im) + else: + raise Exception("Unknown ionic model.") + + self.size = self.ionic_model.size + + def define_stimulus(self): + + stim_dur = 2.0 + if "cuboid" in self.parabolic.domain_name: + self.stim_protocol = [[0.0, stim_dur]] # list of stimuli times and stimuli durations + self.stim_intensities = [50.0] # list of stimuli intensities + self.stim_centers = [[0.0, 0.0, 0.0]] # list of stimuli centers + r = 1.5 + self.stim_radii = [[r, r, r]] * len( + 
self.stim_protocol + ) # list of stimuli radii in the three directions (x,y,z) + elif "cube" in self.parabolic.domain_name: + self.stim_protocol = [[0.0, 2.0], [1000.0, 10.0]] + self.stim_intensities = [50.0, 80.0] + centers = [[0.0, 50.0, 50.0], [58.5, 0.0, 50.0]] + self.stim_centers = [centers[i] for i in range(len(self.stim_protocol))] + self.stim_radii = [[1.0, 50.0, 50.0], [1.5, 60.0, 50.0]] + else: + raise Exception("Unknown domain name.") + + self.stim_protocol = np.array(self.stim_protocol) + self.stim_protocol[:, 0] -= self.init_time # shift stimulus times by the initial time + + # index of the last stimulus applied. The value -1 means no stimulus has been applied yet. + self.last_stim_index = -1 + + def eval_f(self, u, t, fh=None): + if fh is None: + fh = self.dtype_f(init=self.init, val=0.0) + + # eval ionic model rhs on u and put result in fh. All indices of the vector of vector fh must be computed (list(range(self.size)) + self.eval_expr(self.ionic_model.f, u, fh, list(range(self.size)), False) + # apply stimulus + fh[0] += self.Istim(t) + + # apply diffusion + self.parabolic.add_disc_laplacian(u[0], fh[0]) + + return fh + + def Istim(self, t): + tol = 1e-8 + for i, (stim_time, stim_dur) in enumerate(self.stim_protocol): + # Look for which stimulus to apply at the current time t by checking the stimulus protocol: + # Check if t is in the interval [stim_time, stim_time+stim_dur] with a tolerance tol + # and apply the corresponding stimulus + if (t + stim_dur * tol >= stim_time) and (t + stim_dur * tol < stim_time + stim_dur): + # if the stimulus is not the same as the last one applied, update the last_stim_index and the space_stim vector + if i != self.last_stim_index: + self.last_stim_index = i + # get the vector of zeros and ones defining the stimulus region + self.space_stim = self.parabolic.stim_region(self.stim_centers[i], self.stim_radii[i]) + # scale by the stimulus intensity and apply the change of units + self.space_stim *= self.scale_im * 
self.stim_intensities[i] + return self.space_stim + + return self.parabolic.zero_stim_vec + + def eval_expr(self, expr, u, fh, indeces, zero_untouched_indeces=True): + # evaluate the expression expr on u and put the result in fh + # Here expr is a wrapper on a C++ function that evaluates the rhs of the ionic model (or part of it) + if expr is not None: + expr(u, fh) + + # indeces is a list of integers indicating which variables are modified by the expression expr. + # This information is known a priori. Here we use it to zero the variables that are not modified by expr (if zero_untouched_indeces is True) + if zero_untouched_indeces: + non_indeces = [i for i in range(self.size) if i not in indeces] + for i in non_indeces: + fh[i][:] = 0.0 + + +class MultiscaleMonodomainODE(MonodomainODE): + """ + The multiscale version of the MonodomainODE problem. This class is used to solve the monodomain equation with a multirate solver. + The main difference with respect to the MonodomainODE class is that the right-hand side of the ODE system is split into three parts: + - impl: The discrete Laplacian. This is a stiff term threated implicitly by time integrators. + - expl: The non stiff term of the ionic models, threated explicitly by time integrators. + - exp: The very stiff but diagonal terms of the ionic models, threated exponentially by time integrators. + """ + + dtype_f = imexexp_mesh + + def __init__(self, **problem_params): + super(MultiscaleMonodomainODE, self).__init__(**problem_params) + + self.define_splittings() + + self.constant_lambda_and_phi = False + + def define_splittings(self): + """ + This function defines the splittings used in the problem. + The im_* variables are meant for internal use, the rhs_* for external use (i.e. in the sweeper). + The *_args and *_indeces are list of integers. + The *_args are list of variables that are needed to evaluate a function plus the variables that are modified by the function. 
+ The *_indeces are the list of variables that are modified by the function (subset of args). + Example: for f(x_0,x_1,x_2,x_3,x_4)=f(x_0,x_2,x_4)=(y_0,y_1,0,0,y_4) we have + f_args=[0,1,2,4]=([0,2,4] union [0,1,4]) since f needs x_0,x_2,x_4 and y_0,y_1,y_4 are effective outputs of the function (others are zero). + f_indeces=[0,1,4] since only y_0,y_1,y_4 are outputs of the function, y_2,y_3 are zero + + The ionic model has many variables (say M) and each variable has the same number of dofs as the mesh (say N). + Therefore the problem has size N*M and quickly becomes very large. Thanks to args and indeces we can: + - avoid to copy the whole vector M*N of variables when we only need a subset, for instance 2*N + - avoid unnecessary operations on the whole vector, for instance update only the variables that are effective outputs of a function (indeces), + and so on. + + Yeah, it's a bit a mess, but helpful. + """ + # define nonstiff term (explicit part) + # the wrapper to c++ expression that evaluates the nonstiff part of the ionic model + self.im_f_nonstiff = self.ionic_model.f_expl + # the args of f_expl + self.im_nonstiff_args = self.ionic_model.f_expl_args + # the indeces of f_expl + self.im_nonstiff_indeces = self.ionic_model.f_expl_indeces + + # define stiff term (implicit part) + self.im_f_stiff = None # no stiff part coming from ionic model to be threated implicitly. Indeed, all the stiff terms are diagonal and are threated exponentially. 
+ self.im_stiff_args = [] + self.im_stiff_indeces = [] + + # define exp term (eponential part) + # the exponential term is defined by f_exp(u)= lmbda(u)*(u-yinf(u)), hence we only need to define lmbda and yinf + # the wrapper to c++ expression that evaluates lmbda(u) + self.im_lmbda_exp = self.ionic_model.lmbda_exp + # the wrapper to c++ expression that evaluates lmbda(u) and yinf(u) + self.im_lmbda_yinf_exp = self.ionic_model.lmbda_yinf_exp + # the args of lmbda and yinf (they are the same) + self.im_exp_args = self.ionic_model.f_exp_args + # the indeces of lmbda and yinf + self.im_exp_indeces = self.ionic_model.f_exp_indeces + + # the spectral radius of the jacobian of non stiff term. We use a bound + self.rho_nonstiff_cte = self.ionic_model.rho_f_expl() + + self.rhs_stiff_args = self.im_stiff_args + self.rhs_stiff_indeces = self.im_stiff_indeces + # Add the potential V index 0 to the rhs_stiff_args and rhs_stiff_indeces. + # Indeed V is used to compute the Laplacian and is affected by the Laplacian, which is the implicit part of the problem. + if 0 not in self.rhs_stiff_args: + self.rhs_stiff_args = [0] + self.rhs_stiff_args + if 0 not in self.rhs_stiff_indeces: + self.rhs_stiff_indeces = [0] + self.rhs_stiff_indeces + + self.rhs_nonstiff_args = self.im_nonstiff_args + self.rhs_nonstiff_indeces = self.im_nonstiff_indeces + # Add the potential V index 0 to the rhs_nonstiff_indeces. Indeed V is affected by the stimulus, which is a non stiff term. 
+ if 0 not in self.rhs_nonstiff_indeces: + self.rhs_nonstiff_indeces = [0] + self.rhs_nonstiff_indeces + + self.im_non_exp_indeces = [i for i in range(self.size) if i not in self.im_exp_indeces] + + self.rhs_exp_args = self.im_exp_args + self.rhs_exp_indeces = self.im_exp_indeces + + self.rhs_non_exp_indeces = self.im_non_exp_indeces + + # a vector of ones, useful + self.one = self.dtype_u(init=self.init, val=1.0) + + # some space to store lmbda and yinf + self.lmbda = self.dtype_u(init=self.init, val=0.0) + self.yinf = self.dtype_u(init=self.init, val=0.0) + + def solve_system(self, rhs, factor, u0, t, u_sol=None): + """ + Solve the system u_sol[0] = (M-factor*A)^{-1} * M * rhs[0] + and sets u_sol[i] = rhs[i] for i>0 (as if A=0 for i>0) + + Arguments: + rhs (dtype_u): right-hand side + factor (float): factor multiplying the Laplacian + u0 (dtype_u): initial guess + t (float): current time + u_sol (dtype_u, optional): some space to store the solution. If None, a new space is allocated. Can be the same as rhs. + """ + if u_sol is None: + u_sol = self.dtype_u(init=self.init, val=0.0) + + self.parabolic.solve_system(rhs[0], factor, u0[0], t, u_sol[0]) + + if rhs is not u_sol: + for i in range(1, self.size): + u_sol[i][:] = rhs[i][:] + + return u_sol + + def eval_f(self, u, t, eval_impl=True, eval_expl=True, eval_exp=True, fh=None, zero_untouched_indeces=True): + """ + Evaluates the right-hand side terms. + + Arguments: + u (dtype_u): the current solution + t (float): the current time + eval_impl (bool, optional): if True, evaluates the implicit part of the right-hand side. Default is True. + eval_expl (bool, optional): if True, evaluates the explicit part of the right-hand side. Default is True. + eval_exp (bool, optional): if True, evaluates the exponential part of the right-hand side. Default is True. + fh (dtype_f, optional): space to store the right-hand side. If None, a new space is allocated. Default is None. 
+ zero_untouched_indeces (bool, optional): if True, the variables that are not modified by the right-hand side are zeroed. Default is True. + """ + + if fh is None: + fh = self.dtype_f(init=self.init, val=0.0) + + if eval_expl: + fh.expl = self.eval_f_nonstiff(u, t, fh.expl, zero_untouched_indeces) + + if eval_impl: + fh.impl = self.eval_f_stiff(u, t, fh.impl, zero_untouched_indeces) + + if eval_exp: + fh.exp = self.eval_f_exp(u, t, fh.exp, zero_untouched_indeces) + + return fh + + def eval_f_nonstiff(self, u, t, fh_nonstiff, zero_untouched_indeces=True): + # eval ionic model nonstiff terms + self.eval_expr(self.im_f_nonstiff, u, fh_nonstiff, self.im_nonstiff_indeces, zero_untouched_indeces) + + if not zero_untouched_indeces and 0 not in self.im_nonstiff_indeces: + fh_nonstiff[0][:] = 0.0 + + # apply stimulus + fh_nonstiff[0] += self.Istim(t) + + return fh_nonstiff + + def eval_f_stiff(self, u, t, fh_stiff, zero_untouched_indeces=True): + # eval ionic model stiff terms + self.eval_expr(self.im_f_stiff, u, fh_stiff, self.im_stiff_indeces, zero_untouched_indeces) + + if not zero_untouched_indeces and 0 not in self.im_stiff_indeces: + fh_stiff[0][:] = 0.0 + + # apply diffusion + self.parabolic.add_disc_laplacian(u[0], fh_stiff[0]) + + return fh_stiff + + def eval_f_exp(self, u, t, fh_exp, zero_untouched_indeces=True): + # eval ionic model exp terms f_exp(u)= lmbda(u)*(u-yinf(u) + self.eval_lmbda_yinf_exp(u, self.lmbda, self.yinf) + for i in self.im_exp_indeces: + fh_exp[i][:] = self.lmbda[i] * (u[i] - self.yinf[i]) + + if zero_untouched_indeces: + fh_exp[self.im_non_exp_indeces] = 0.0 + + return fh_exp + + def lmbda_eval(self, u, t, lmbda=None): + if lmbda is None: + lmbda = self.dtype_u(init=self.init, val=0.0) + + self.eval_lmbda_exp(u, lmbda) + + lmbda[self.im_non_exp_indeces] = 0.0 + + return lmbda + + def eval_lmbda_yinf_exp(self, u, lmbda, yinf): + self.im_lmbda_yinf_exp(u, lmbda, yinf) + + def eval_lmbda_exp(self, u, lmbda): + self.im_lmbda_exp(u, lmbda) diff 
--git a/pySDC/projects/Monodomain/problem_classes/TestODE.py b/pySDC/projects/Monodomain/problem_classes/TestODE.py new file mode 100644 index 0000000000..62d4bf01b6 --- /dev/null +++ b/pySDC/projects/Monodomain/problem_classes/TestODE.py @@ -0,0 +1,119 @@ +import logging +import numpy as np +from pySDC.core.Problem import ptype +from pySDC.core.Common import RegisterParams +from pySDC.implementations.datatype_classes.mesh import mesh +from pySDC.projects.Monodomain.datatype_classes.my_mesh import imexexp_mesh + + +""" +Here we define the problems classes for the multirate Dahlquist test equation y'=lambda_I*y + lambda_E*y + lambda_e*y +Things are done so that it is compatible witht the sweepers. +""" + + +class Parabolic(RegisterParams): + def __init__(self, **problem_params): + self._makeAttributeAndRegister(*problem_params.keys(), localVars=problem_params, readOnly=True) + self.shape = (1,) + self.init = ((1,), None, np.dtype("float64")) + + +class TestODE(ptype): + def __init__(self, **problem_params): + self.logger = logging.getLogger("step") + + self.parabolic = Parabolic(**problem_params) + self.size = 1 # one state variable + self.init = ((self.size, *self.parabolic.init[0]), self.parabolic.init[1], self.parabolic.init[2]) + + # invoke super init + super(TestODE, self).__init__(self.init) + # store all problem params dictionary values as attributes + self._makeAttributeAndRegister(*problem_params.keys(), localVars=problem_params, readOnly=True) + + # initial and end time + self.t0 = 0.0 + self.Tend = 1.0 if self.end_time < 0.0 else self.end_time + + # set lambdas, if not provided by user + if not hasattr(self, 'lmbda_laplacian'): + self.lmbda_laplacian = -5.0 + if not hasattr(self, 'lmbda_gating'): + self.lmbda_gating = -10.0 + if not hasattr(self, 'lmbda_others'): + self.lmbda_others = -1.0 + + self.dtype_u = mesh + self.dtype_f = mesh + + def init_exp_extruded(self, new_dim_shape): + return ((*new_dim_shape, 1, self.init[0][1]), self.init[1], 
self.init[2]) + + def initial_value(self): + u0 = self.dtype_u(self.init, val=1.0) + + return u0 + + def eval_f(self, u, t, fh=None): + if fh is None: + fh = self.dtype_f(init=self.init, val=0.0) + + fh[0] = (self.lmbda_laplacian + self.lmbda_gating + self.lmbda_others) * u[0] + + return fh + + +class MultiscaleTestODE(TestODE): + def __init__(self, **problem_params): + super(MultiscaleTestODE, self).__init__(**problem_params) + + self.dtype_f = imexexp_mesh + + self.rhs_stiff_indeces = [0] + self.rhs_stiff_args = [0] + self.rhs_nonstiff_indeces = [0] + self.rhs_nonstiff_args = [0] + self.rhs_exp_args = [0] + self.rhs_exp_indeces = [0] + self.rhs_non_exp_indeces = [] + + self.constant_lambda_and_phi = True + + self.one = self.dtype_u(init=self.init, val=1.0) + + def solve_system(self, rhs, factor, u0, t, u_sol=None): + if u_sol is None: + u_sol = self.dtype_u(init=self.init, val=0.0) + + u_sol[0] = rhs[0] / (1 - factor * self.lmbda_laplacian) + + return u_sol + + def eval_f(self, u, t, eval_impl=True, eval_expl=True, eval_exp=True, fh=None, zero_untouched_indeces=True): + + if fh is None: + fh = self.dtype_f(init=self.init, val=0.0) + + if eval_expl: + fh.expl[0] = self.lmbda_others * u[0] + + if eval_impl: + fh.impl[0] = self.lmbda_laplacian * u[0] + + if eval_exp: + fh.exp[0] = self.lmbda_gating * u[0] + + return fh + + def eval_lmbda_yinf_exp(self, u, lmbda, yinf): + lmbda[0] = self.lmbda_gating + yinf[0] = 0.0 + + def lmbda_eval(self, u, t, lmbda=None): + if lmbda is None: + lmbda = self.dtype_u(init=self.init, val=0.0) + + lmbda[0] = self.lmbda_gating + + return lmbda diff --git a/pySDC/projects/Monodomain/problem_classes/ionicmodels/cpp/__init__.py b/pySDC/projects/Monodomain/problem_classes/ionicmodels/cpp/__init__.py new file mode 100644 index 0000000000..5d5baf10ed --- /dev/null +++ b/pySDC/projects/Monodomain/problem_classes/ionicmodels/cpp/__init__.py @@ -0,0 +1,5 @@ +from pySDC.projects.Monodomain.problem_classes.ionicmodels.cpp.ionicmodels import 
HodgkinHuxley +from pySDC.projects.Monodomain.problem_classes.ionicmodels.cpp.ionicmodels import Courtemanche1998 +from pySDC.projects.Monodomain.problem_classes.ionicmodels.cpp.ionicmodels import TenTusscher2006_epi +from pySDC.projects.Monodomain.problem_classes.ionicmodels.cpp.ionicmodels import TenTusscher2006_epi_smooth +from pySDC.projects.Monodomain.problem_classes.ionicmodels.cpp.ionicmodels import BiStable diff --git a/pySDC/projects/Monodomain/problem_classes/ionicmodels/cpp/bindings_definitions.cpp b/pySDC/projects/Monodomain/problem_classes/ionicmodels/cpp/bindings_definitions.cpp new file mode 100644 index 0000000000..e7c3a8685a --- /dev/null +++ b/pySDC/projects/Monodomain/problem_classes/ionicmodels/cpp/bindings_definitions.cpp @@ -0,0 +1,83 @@ +#include + +#include +#include +#include + +namespace py = pybind11; + +#include "ionicmodel.h" +#include "hodgkinhuxley.h" +#include "courtemanche.h" +#include "tentusscher.h" +#include "tentusscher_smooth.h" +#include "bistable.h" + +PYBIND11_MODULE(ionicmodels, m) +{ + m.doc() = ""; + + // A class to represent the ionic models. + py::class_ IonicModelPy(m, "IonicModel"); + IonicModelPy.def(py::init()); + IonicModelPy.def_property_readonly("f_expl_args", &IonicModel::get_f_expl_args, py::return_value_policy::copy); + IonicModelPy.def_property_readonly("f_exp_args", &IonicModel::get_f_exp_args, py::return_value_policy::copy); + IonicModelPy.def_property_readonly("f_expl_indeces", &IonicModel::get_f_expl_indeces, py::return_value_policy::copy); + IonicModelPy.def_property_readonly("f_exp_indeces", &IonicModel::get_f_exp_indeces, py::return_value_policy::copy); + IonicModelPy.def_property_readonly("size", &IonicModel::get_size, py::return_value_policy::copy); + + // A very simple ionic model with one variable one. It is used for testing purposes. With this model the + // monodomain equation reduces to a reaction-diffusion equation with one variable. 
+ py::class_ BiStablePy(m, "BiStable"); + BiStablePy.def(py::init()); + BiStablePy.def("initial_values", &BiStable::initial_values, py::return_value_policy::copy); + BiStablePy.def("f", &BiStable::f); + BiStablePy.def("f_expl", &BiStable::f_expl); + BiStablePy.def("lmbda_exp", &BiStable::lmbda_exp); + BiStablePy.def("lmbda_yinf_exp", &BiStable::lmbda_yinf_exp); + BiStablePy.def("rho_f_expl", &BiStable::rho_f_expl); + + // The Hodgkin-Huxley ionic model. A model with 4 variables, smooth, nonstiff. Still an academic model. + py::class_ HodgkinHuxleyPy(m, "HodgkinHuxley"); + HodgkinHuxleyPy.def(py::init()); + HodgkinHuxleyPy.def("initial_values", &HodgkinHuxley::initial_values, py::return_value_policy::copy); + HodgkinHuxleyPy.def("f", &HodgkinHuxley::f); + HodgkinHuxleyPy.def("f_expl", &HodgkinHuxley::f_expl); + HodgkinHuxleyPy.def("lmbda_exp", &HodgkinHuxley::lmbda_exp); + HodgkinHuxleyPy.def("lmbda_yinf_exp", &HodgkinHuxley::lmbda_yinf_exp); + HodgkinHuxleyPy.def("rho_f_expl", &HodgkinHuxley::rho_f_expl); + + // The Courtemanche ionic model. A model with 21 variables, mildly stiff. It is a realistic model for the human atrial cells. + py::class_ Courtemanche1998Py(m, "Courtemanche1998"); + Courtemanche1998Py.def(py::init()); + Courtemanche1998Py.def("initial_values", &Courtemanche1998::initial_values, py::return_value_policy::copy); + Courtemanche1998Py.def("f", &Courtemanche1998::f); + Courtemanche1998Py.def("f_expl", &Courtemanche1998::f_expl); + Courtemanche1998Py.def("lmbda_exp", &Courtemanche1998::lmbda_exp); + Courtemanche1998Py.def("lmbda_yinf_exp", &Courtemanche1998::lmbda_yinf_exp); + Courtemanche1998Py.def("rho_f_expl", &Courtemanche1998::rho_f_expl); + + // The TenTusscher ionic model. A model with 20 variables, very stiff. It is a realistic model for the human ventricular cells. 
+ py::class_ TenTusscher2006_epiPy(m, "TenTusscher2006_epi"); + TenTusscher2006_epiPy.def(py::init()); + TenTusscher2006_epiPy.def("initial_values", &TenTusscher2006_epi::initial_values, py::return_value_policy::copy); + TenTusscher2006_epiPy.def("f", &TenTusscher2006_epi::f); + TenTusscher2006_epiPy.def("f_expl", &TenTusscher2006_epi::f_expl); + TenTusscher2006_epiPy.def("lmbda_exp", &TenTusscher2006_epi::lmbda_exp); + TenTusscher2006_epiPy.def("lmbda_yinf_exp", &TenTusscher2006_epi::lmbda_yinf_exp); + TenTusscher2006_epiPy.def("rho_f_expl", &TenTusscher2006_epi::rho_f_expl); + + // A smoothed version TenTusscher ionic model. Indeed, in the right-hand side of the original model there are if-else clauses which are not differentiable. + // This model is a smoothed version of the original model, where the if-else clauses are removed by keeping the 'else' part of the clauses. + // The model is no more exact, from a physiological viewpoint, but the qualitative behavior is preserved. For instance it remains very stiff and + // action potentials are still propagated. Moreover, it is now differentiable. + // We use this model for convergence experiments only. 
+ py::class_ TenTusscher2006_epi_smoothPy(m, "TenTusscher2006_epi_smooth"); + TenTusscher2006_epi_smoothPy.def(py::init()); + TenTusscher2006_epi_smoothPy.def("initial_values", &TenTusscher2006_epi_smooth::initial_values, py::return_value_policy::copy); + TenTusscher2006_epi_smoothPy.def("f", &TenTusscher2006_epi_smooth::f); + TenTusscher2006_epi_smoothPy.def("f_expl", &TenTusscher2006_epi_smooth::f_expl); + TenTusscher2006_epi_smoothPy.def("lmbda_exp", &TenTusscher2006_epi_smooth::lmbda_exp); + TenTusscher2006_epi_smoothPy.def("lmbda_yinf_exp", &TenTusscher2006_epi_smooth::lmbda_yinf_exp); + TenTusscher2006_epi_smoothPy.def("rho_f_expl", &TenTusscher2006_epi_smooth::rho_f_expl); +} \ No newline at end of file diff --git a/pySDC/projects/Monodomain/problem_classes/ionicmodels/cpp/bistable.h b/pySDC/projects/Monodomain/problem_classes/ionicmodels/cpp/bistable.h new file mode 100644 index 0000000000..1e799636fa --- /dev/null +++ b/pySDC/projects/Monodomain/problem_classes/ionicmodels/cpp/bistable.h @@ -0,0 +1,88 @@ +#include +#include + +#include +#include +#include + +#include "ionicmodel.h" + +#ifndef BISTABLE +#define BISTABLE + +class BiStable : public IonicModel +{ +public: + BiStable(const double scale_); + ~BiStable(){}; + void f(py::array_t &y, py::array_t &fy); + void f_expl(py::array_t &y, py::array_t &fy); + void lmbda_exp(py::array_t &y_list, py::array_t &lmbda_list); + void lmbda_yinf_exp(py::array_t &y_list, py::array_t &lmbda_list, py::array_t &yinf_list); + py::list initial_values(); + double rho_f_expl(); + +private: + double V_th, V_depol, V_rest, a; +}; + +BiStable::BiStable(const double scale_) + : IonicModel(scale_) +{ + size = 1; + + // Set values of constants + V_th = -57.6; + V_depol = 30.; + V_rest = -85.; + a = 1.4e-3; + + assign(f_expl_args, {0}); + assign(f_exp_args, {}); + assign(f_expl_indeces, {0}); + assign(f_exp_indeces, {}); +} + +py::list BiStable::initial_values() +{ + py::list y0(size); + y0[0] = -85.0; + + return y0; +} + +void 
BiStable::f(py::array_t &y_list, py::array_t &fy_list) +{ + double *y_ptrs[size]; + double *fy_ptrs[size]; + size_t N; + size_t n_dofs; + get_raw_data(y_list, y_ptrs, N, n_dofs); + get_raw_data(fy_list, fy_ptrs, N, n_dofs); + + // Remember to scale the first variable!!! + for (unsigned j = 0; j < n_dofs; j++) + fy_ptrs[0][j] = -scale * a * (y_ptrs[0][j] - V_th) * (y_ptrs[0][j] - V_depol) * (y_ptrs[0][j] - V_rest); +} + +void BiStable::f_expl(py::array_t &y_list, py::array_t &fy_list) +{ + this->f(y_list, fy_list); +} + +void BiStable::lmbda_exp(py::array_t &y_list, py::array_t &lmbda_list) +{ + return; +} + +void BiStable::lmbda_yinf_exp(py::array_t &y_list, py::array_t &lmbda_list, py::array_t &yinf_list) +{ + return; +} + +double BiStable::rho_f_expl() +{ + return 20.; +} + +#endif \ No newline at end of file diff --git a/pySDC/projects/Monodomain/problem_classes/ionicmodels/cpp/compilation_command.txt b/pySDC/projects/Monodomain/problem_classes/ionicmodels/cpp/compilation_command.txt new file mode 100644 index 0000000000..695d087148 --- /dev/null +++ b/pySDC/projects/Monodomain/problem_classes/ionicmodels/cpp/compilation_command.txt @@ -0,0 +1,8 @@ +Linux: +c++ -O3 -Wall -shared -std=c++11 -fPIC -fvisibility=hidden $(python3 -m pybind11 --includes) bindings_definitions.cpp -o ionicmodels$(python3-config --extension-suffix) + +Daint: +c++ -march=haswell -O3 -Wall -shared -std=c++11 -fPIC -fvisibility=hidden $(python3 -m pybind11 --includes) bindings_definitions.cpp -o ionicmodels$(python3-config --extension-suffix) + +Mac: +c++ -O3 -Wall -shared -std=c++11 -undefined dynamic_lookup $(python3 -m pybind11 --includes) bindings_definitions.cpp -o ionicmodels$(python3-config --extension-suffix) \ No newline at end of file diff --git a/pySDC/projects/Monodomain/problem_classes/ionicmodels/cpp/courtemanche.h b/pySDC/projects/Monodomain/problem_classes/ionicmodels/cpp/courtemanche.h new file mode 100644 index 0000000000..b73ea0fc50 --- /dev/null +++ 
b/pySDC/projects/Monodomain/problem_classes/ionicmodels/cpp/courtemanche.h @@ -0,0 +1,575 @@ +#include +#include + +#include +#include +#include + +#include "ionicmodel.h" + +#ifndef COURTEMANCHE +#define COURTEMANCHE + +class Courtemanche1998 : public IonicModel +{ +public: + Courtemanche1998(const double scale_); + ~Courtemanche1998(){}; + void f(py::array_t &y, py::array_t &fy); + void f_expl(py::array_t &y, py::array_t &fy); + void lmbda_exp(py::array_t &y_list, py::array_t &lmbda_list); + void lmbda_yinf_exp(py::array_t &y_list, py::array_t &lmbda_list, py::array_t &yinf_list); + py::list initial_values(); + double rho_f_expl(); + +private: + double AC_CMDN_max, AC_CSQN_max, AC_Km_CMDN, AC_Km_CSQN, AC_Km_TRPN, AC_TRPN_max, AC_I_up_max, AC_K_up, AC_tau_f_Ca, AC_Ca_o, AC_K_o, AC_Na_o; + double AC_tau_tr, AC_Ca_up_max, AC_K_rel, AC_tau_u, AC_g_Ca_L, AC_I_NaCa_max, AC_K_mCa, AC_K_mNa, AC_K_sat, AC_Na_Ca_exchanger_current_gamma; + double AC_g_B_Ca, AC_g_B_K, AC_g_B_Na, AC_g_Na, AC_V_cell, AC_V_i, AC_V_rel, AC_V_up, AC_Cm, AC_F, AC_R, AC_T, AC_g_Kr, AC_i_CaP_max, AC_g_Ks, AC_Km_K_o; + double AC_Km_Na_i, AC_i_NaK_max, AC_sigma, AC_g_K1, AC_K_Q10, AC_g_to; +}; + +Courtemanche1998::Courtemanche1998(const double scale_) + : IonicModel(scale_) +{ + size = 21; + + AC_CMDN_max = 0.05; + AC_CSQN_max = 10.0; + AC_Km_CMDN = 0.00238; + AC_Km_CSQN = 0.8; + AC_Km_TRPN = 0.0005; + AC_TRPN_max = 0.07; + AC_I_up_max = 0.005; + AC_K_up = 0.00092; + AC_tau_f_Ca = 2.0; + AC_Ca_o = 1.8; + AC_K_o = 5.4; + AC_Na_o = 140.0; + AC_tau_tr = 180.0; + AC_Ca_up_max = 15.0; + AC_K_rel = 30.0; + AC_tau_u = 8.0; + AC_g_Ca_L = 0.12375; + AC_I_NaCa_max = 1600.0; + AC_K_mCa = 1.38; + AC_K_mNa = 87.5; + AC_K_sat = 0.1; + AC_Na_Ca_exchanger_current_gamma = 0.35; + AC_g_B_Ca = 0.001131; + AC_g_B_K = 0.0; + AC_g_B_Na = 6.74437500000000015e-04; + AC_g_Na = 7.8; + AC_V_cell = 20100.0; + AC_V_i = AC_V_cell * 0.68; + AC_V_rel = 0.0048 * AC_V_cell; + AC_V_up = 0.0552 * AC_V_cell; + AC_Cm = 1.0; // 100.0; + 
AC_F = 96.4867; + AC_R = 8.3143; + AC_T = 310.0; + AC_g_Kr = 2.94117649999999994e-02; + AC_i_CaP_max = 0.275; + AC_g_Ks = 1.29411759999999987e-01; + AC_Km_K_o = 1.5; + AC_Km_Na_i = 10.0; + AC_i_NaK_max = 5.99338739999999981e-01; + AC_sigma = 1.0 / 7.0 * (exp(AC_Na_o / 67.3) - 1.0); + AC_g_K1 = 0.09; + AC_K_Q10 = 3.0; + AC_g_to = 0.1652; + + assign(f_expl_args, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20}); + assign(f_exp_args, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 15}); + assign(f_expl_indeces, {0, 12, 13, 14, 16, 17, 18, 19, 20}); + assign(f_exp_indeces, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 15}); +} + +py::list Courtemanche1998::initial_values() +{ + py::list y0(size); + y0[0] = -81.18; + y0[1] = 0.002908; + y0[2] = 0.9649; + y0[3] = 0.9775; + y0[4] = 0.03043; + y0[5] = 0.9992; + y0[6] = 0.004966; + y0[7] = 0.9986; + y0[8] = 3.296e-05; + y0[9] = 0.01869; + y0[10] = 0.0001367; + y0[11] = 0.9996; + y0[12] = 0.7755; + y0[13] = 2.35e-112; + y0[14] = 1.0; + y0[15] = 0.9992; + y0[16] = 11.17; + y0[17] = 0.0001013; + y0[18] = 139.0; + y0[19] = 1.488; + y0[20] = 1.488; + + return y0; +} + +void Courtemanche1998::f(py::array_t &y_list, py::array_t &fy_list) +{ + double *y_ptrs[size]; + double *fy_ptrs[size]; + size_t N; + size_t n_dofs; + get_raw_data(y_list, y_ptrs, N, n_dofs); + get_raw_data(fy_list, fy_ptrs, N, n_dofs); + + double y[size]; + // For linear in gating var terms + double AV_tau_w, AV_w_infinity, AV_d_infinity, AV_tau_d, AV_f_infinity, AV_tau_f, AV_alpha_h, AV_beta_h, AV_h_inf, AV_tau_h, AV_alpha_j, AV_beta_j, AV_j_inf, AV_tau_j; + double AV_alpha_m, AV_beta_m, AV_m_inf, AV_tau_m, AV_alpha_xr, AV_beta_xr, AV_xr_infinity, AV_tau_xr, AV_alpha_xs, AV_beta_xs, AV_xs_infinity; + double AV_tau_xs, AV_alpha_oa, AV_beta_oa, AV_oa_infinity, AV_tau_oa, AV_oi_infinity; + double AV_alpha_oi, AV_beta_oi, AV_tau_oi, AV_alpha_ua, AV_beta_ua, AV_ua_infinity, AV_tau_ua, AV_alpha_ui, AV_beta_ui, AV_ui_infinity, AV_tau_ui; + // for nonlinear + 
double AV_f_Ca_infinity, AV_i_tr, AV_i_up_leak, AV_i_rel, AV_i_up, AV_i_CaP, AV_f_NaK, AV_i_NaK, AV_E_K, AV_i_K1, AV_i_to, AV_g_Kur, AV_i_Kur; + double AV_i_Ca_L, AV_i_NaCa, AV_E_Ca, AV_i_B_K, AV_E_Na, AV_i_Kr, AV_i_Ks, AV_Fn, AV_i_B_Ca, AV_i_B_Na, AV_i_Na, AV_u_infinity, AV_tau_v, AV_v_infinity, AV_B1, AV_B2; + // Remember to scale the first variable!!! + for (unsigned j = 0; j < n_dofs; j++) + { + for (unsigned i = 0; i < size; i++) + y[i] = y_ptrs[i][j]; + + // # Linear (in the gating variables) terms + + // #/* Ca_release_current_from_JSR_w_gate */ + + AV_tau_w = abs(NV_Ith_S(y, 0) - 7.9) < 1e-10 ? 6.0 * 0.2 / 1.3 : 6.0 * (1.0 - exp((-(NV_Ith_S(y, 0) - 7.9)) / 5.0)) / ((1.0 + 0.3 * exp((-(NV_Ith_S(y, 0) - 7.9)) / 5.0)) * 1.0 * (NV_Ith_S(y, 0) - 7.9)); + AV_w_infinity = 1.0 - pow(1.0 + exp((-(NV_Ith_S(y, 0) - 40.0)) / 17.0), (-1.0)); + fy_ptrs[15][j] = (AV_w_infinity - NV_Ith_S(y, 15)) / AV_tau_w; + + // #/* L_type_Ca_channel_d_gate */ + AV_d_infinity = pow(1.0 + exp((NV_Ith_S(y, 0) + 10.0) / (-8.0)), (-1.0)); + AV_tau_d = abs(NV_Ith_S(y, 0) + 10.0) < 1e-10 ? 4.579 / (1.0 + exp((NV_Ith_S(y, 0) + 10.0) / (-6.24))) : (1.0 - exp((NV_Ith_S(y, 0) + 10.0) / (-6.24))) / (0.035 * (NV_Ith_S(y, 0) + 10.0) * (1.0 + exp((NV_Ith_S(y, 0) + 10.0) / (-6.24)))); + fy_ptrs[10][j] = (AV_d_infinity - NV_Ith_S(y, 10)) / AV_tau_d; + + // #/* L_type_Ca_channel_f_gate */ + AV_f_infinity = exp((-(NV_Ith_S(y, 0) + 28.0)) / 6.9) / (1.0 + exp((-(NV_Ith_S(y, 0) + 28.0)) / 6.9)); + AV_tau_f = 9.0 * pow(0.0197 * exp((-pow(0.0337, 2.0)) * pow(NV_Ith_S(y, 0) + 10.0, 2.0)) + 0.02, (-1.0)); + fy_ptrs[11][j] = (AV_f_infinity - NV_Ith_S(y, 11)) / AV_tau_f; + + // #/* fast_sodium_current_h_gate */ + AV_alpha_h = NV_Ith_S(y, 0) < (-40.0) ? 0.135 * exp((NV_Ith_S(y, 0) + 80.0) / (-6.8)) : 0.0; + AV_beta_h = NV_Ith_S(y, 0) < (-40.0) ? 
3.56 * exp(0.079 * NV_Ith_S(y, 0)) + 310000.0 * exp(0.35 * NV_Ith_S(y, 0)) : 1.0 / (0.13 * (1.0 + exp((NV_Ith_S(y, 0) + 10.66) / (-11.1)))); + AV_h_inf = AV_alpha_h / (AV_alpha_h + AV_beta_h); + AV_tau_h = 1.0 / (AV_alpha_h + AV_beta_h); + fy_ptrs[2][j] = (AV_h_inf - NV_Ith_S(y, 2)) / AV_tau_h; + + // #/* fast_sodium_current_j_gate */ + AV_alpha_j = NV_Ith_S(y, 0) < (-40.0) ? ((-127140.0) * exp(0.2444 * NV_Ith_S(y, 0)) - 3.474e-05 * exp((-0.04391) * NV_Ith_S(y, 0))) * (NV_Ith_S(y, 0) + 37.78) / (1.0 + exp(0.311 * (NV_Ith_S(y, 0) + 79.23))) : 0.0; + AV_beta_j = NV_Ith_S(y, 0) < (-40.0) ? 0.1212 * exp((-0.01052) * NV_Ith_S(y, 0)) / (1.0 + exp((-0.1378) * (NV_Ith_S(y, 0) + 40.14))) : 0.3 * exp((-2.535e-07) * NV_Ith_S(y, 0)) / (1.0 + exp((-0.1) * (NV_Ith_S(y, 0) + 32.0))); + AV_j_inf = AV_alpha_j / (AV_alpha_j + AV_beta_j); + AV_tau_j = 1.0 / (AV_alpha_j + AV_beta_j); + fy_ptrs[3][j] = (AV_j_inf - NV_Ith_S(y, 3)) / AV_tau_j; + + // #/* fast_sodium_current_m_gate */ + AV_alpha_m = NV_Ith_S(y, 0) == (-47.13) ? 3.2 : 0.32 * (NV_Ith_S(y, 0) + 47.13) / (1.0 - exp((-0.1) * (NV_Ith_S(y, 0) + 47.13))); + AV_beta_m = 0.08 * exp((-NV_Ith_S(y, 0)) / 11.0); + AV_m_inf = AV_alpha_m / (AV_alpha_m + AV_beta_m); + AV_tau_m = 1.0 / (AV_alpha_m + AV_beta_m); + fy_ptrs[1][j] = (AV_m_inf - NV_Ith_S(y, 1)) / AV_tau_m; + + // #/* rapid_delayed_rectifier_K_current_xr_gate */ + AV_alpha_xr = abs(NV_Ith_S(y, 0) + 14.1) < 1e-10 ? 0.0015 : 0.0003 * (NV_Ith_S(y, 0) + 14.1) / (1.0 - exp((NV_Ith_S(y, 0) + 14.1) / (-5.0))); + AV_beta_xr = abs(NV_Ith_S(y, 0) - 3.3328) < 1e-10 ? 
3.78361180000000004e-04 : 7.38980000000000030e-05 * (NV_Ith_S(y, 0) - 3.3328) / (exp((NV_Ith_S(y, 0) - 3.3328) / 5.1237) - 1.0); + AV_xr_infinity = pow(1.0 + exp((NV_Ith_S(y, 0) + 14.1) / (-6.5)), (-1.0)); + AV_tau_xr = pow(AV_alpha_xr + AV_beta_xr, (-1.0)); + fy_ptrs[8][j] = (AV_xr_infinity - NV_Ith_S(y, 8)) / AV_tau_xr; + + // #/* slow_delayed_rectifier_K_current_xs_gate */ + AV_alpha_xs = abs(NV_Ith_S(y, 0) - 19.9) < 1e-10 ? 0.00068 : 4e-05 * (NV_Ith_S(y, 0) - 19.9) / (1.0 - exp((NV_Ith_S(y, 0) - 19.9) / (-17.0))); + AV_beta_xs = abs(NV_Ith_S(y, 0) - 19.9) < 1e-10 ? 0.000315 : 3.5e-05 * (NV_Ith_S(y, 0) - 19.9) / (exp((NV_Ith_S(y, 0) - 19.9) / 9.0) - 1.0); + AV_xs_infinity = pow(1.0 + exp((NV_Ith_S(y, 0) - 19.9) / (-12.7)), (-0.5)); + AV_tau_xs = 0.5 * pow(AV_alpha_xs + AV_beta_xs, (-1.0)); + fy_ptrs[9][j] = (AV_xs_infinity - NV_Ith_S(y, 9)) / AV_tau_xs; + + // #/* transient_outward_K_current_oa_gate */ + AV_alpha_oa = 0.65 * pow(exp((NV_Ith_S(y, 0) - (-10.0)) / (-8.5)) + exp((NV_Ith_S(y, 0) - (-10.0) - 40.0) / (-59.0)), (-1.0)); + AV_beta_oa = 0.65 * pow(2.5 + exp((NV_Ith_S(y, 0) - (-10.0) + 72.0) / 17.0), (-1.0)); + AV_oa_infinity = pow(1.0 + exp((NV_Ith_S(y, 0) - (-10.0) + 10.47) / (-17.54)), (-1.0)); + AV_tau_oa = pow(AV_alpha_oa + AV_beta_oa, (-1.0)) / AC_K_Q10; + fy_ptrs[4][j] = (AV_oa_infinity - NV_Ith_S(y, 4)) / AV_tau_oa; + + // #/* transient_outward_K_current_oi_gate */ + AV_alpha_oi = pow(18.53 + 1.0 * exp((NV_Ith_S(y, 0) - (-10.0) + 103.7) / 10.95), (-1.0)); + AV_beta_oi = pow(35.56 + 1.0 * exp((NV_Ith_S(y, 0) - (-10.0) - 8.74) / (-7.44)), (-1.0)); + AV_oi_infinity = pow(1.0 + exp((NV_Ith_S(y, 0) - (-10.0) + 33.1) / 5.3), (-1.0)); + AV_tau_oi = pow(AV_alpha_oi + AV_beta_oi, (-1.0)) / AC_K_Q10; + fy_ptrs[5][j] = (AV_oi_infinity - NV_Ith_S(y, 5)) / AV_tau_oi; + + // #/* ultrarapid_delayed_rectifier_K_current_ua_gate */ + AV_alpha_ua = 0.65 * pow(exp((NV_Ith_S(y, 0) - (-10.0)) / (-8.5)) + exp((NV_Ith_S(y, 0) - (-10.0) - 40.0) / (-59.0)), (-1.0)); + 
AV_beta_ua = 0.65 * pow(2.5 + exp((NV_Ith_S(y, 0) - (-10.0) + 72.0) / 17.0), (-1.0)); + AV_ua_infinity = pow(1.0 + exp((NV_Ith_S(y, 0) - (-10.0) + 20.3) / (-9.6)), (-1.0)); + AV_tau_ua = pow(AV_alpha_ua + AV_beta_ua, (-1.0)) / AC_K_Q10; + fy_ptrs[6][j] = (AV_ua_infinity - NV_Ith_S(y, 6)) / AV_tau_ua; + + // #/* ultrarapid_delayed_rectifier_K_current_ui_gate */ + AV_alpha_ui = pow(21.0 + 1.0 * exp((NV_Ith_S(y, 0) - (-10.0) - 195.0) / (-28.0)), (-1.0)); + AV_beta_ui = 1.0 / exp((NV_Ith_S(y, 0) - (-10.0) - 168.0) / (-16.0)); + AV_ui_infinity = pow(1.0 + exp((NV_Ith_S(y, 0) - (-10.0) - 109.45) / 27.48), (-1.0)); + AV_tau_ui = pow(AV_alpha_ui + AV_beta_ui, (-1.0)) / AC_K_Q10; + fy_ptrs[7][j] = (AV_ui_infinity - NV_Ith_S(y, 7)) / AV_tau_ui; + + // # Non Linear (in the gating variables) terms + + // #/* L_type_Ca_channel_f_Ca_gate */ + AV_f_Ca_infinity = pow(1.0 + NV_Ith_S(y, 17) / 0.00035, (-1.0)); + fy_ptrs[12][j] = (AV_f_Ca_infinity - NV_Ith_S(y, 12)) / AC_tau_f_Ca; + + // #/* transfer_current_from_NSR_to_JSR */ + AV_i_tr = (NV_Ith_S(y, 20) - NV_Ith_S(y, 19)) / AC_tau_tr; + + // #/* Ca_leak_current_by_the_NSR */ + AV_i_up_leak = AC_I_up_max * NV_Ith_S(y, 20) / AC_Ca_up_max; + + // #/* Ca_release_current_from_JSR */ + AV_i_rel = AC_K_rel * pow(NV_Ith_S(y, 13), 2.0) * NV_Ith_S(y, 14) * NV_Ith_S(y, 15) * (NV_Ith_S(y, 19) - NV_Ith_S(y, 17)); + + // #/* intracellular_ion_concentrations */ + fy_ptrs[19][j] = (AV_i_tr - AV_i_rel) * pow(1.0 + AC_CSQN_max * AC_Km_CSQN / pow(NV_Ith_S(y, 19) + AC_Km_CSQN, 2.0), (-1.0)); + + // #/* Ca_uptake_current_by_the_NSR */ + AV_i_up = AC_I_up_max / (1.0 + AC_K_up / NV_Ith_S(y, 17)); + fy_ptrs[20][j] = AV_i_up - (AV_i_up_leak + AV_i_tr * AC_V_rel / AC_V_up); + + // #/* sarcolemmal_calcium_pump_current */ + AV_i_CaP = AC_Cm * AC_i_CaP_max * NV_Ith_S(y, 17) / (0.0005 + NV_Ith_S(y, 17)); + + // #/* sodium_potassium_pump */ + AV_f_NaK = pow(1.0 + 0.1245 * exp((-0.1) * AC_F * NV_Ith_S(y, 0) / (AC_R * AC_T)) + 0.0365 * AC_sigma * exp((-AC_F) * 
NV_Ith_S(y, 0) / (AC_R * AC_T)), (-1.0)); + AV_i_NaK = AC_Cm * AC_i_NaK_max * AV_f_NaK * 1.0 / (1.0 + pow(AC_Km_Na_i / NV_Ith_S(y, 16), 1.5)) * AC_K_o / (AC_K_o + AC_Km_K_o); + + // #/* time_independent_potassium_current */ + AV_E_K = AC_R * AC_T / AC_F * log(AC_K_o / NV_Ith_S(y, 18)); + AV_i_K1 = AC_Cm * AC_g_K1 * (NV_Ith_S(y, 0) - AV_E_K) / (1.0 + exp(0.07 * (NV_Ith_S(y, 0) + 80.0))); + + // #/* transient_outward_K_current */ + AV_i_to = AC_Cm * AC_g_to * pow(NV_Ith_S(y, 4), 3.0) * NV_Ith_S(y, 5) * (NV_Ith_S(y, 0) - AV_E_K); + + // #/* ultrarapid_delayed_rectifier_K_current */ + AV_g_Kur = 0.005 + 0.05 / (1.0 + exp((NV_Ith_S(y, 0) - 15.0) / (-13.0))); + AV_i_Kur = AC_Cm * AV_g_Kur * pow(NV_Ith_S(y, 6), 3.0) * NV_Ith_S(y, 7) * (NV_Ith_S(y, 0) - AV_E_K); + + // #/* *remaining* */ + AV_i_Ca_L = AC_Cm * AC_g_Ca_L * NV_Ith_S(y, 10) * NV_Ith_S(y, 11) * NV_Ith_S(y, 12) * (NV_Ith_S(y, 0) - 65.0); + AV_i_NaCa = AC_Cm * AC_I_NaCa_max * (exp(AC_Na_Ca_exchanger_current_gamma * AC_F * NV_Ith_S(y, 0) / (AC_R * AC_T)) * pow(NV_Ith_S(y, 16), 3.0) * AC_Ca_o - exp((AC_Na_Ca_exchanger_current_gamma - 1.0) * AC_F * NV_Ith_S(y, 0) / (AC_R * AC_T)) * pow(AC_Na_o, 3.0) * NV_Ith_S(y, 17)) / ((pow(AC_K_mNa, 3.0) + pow(AC_Na_o, 3.0)) * (AC_K_mCa + AC_Ca_o) * (1.0 + AC_K_sat * exp((AC_Na_Ca_exchanger_current_gamma - 1.0) * NV_Ith_S(y, 0) * AC_F / (AC_R * AC_T)))); + AV_E_Ca = AC_R * AC_T / (2.0 * AC_F) * log(AC_Ca_o / NV_Ith_S(y, 17)); + AV_i_B_K = AC_Cm * AC_g_B_K * (NV_Ith_S(y, 0) - AV_E_K); + AV_E_Na = AC_R * AC_T / AC_F * log(AC_Na_o / NV_Ith_S(y, 16)); + AV_i_Kr = AC_Cm * AC_g_Kr * NV_Ith_S(y, 8) * (NV_Ith_S(y, 0) - AV_E_K) / (1.0 + exp((NV_Ith_S(y, 0) + 15.0) / 22.4)); + AV_i_Ks = AC_Cm * AC_g_Ks * pow(NV_Ith_S(y, 9), 2.0) * (NV_Ith_S(y, 0) - AV_E_K); + AV_Fn = 1000.0 * (1e-15 * AC_V_rel * AV_i_rel - 1e-15 / (2.0 * AC_F) * (0.5 * AV_i_Ca_L - 0.2 * AV_i_NaCa)); + AV_i_B_Ca = AC_Cm * AC_g_B_Ca * (NV_Ith_S(y, 0) - AV_E_Ca); + AV_i_B_Na = AC_Cm * AC_g_B_Na * (NV_Ith_S(y, 0) - AV_E_Na); + 
AV_i_Na = AC_Cm * AC_g_Na * pow(NV_Ith_S(y, 1), 3.0) * NV_Ith_S(y, 2) * NV_Ith_S(y, 3) * (NV_Ith_S(y, 0) - AV_E_Na); + fy_ptrs[18][j] = (2.0 * AV_i_NaK - (AV_i_K1 + AV_i_to + AV_i_Kur + AV_i_Kr + AV_i_Ks + AV_i_B_K)) / (AC_V_i * AC_F); + AV_u_infinity = pow(1.0 + exp((-(AV_Fn - 3.41749999999999983e-13)) / 1.367e-15), (-1.0)); + AV_tau_v = 1.91 + 2.09 * pow(1.0 + exp((-(AV_Fn - 3.41749999999999983e-13)) / 1.367e-15), (-1.0)); + AV_v_infinity = 1.0 - pow(1.0 + exp((-(AV_Fn - 6.835e-14)) / 1.367e-15), (-1.0)); + fy_ptrs[16][j] = ((-3.0) * AV_i_NaK - (3.0 * AV_i_NaCa + AV_i_B_Na + AV_i_Na)) / (AC_V_i * AC_F); + + fy_ptrs[0][j] = scale * (-(AV_i_Na + AV_i_K1 + AV_i_to + AV_i_Kur + AV_i_Kr + AV_i_Ks + AV_i_B_Na + AV_i_B_Ca + AV_i_NaK + AV_i_CaP + AV_i_NaCa + AV_i_Ca_L)) / AC_Cm; + fy_ptrs[13][j] = (AV_u_infinity - NV_Ith_S(y, 13)) / AC_tau_u; + fy_ptrs[14][j] = (AV_v_infinity - NV_Ith_S(y, 14)) / AV_tau_v; + + AV_B1 = (2.0 * AV_i_NaCa - (AV_i_CaP + AV_i_Ca_L + AV_i_B_Ca)) / (2.0 * AC_V_i * AC_F) + (AC_V_up * (AV_i_up_leak - AV_i_up) + AV_i_rel * AC_V_rel) / AC_V_i; + AV_B2 = 1.0 + AC_TRPN_max * AC_Km_TRPN / pow(NV_Ith_S(y, 17) + AC_Km_TRPN, 2.0) + AC_CMDN_max * AC_Km_CMDN / pow(NV_Ith_S(y, 17) + AC_Km_CMDN, 2.0); + fy_ptrs[17][j] = AV_B1 / AV_B2; + } +} + +void Courtemanche1998::f_expl(py::array_t &y_list, py::array_t &fy_list) +{ + double *y_ptrs[size]; + double *fy_ptrs[size]; + size_t N; + size_t n_dofs; + get_raw_data(y_list, y_ptrs, N, n_dofs); + get_raw_data(fy_list, fy_ptrs, N, n_dofs); + + double y[size]; + // for nonlinear + double AV_f_Ca_infinity, AV_i_tr, AV_i_up_leak, AV_i_rel, AV_i_up, AV_i_CaP, AV_f_NaK, AV_i_NaK, AV_E_K, AV_i_K1, AV_i_to, AV_g_Kur, AV_i_Kur; + double AV_i_Ca_L, AV_i_NaCa, AV_E_Ca, AV_i_B_K, AV_E_Na, AV_i_Kr, AV_i_Ks, AV_Fn, AV_i_B_Ca, AV_i_B_Na, AV_i_Na, AV_u_infinity, AV_tau_v, AV_v_infinity, AV_B1, AV_B2; + // Remember to scale the first variable!!! 
+ for (unsigned j = 0; j < n_dofs; j++) + { + for (unsigned i = 0; i < size; i++) + y[i] = y_ptrs[i][j]; + + // #/* L_type_Ca_channel_f_Ca_gate */ + AV_f_Ca_infinity = pow(1.0 + NV_Ith_S(y, 17) / 0.00035, (-1.0)); + fy_ptrs[12][j] = (AV_f_Ca_infinity - NV_Ith_S(y, 12)) / AC_tau_f_Ca; + + // #/* transfer_current_from_NSR_to_JSR */ + AV_i_tr = (NV_Ith_S(y, 20) - NV_Ith_S(y, 19)) / AC_tau_tr; + + // #/* Ca_leak_current_by_the_NSR */ + AV_i_up_leak = AC_I_up_max * NV_Ith_S(y, 20) / AC_Ca_up_max; + + // #/* Ca_release_current_from_JSR */ + AV_i_rel = AC_K_rel * pow(NV_Ith_S(y, 13), 2.0) * NV_Ith_S(y, 14) * NV_Ith_S(y, 15) * (NV_Ith_S(y, 19) - NV_Ith_S(y, 17)); + + // #/* intracellular_ion_concentrations */ + fy_ptrs[19][j] = (AV_i_tr - AV_i_rel) * pow(1.0 + AC_CSQN_max * AC_Km_CSQN / pow(NV_Ith_S(y, 19) + AC_Km_CSQN, 2.0), (-1.0)); + + // #/* Ca_uptake_current_by_the_NSR */ + AV_i_up = AC_I_up_max / (1.0 + AC_K_up / NV_Ith_S(y, 17)); + fy_ptrs[20][j] = AV_i_up - (AV_i_up_leak + AV_i_tr * AC_V_rel / AC_V_up); + + // #/* sarcolemmal_calcium_pump_current */ + AV_i_CaP = AC_Cm * AC_i_CaP_max * NV_Ith_S(y, 17) / (0.0005 + NV_Ith_S(y, 17)); + + // #/* sodium_potassium_pump */ + AV_f_NaK = pow(1.0 + 0.1245 * exp((-0.1) * AC_F * NV_Ith_S(y, 0) / (AC_R * AC_T)) + 0.0365 * AC_sigma * exp((-AC_F) * NV_Ith_S(y, 0) / (AC_R * AC_T)), (-1.0)); + AV_i_NaK = AC_Cm * AC_i_NaK_max * AV_f_NaK * 1.0 / (1.0 + pow(AC_Km_Na_i / NV_Ith_S(y, 16), 1.5)) * AC_K_o / (AC_K_o + AC_Km_K_o); + + // #/* time_independent_potassium_current */ + AV_E_K = AC_R * AC_T / AC_F * log(AC_K_o / NV_Ith_S(y, 18)); + AV_i_K1 = AC_Cm * AC_g_K1 * (NV_Ith_S(y, 0) - AV_E_K) / (1.0 + exp(0.07 * (NV_Ith_S(y, 0) + 80.0))); + + // #/* transient_outward_K_current */ + AV_i_to = AC_Cm * AC_g_to * pow(NV_Ith_S(y, 4), 3.0) * NV_Ith_S(y, 5) * (NV_Ith_S(y, 0) - AV_E_K); + + // #/* ultrarapid_delayed_rectifier_K_current */ + AV_g_Kur = 0.005 + 0.05 / (1.0 + exp((NV_Ith_S(y, 0) - 15.0) / (-13.0))); + AV_i_Kur = AC_Cm * AV_g_Kur * 
pow(NV_Ith_S(y, 6), 3.0) * NV_Ith_S(y, 7) * (NV_Ith_S(y, 0) - AV_E_K); + + // #/* *remaining* */ + AV_i_Ca_L = AC_Cm * AC_g_Ca_L * NV_Ith_S(y, 10) * NV_Ith_S(y, 11) * NV_Ith_S(y, 12) * (NV_Ith_S(y, 0) - 65.0); + AV_i_NaCa = AC_Cm * AC_I_NaCa_max * (exp(AC_Na_Ca_exchanger_current_gamma * AC_F * NV_Ith_S(y, 0) / (AC_R * AC_T)) * pow(NV_Ith_S(y, 16), 3.0) * AC_Ca_o - exp((AC_Na_Ca_exchanger_current_gamma - 1.0) * AC_F * NV_Ith_S(y, 0) / (AC_R * AC_T)) * pow(AC_Na_o, 3.0) * NV_Ith_S(y, 17)) / ((pow(AC_K_mNa, 3.0) + pow(AC_Na_o, 3.0)) * (AC_K_mCa + AC_Ca_o) * (1.0 + AC_K_sat * exp((AC_Na_Ca_exchanger_current_gamma - 1.0) * NV_Ith_S(y, 0) * AC_F / (AC_R * AC_T)))); + AV_E_Ca = AC_R * AC_T / (2.0 * AC_F) * log(AC_Ca_o / NV_Ith_S(y, 17)); + AV_i_B_K = AC_Cm * AC_g_B_K * (NV_Ith_S(y, 0) - AV_E_K); + AV_E_Na = AC_R * AC_T / AC_F * log(AC_Na_o / NV_Ith_S(y, 16)); + AV_i_Kr = AC_Cm * AC_g_Kr * NV_Ith_S(y, 8) * (NV_Ith_S(y, 0) - AV_E_K) / (1.0 + exp((NV_Ith_S(y, 0) + 15.0) / 22.4)); + AV_i_Ks = AC_Cm * AC_g_Ks * pow(NV_Ith_S(y, 9), 2.0) * (NV_Ith_S(y, 0) - AV_E_K); + AV_Fn = 1000.0 * (1e-15 * AC_V_rel * AV_i_rel - 1e-15 / (2.0 * AC_F) * (0.5 * AV_i_Ca_L - 0.2 * AV_i_NaCa)); + AV_i_B_Ca = AC_Cm * AC_g_B_Ca * (NV_Ith_S(y, 0) - AV_E_Ca); + AV_i_B_Na = AC_Cm * AC_g_B_Na * (NV_Ith_S(y, 0) - AV_E_Na); + AV_i_Na = AC_Cm * AC_g_Na * pow(NV_Ith_S(y, 1), 3.0) * NV_Ith_S(y, 2) * NV_Ith_S(y, 3) * (NV_Ith_S(y, 0) - AV_E_Na); + fy_ptrs[18][j] = (2.0 * AV_i_NaK - (AV_i_K1 + AV_i_to + AV_i_Kur + AV_i_Kr + AV_i_Ks + AV_i_B_K)) / (AC_V_i * AC_F); + AV_u_infinity = pow(1.0 + exp((-(AV_Fn - 3.41749999999999983e-13)) / 1.367e-15), (-1.0)); + AV_tau_v = 1.91 + 2.09 * pow(1.0 + exp((-(AV_Fn - 3.41749999999999983e-13)) / 1.367e-15), (-1.0)); + AV_v_infinity = 1.0 - pow(1.0 + exp((-(AV_Fn - 6.835e-14)) / 1.367e-15), (-1.0)); + fy_ptrs[16][j] = ((-3.0) * AV_i_NaK - (3.0 * AV_i_NaCa + AV_i_B_Na + AV_i_Na)) / (AC_V_i * AC_F); + + fy_ptrs[0][j] = scale * (-(AV_i_Na + AV_i_K1 + AV_i_to + AV_i_Kur + AV_i_Kr 
+ AV_i_Ks + AV_i_B_Na + AV_i_B_Ca + AV_i_NaK + AV_i_CaP + AV_i_NaCa + AV_i_Ca_L)) / AC_Cm; + fy_ptrs[13][j] = (AV_u_infinity - NV_Ith_S(y, 13)) / AC_tau_u; + fy_ptrs[14][j] = (AV_v_infinity - NV_Ith_S(y, 14)) / AV_tau_v; + + AV_B1 = (2.0 * AV_i_NaCa - (AV_i_CaP + AV_i_Ca_L + AV_i_B_Ca)) / (2.0 * AC_V_i * AC_F) + (AC_V_up * (AV_i_up_leak - AV_i_up) + AV_i_rel * AC_V_rel) / AC_V_i; + AV_B2 = 1.0 + AC_TRPN_max * AC_Km_TRPN / pow(NV_Ith_S(y, 17) + AC_Km_TRPN, 2.0) + AC_CMDN_max * AC_Km_CMDN / pow(NV_Ith_S(y, 17) + AC_Km_CMDN, 2.0); + fy_ptrs[17][j] = AV_B1 / AV_B2; + } +} + +void Courtemanche1998::lmbda_yinf_exp(py::array_t &y_list, py::array_t &lmbda_list, py::array_t &yinf_list) +{ + double *y_ptrs[size]; + double *lmbda_ptrs[size]; + double *yinf_ptrs[size]; + size_t N; + size_t n_dofs; + get_raw_data(y_list, y_ptrs, N, n_dofs); + get_raw_data(lmbda_list, lmbda_ptrs, N, n_dofs); + get_raw_data(yinf_list, yinf_ptrs, N, n_dofs); + + double y[size]; + double AV_tau_w, AV_tau_d, AV_tau_f, AV_alpha_h, AV_beta_h, AV_tau_h, AV_alpha_j, AV_beta_j, AV_tau_j; + double AV_alpha_m, AV_beta_m, AV_tau_m, AV_alpha_xr, AV_beta_xr, AV_tau_xr, AV_alpha_xs, AV_beta_xs; + double AV_tau_xs, AV_alpha_oa, AV_beta_oa, AV_tau_oa; + double AV_alpha_oi, AV_beta_oi, AV_tau_oi, AV_alpha_ua, AV_beta_ua, AV_tau_ua, AV_alpha_ui, AV_beta_ui, AV_tau_ui; + // Remember to scale the first variable!!! + for (unsigned j = 0; j < n_dofs; j++) + { + for (unsigned i = 0; i < size; i++) + y[i] = y_ptrs[i][j]; + + // # Linear (in the gating variables) terms + + // #/* Ca_release_current_from_JSR_w_gate */ + + AV_tau_w = abs(NV_Ith_S(y, 0) - 7.9) < 1e-10 ? 6.0 * 0.2 / 1.3 : 6.0 * (1.0 - exp((-(NV_Ith_S(y, 0) - 7.9)) / 5.0)) / ((1.0 + 0.3 * exp((-(NV_Ith_S(y, 0) - 7.9)) / 5.0)) * 1.0 * (NV_Ith_S(y, 0) - 7.9)); + lmbda_ptrs[15][j] = -1. 
/ AV_tau_w; + yinf_ptrs[15][j] = 1.0 - pow(1.0 + exp((-(NV_Ith_S(y, 0) - 40.0)) / 17.0), (-1.0)); + + // #/* L_type_Ca_channel_d_gate */ + yinf_ptrs[10][j] = pow(1.0 + exp((NV_Ith_S(y, 0) + 10.0) / (-8.0)), (-1.0)); + AV_tau_d = abs(NV_Ith_S(y, 0) + 10.0) < 1e-10 ? 4.579 / (1.0 + exp((NV_Ith_S(y, 0) + 10.0) / (-6.24))) : (1.0 - exp((NV_Ith_S(y, 0) + 10.0) / (-6.24))) / (0.035 * (NV_Ith_S(y, 0) + 10.0) * (1.0 + exp((NV_Ith_S(y, 0) + 10.0) / (-6.24)))); + lmbda_ptrs[10][j] = -1. / AV_tau_d; + + // #/* L_type_Ca_channel_f_gate */ + yinf_ptrs[11][j] = exp((-(NV_Ith_S(y, 0) + 28.0)) / 6.9) / (1.0 + exp((-(NV_Ith_S(y, 0) + 28.0)) / 6.9)); + AV_tau_f = 9.0 * pow(0.0197 * exp((-pow(0.0337, 2.0)) * pow(NV_Ith_S(y, 0) + 10.0, 2.0)) + 0.02, (-1.0)); + lmbda_ptrs[11][j] = -1. / AV_tau_f; + + // #/* fast_sodium_current_h_gate */ + AV_alpha_h = NV_Ith_S(y, 0) < (-40.0) ? 0.135 * exp((NV_Ith_S(y, 0) + 80.0) / (-6.8)) : 0.0; + AV_beta_h = NV_Ith_S(y, 0) < (-40.0) ? 3.56 * exp(0.079 * NV_Ith_S(y, 0)) + 310000.0 * exp(0.35 * NV_Ith_S(y, 0)) : 1.0 / (0.13 * (1.0 + exp((NV_Ith_S(y, 0) + 10.66) / (-11.1)))); + yinf_ptrs[2][j] = AV_alpha_h / (AV_alpha_h + AV_beta_h); + AV_tau_h = 1.0 / (AV_alpha_h + AV_beta_h); + lmbda_ptrs[2][j] = -1. / AV_tau_h; + + // #/* fast_sodium_current_j_gate */ + AV_alpha_j = NV_Ith_S(y, 0) < (-40.0) ? ((-127140.0) * exp(0.2444 * NV_Ith_S(y, 0)) - 3.474e-05 * exp((-0.04391) * NV_Ith_S(y, 0))) * (NV_Ith_S(y, 0) + 37.78) / (1.0 + exp(0.311 * (NV_Ith_S(y, 0) + 79.23))) : 0.0; + AV_beta_j = NV_Ith_S(y, 0) < (-40.0) ? 0.1212 * exp((-0.01052) * NV_Ith_S(y, 0)) / (1.0 + exp((-0.1378) * (NV_Ith_S(y, 0) + 40.14))) : 0.3 * exp((-2.535e-07) * NV_Ith_S(y, 0)) / (1.0 + exp((-0.1) * (NV_Ith_S(y, 0) + 32.0))); + yinf_ptrs[3][j] = AV_alpha_j / (AV_alpha_j + AV_beta_j); + AV_tau_j = 1.0 / (AV_alpha_j + AV_beta_j); + lmbda_ptrs[3][j] = -1. / AV_tau_j; + + // #/* fast_sodium_current_m_gate */ + AV_alpha_m = NV_Ith_S(y, 0) == (-47.13) ? 
3.2 : 0.32 * (NV_Ith_S(y, 0) + 47.13) / (1.0 - exp((-0.1) * (NV_Ith_S(y, 0) + 47.13))); + AV_beta_m = 0.08 * exp((-NV_Ith_S(y, 0)) / 11.0); + yinf_ptrs[1][j] = AV_alpha_m / (AV_alpha_m + AV_beta_m); + AV_tau_m = 1.0 / (AV_alpha_m + AV_beta_m); + lmbda_ptrs[1][j] = -1. / AV_tau_m; + + // #/* rapid_delayed_rectifier_K_current_xr_gate */ + AV_alpha_xr = abs(NV_Ith_S(y, 0) + 14.1) < 1e-10 ? 0.0015 : 0.0003 * (NV_Ith_S(y, 0) + 14.1) / (1.0 - exp((NV_Ith_S(y, 0) + 14.1) / (-5.0))); + AV_beta_xr = abs(NV_Ith_S(y, 0) - 3.3328) < 1e-10 ? 3.78361180000000004e-04 : 7.38980000000000030e-05 * (NV_Ith_S(y, 0) - 3.3328) / (exp((NV_Ith_S(y, 0) - 3.3328) / 5.1237) - 1.0); + yinf_ptrs[8][j] = pow(1.0 + exp((NV_Ith_S(y, 0) + 14.1) / (-6.5)), (-1.0)); + AV_tau_xr = pow(AV_alpha_xr + AV_beta_xr, (-1.0)); + lmbda_ptrs[8][j] = -1. / AV_tau_xr; + + // #/* slow_delayed_rectifier_K_current_xs_gate */ + AV_alpha_xs = abs(NV_Ith_S(y, 0) - 19.9) < 1e-10 ? 0.00068 : 4e-05 * (NV_Ith_S(y, 0) - 19.9) / (1.0 - exp((NV_Ith_S(y, 0) - 19.9) / (-17.0))); + AV_beta_xs = abs(NV_Ith_S(y, 0) - 19.9) < 1e-10 ? 0.000315 : 3.5e-05 * (NV_Ith_S(y, 0) - 19.9) / (exp((NV_Ith_S(y, 0) - 19.9) / 9.0) - 1.0); + yinf_ptrs[9][j] = pow(1.0 + exp((NV_Ith_S(y, 0) - 19.9) / (-12.7)), (-0.5)); + AV_tau_xs = 0.5 * pow(AV_alpha_xs + AV_beta_xs, (-1.0)); + lmbda_ptrs[9][j] = -1. / AV_tau_xs; + + // #/* transient_outward_K_current_oa_gate */ + AV_alpha_oa = 0.65 * pow(exp((NV_Ith_S(y, 0) - (-10.0)) / (-8.5)) + exp((NV_Ith_S(y, 0) - (-10.0) - 40.0) / (-59.0)), (-1.0)); + AV_beta_oa = 0.65 * pow(2.5 + exp((NV_Ith_S(y, 0) - (-10.0) + 72.0) / 17.0), (-1.0)); + yinf_ptrs[4][j] = pow(1.0 + exp((NV_Ith_S(y, 0) - (-10.0) + 10.47) / (-17.54)), (-1.0)); + AV_tau_oa = pow(AV_alpha_oa + AV_beta_oa, (-1.0)) / AC_K_Q10; + lmbda_ptrs[4][j] = -1. 
/ AV_tau_oa; + + // #/* transient_outward_K_current_oi_gate */ + AV_alpha_oi = pow(18.53 + 1.0 * exp((NV_Ith_S(y, 0) - (-10.0) + 103.7) / 10.95), (-1.0)); + AV_beta_oi = pow(35.56 + 1.0 * exp((NV_Ith_S(y, 0) - (-10.0) - 8.74) / (-7.44)), (-1.0)); + yinf_ptrs[5][j] = pow(1.0 + exp((NV_Ith_S(y, 0) - (-10.0) + 33.1) / 5.3), (-1.0)); + AV_tau_oi = pow(AV_alpha_oi + AV_beta_oi, (-1.0)) / AC_K_Q10; + lmbda_ptrs[5][j] = -1. / AV_tau_oi; + + // #/* ultrarapid_delayed_rectifier_K_current_ua_gate */ + AV_alpha_ua = 0.65 * pow(exp((NV_Ith_S(y, 0) - (-10.0)) / (-8.5)) + exp((NV_Ith_S(y, 0) - (-10.0) - 40.0) / (-59.0)), (-1.0)); + AV_beta_ua = 0.65 * pow(2.5 + exp((NV_Ith_S(y, 0) - (-10.0) + 72.0) / 17.0), (-1.0)); + yinf_ptrs[6][j] = pow(1.0 + exp((NV_Ith_S(y, 0) - (-10.0) + 20.3) / (-9.6)), (-1.0)); + AV_tau_ua = pow(AV_alpha_ua + AV_beta_ua, (-1.0)) / AC_K_Q10; + lmbda_ptrs[6][j] = -1. / AV_tau_ua; + + // #/* ultrarapid_delayed_rectifier_K_current_ui_gate */ + AV_alpha_ui = pow(21.0 + 1.0 * exp((NV_Ith_S(y, 0) - (-10.0) - 195.0) / (-28.0)), (-1.0)); + AV_beta_ui = 1.0 / exp((NV_Ith_S(y, 0) - (-10.0) - 168.0) / (-16.0)); + yinf_ptrs[7][j] = pow(1.0 + exp((NV_Ith_S(y, 0) - (-10.0) - 109.45) / 27.48), (-1.0)); + AV_tau_ui = pow(AV_alpha_ui + AV_beta_ui, (-1.0)) / AC_K_Q10; + lmbda_ptrs[7][j] = -1. 
/ AV_tau_ui; + } +} + +void Courtemanche1998::lmbda_exp(py::array_t &y_list, py::array_t &lmbda_list) +{ + double *y_ptrs[size]; + double *lmbda_ptrs[size]; + size_t N; + size_t n_dofs; + get_raw_data(y_list, y_ptrs, N, n_dofs); + get_raw_data(lmbda_list, lmbda_ptrs, N, n_dofs); + + double y[size]; + double AV_tau_w, AV_tau_d, AV_tau_f, AV_alpha_h, AV_beta_h, AV_tau_h, AV_alpha_j, AV_beta_j, AV_tau_j; + double AV_alpha_m, AV_beta_m, AV_tau_m, AV_alpha_xr, AV_beta_xr, AV_tau_xr, AV_alpha_xs, AV_beta_xs; + double AV_tau_xs, AV_alpha_oa, AV_beta_oa, AV_tau_oa; + double AV_alpha_oi, AV_beta_oi, AV_tau_oi, AV_alpha_ua, AV_beta_ua, AV_tau_ua, AV_alpha_ui, AV_beta_ui, AV_tau_ui; + // Remember to scale the first variable!!! + for (unsigned j = 0; j < n_dofs; j++) + { + for (unsigned i = 0; i < size; i++) + y[i] = y_ptrs[i][j]; + + // # Linear (in the gating variables) terms + + // #/* Ca_release_current_from_JSR_w_gate */ + + AV_tau_w = abs(NV_Ith_S(y, 0) - 7.9) < 1e-10 ? 6.0 * 0.2 / 1.3 : 6.0 * (1.0 - exp((-(NV_Ith_S(y, 0) - 7.9)) / 5.0)) / ((1.0 + 0.3 * exp((-(NV_Ith_S(y, 0) - 7.9)) / 5.0)) * 1.0 * (NV_Ith_S(y, 0) - 7.9)); + lmbda_ptrs[15][j] = -1. / AV_tau_w; + + // #/* L_type_Ca_channel_d_gate */ + AV_tau_d = abs(NV_Ith_S(y, 0) + 10.0) < 1e-10 ? 4.579 / (1.0 + exp((NV_Ith_S(y, 0) + 10.0) / (-6.24))) : (1.0 - exp((NV_Ith_S(y, 0) + 10.0) / (-6.24))) / (0.035 * (NV_Ith_S(y, 0) + 10.0) * (1.0 + exp((NV_Ith_S(y, 0) + 10.0) / (-6.24)))); + lmbda_ptrs[10][j] = -1. / AV_tau_d; + + // #/* L_type_Ca_channel_f_gate */ + AV_tau_f = 9.0 * pow(0.0197 * exp((-pow(0.0337, 2.0)) * pow(NV_Ith_S(y, 0) + 10.0, 2.0)) + 0.02, (-1.0)); + lmbda_ptrs[11][j] = -1. / AV_tau_f; + + // #/* fast_sodium_current_h_gate */ + AV_alpha_h = NV_Ith_S(y, 0) < (-40.0) ? 0.135 * exp((NV_Ith_S(y, 0) + 80.0) / (-6.8)) : 0.0; + AV_beta_h = NV_Ith_S(y, 0) < (-40.0) ? 
3.56 * exp(0.079 * NV_Ith_S(y, 0)) + 310000.0 * exp(0.35 * NV_Ith_S(y, 0)) : 1.0 / (0.13 * (1.0 + exp((NV_Ith_S(y, 0) + 10.66) / (-11.1)))); + AV_tau_h = 1.0 / (AV_alpha_h + AV_beta_h); + lmbda_ptrs[2][j] = -1. / AV_tau_h; + + // #/* fast_sodium_current_j_gate */ + AV_alpha_j = NV_Ith_S(y, 0) < (-40.0) ? ((-127140.0) * exp(0.2444 * NV_Ith_S(y, 0)) - 3.474e-05 * exp((-0.04391) * NV_Ith_S(y, 0))) * (NV_Ith_S(y, 0) + 37.78) / (1.0 + exp(0.311 * (NV_Ith_S(y, 0) + 79.23))) : 0.0; + AV_beta_j = NV_Ith_S(y, 0) < (-40.0) ? 0.1212 * exp((-0.01052) * NV_Ith_S(y, 0)) / (1.0 + exp((-0.1378) * (NV_Ith_S(y, 0) + 40.14))) : 0.3 * exp((-2.535e-07) * NV_Ith_S(y, 0)) / (1.0 + exp((-0.1) * (NV_Ith_S(y, 0) + 32.0))); + AV_tau_j = 1.0 / (AV_alpha_j + AV_beta_j); + lmbda_ptrs[3][j] = -1. / AV_tau_j; + + // #/* fast_sodium_current_m_gate */ + AV_alpha_m = NV_Ith_S(y, 0) == (-47.13) ? 3.2 : 0.32 * (NV_Ith_S(y, 0) + 47.13) / (1.0 - exp((-0.1) * (NV_Ith_S(y, 0) + 47.13))); + AV_beta_m = 0.08 * exp((-NV_Ith_S(y, 0)) / 11.0); + AV_tau_m = 1.0 / (AV_alpha_m + AV_beta_m); + lmbda_ptrs[1][j] = -1. / AV_tau_m; + + // #/* rapid_delayed_rectifier_K_current_xr_gate */ + AV_alpha_xr = abs(NV_Ith_S(y, 0) + 14.1) < 1e-10 ? 0.0015 : 0.0003 * (NV_Ith_S(y, 0) + 14.1) / (1.0 - exp((NV_Ith_S(y, 0) + 14.1) / (-5.0))); + AV_beta_xr = abs(NV_Ith_S(y, 0) - 3.3328) < 1e-10 ? 3.78361180000000004e-04 : 7.38980000000000030e-05 * (NV_Ith_S(y, 0) - 3.3328) / (exp((NV_Ith_S(y, 0) - 3.3328) / 5.1237) - 1.0); + AV_tau_xr = pow(AV_alpha_xr + AV_beta_xr, (-1.0)); + lmbda_ptrs[8][j] = -1. / AV_tau_xr; + + // #/* slow_delayed_rectifier_K_current_xs_gate */ + AV_alpha_xs = abs(NV_Ith_S(y, 0) - 19.9) < 1e-10 ? 0.00068 : 4e-05 * (NV_Ith_S(y, 0) - 19.9) / (1.0 - exp((NV_Ith_S(y, 0) - 19.9) / (-17.0))); + AV_beta_xs = abs(NV_Ith_S(y, 0) - 19.9) < 1e-10 ? 0.000315 : 3.5e-05 * (NV_Ith_S(y, 0) - 19.9) / (exp((NV_Ith_S(y, 0) - 19.9) / 9.0) - 1.0); + AV_tau_xs = 0.5 * pow(AV_alpha_xs + AV_beta_xs, (-1.0)); + lmbda_ptrs[9][j] = -1. 
/ AV_tau_xs; + + // #/* transient_outward_K_current_oa_gate */ + AV_alpha_oa = 0.65 * pow(exp((NV_Ith_S(y, 0) - (-10.0)) / (-8.5)) + exp((NV_Ith_S(y, 0) - (-10.0) - 40.0) / (-59.0)), (-1.0)); + AV_beta_oa = 0.65 * pow(2.5 + exp((NV_Ith_S(y, 0) - (-10.0) + 72.0) / 17.0), (-1.0)); + AV_tau_oa = pow(AV_alpha_oa + AV_beta_oa, (-1.0)) / AC_K_Q10; + lmbda_ptrs[4][j] = -1. / AV_tau_oa; + + // #/* transient_outward_K_current_oi_gate */ + AV_alpha_oi = pow(18.53 + 1.0 * exp((NV_Ith_S(y, 0) - (-10.0) + 103.7) / 10.95), (-1.0)); + AV_beta_oi = pow(35.56 + 1.0 * exp((NV_Ith_S(y, 0) - (-10.0) - 8.74) / (-7.44)), (-1.0)); + AV_tau_oi = pow(AV_alpha_oi + AV_beta_oi, (-1.0)) / AC_K_Q10; + lmbda_ptrs[5][j] = -1. / AV_tau_oi; + + // #/* ultrarapid_delayed_rectifier_K_current_ua_gate */ + AV_alpha_ua = 0.65 * pow(exp((NV_Ith_S(y, 0) - (-10.0)) / (-8.5)) + exp((NV_Ith_S(y, 0) - (-10.0) - 40.0) / (-59.0)), (-1.0)); + AV_beta_ua = 0.65 * pow(2.5 + exp((NV_Ith_S(y, 0) - (-10.0) + 72.0) / 17.0), (-1.0)); + AV_tau_ua = pow(AV_alpha_ua + AV_beta_ua, (-1.0)) / AC_K_Q10; + lmbda_ptrs[6][j] = -1. / AV_tau_ua; + + // #/* ultrarapid_delayed_rectifier_K_current_ui_gate */ + AV_alpha_ui = pow(21.0 + 1.0 * exp((NV_Ith_S(y, 0) - (-10.0) - 195.0) / (-28.0)), (-1.0)); + AV_beta_ui = 1.0 / exp((NV_Ith_S(y, 0) - (-10.0) - 168.0) / (-16.0)); + AV_tau_ui = pow(AV_alpha_ui + AV_beta_ui, (-1.0)) / AC_K_Q10; + lmbda_ptrs[7][j] = -1. 
/ AV_tau_ui; + } +} + +double Courtemanche1998::rho_f_expl() +{ + return 7.5; +} + +#endif \ No newline at end of file diff --git a/pySDC/projects/Monodomain/problem_classes/ionicmodels/cpp/hodgkinhuxley.h b/pySDC/projects/Monodomain/problem_classes/ionicmodels/cpp/hodgkinhuxley.h new file mode 100644 index 0000000000..0b178308ef --- /dev/null +++ b/pySDC/projects/Monodomain/problem_classes/ionicmodels/cpp/hodgkinhuxley.h @@ -0,0 +1,177 @@ +#include +#include + +#include +#include +#include + +#include "ionicmodel.h" + +#ifndef HODGKINHUXLEY +#define HODGKINHUXLEY + +class HodgkinHuxley : public IonicModel +{ +public: + HodgkinHuxley(const double scale_); + ~HodgkinHuxley(){}; + void f(py::array_t &y, py::array_t &fy); + void f_expl(py::array_t &y, py::array_t &fy); + void lmbda_exp(py::array_t &y_list, py::array_t &lmbda_list); + void lmbda_yinf_exp(py::array_t &y_list, py::array_t &lmbda_list, py::array_t &yinf_list); + py::list initial_values(); + double rho_f_expl(); + +private: + double AC_g_L, AC_Cm, AC_E_R, AC_E_K, AC_g_K, AC_E_Na, AC_g_Na, AC_E_L; +}; + +HodgkinHuxley::HodgkinHuxley(const double scale_) + : IonicModel(scale_) +{ + size = 4; + + // Set values of constants + AC_g_L = 0.3; + AC_Cm = 1.0; + AC_E_R = -75.0; + AC_E_K = AC_E_R - 12.0; + AC_g_K = 36.0; + AC_E_Na = AC_E_R + 115.0; + AC_g_Na = 120.0; + AC_E_L = AC_E_R + 10.613; + + assign(f_expl_args, {0, 1, 2, 3}); + assign(f_exp_args, {0, 1, 2, 3}); + assign(f_expl_indeces, {0}); + assign(f_exp_indeces, {1, 2, 3}); +} + +py::list HodgkinHuxley::initial_values() +{ + py::list y0(size); + y0[0] = -75.0; + y0[1] = 0.05; + y0[2] = 0.595; + y0[3] = 0.317; + + return y0; +} + +void HodgkinHuxley::f(py::array_t &y_list, py::array_t &fy_list) +{ + double *y_ptrs[size]; + double *fy_ptrs[size]; + size_t N; + size_t n_dofs; + get_raw_data(y_list, y_ptrs, N, n_dofs); + get_raw_data(fy_list, fy_ptrs, N, n_dofs); + + double AV_alpha_n, AV_beta_n, AV_alpha_h, AV_beta_h, AV_alpha_m, AV_beta_m, AV_i_K, AV_i_Na, 
AV_i_L; + // Remember to scale the first variable!!! + for (unsigned j = 0; j < n_dofs; j++) + { + double y[4] = {y_ptrs[0][j], y_ptrs[1][j], y_ptrs[2][j], y_ptrs[3][j]}; + + AV_alpha_n = (-0.01) * (y[0] + 65.0) / (exp((-(y[0] + 65.0)) / 10.0) - 1.0); + AV_beta_n = 0.125 * exp((y[0] + 75.0) / 80.0); + fy_ptrs[3][j] = AV_alpha_n * (1.0 - y[3]) - AV_beta_n * y[3]; + + AV_alpha_h = 0.07 * exp((-(y[0] + 75.0)) / 20.0); + AV_beta_h = 1.0 / (exp((-(y[0] + 45.0)) / 10.0) + 1.0); + fy_ptrs[2][j] = AV_alpha_h * (1.0 - y[2]) - AV_beta_h * y[2]; + + AV_alpha_m = (-0.1) * (y[0] + 50.0) / (exp((-(y[0] + 50.0)) / 10.0) - 1.0); + AV_beta_m = 4.0 * exp((-(y[0] + 75.0)) / 18.0); + fy_ptrs[1][j] = AV_alpha_m * (1.0 - y[1]) - AV_beta_m * y[1]; + + AV_i_K = AC_g_K * pow(y[3], 4.0) * (y[0] - AC_E_K); + AV_i_Na = AC_g_Na * pow(y[1], 3.0) * y[2] * (y[0] - AC_E_Na); + AV_i_L = AC_g_L * (y[0] - AC_E_L); + fy_ptrs[0][j] = -scale * (AV_i_Na + AV_i_K + AV_i_L); + } +} + +void HodgkinHuxley::f_expl(py::array_t &y_list, py::array_t &fy_list) +{ + double *y_ptrs[size]; + double *fy_ptrs[size]; + size_t N; + size_t n_dofs; + get_raw_data(y_list, y_ptrs, N, n_dofs); + get_raw_data(fy_list, fy_ptrs, N, n_dofs); + + double AV_i_K, AV_i_Na, AV_i_L; + // Remember to scale the first variable!!! 
+ for (unsigned j = 0; j < n_dofs; j++) + { + AV_i_K = AC_g_K * pow(y_ptrs[3][j], 4.0) * (y_ptrs[0][j] - AC_E_K); + AV_i_Na = AC_g_Na * pow(y_ptrs[1][j], 3.0) * y_ptrs[2][j] * (y_ptrs[0][j] - AC_E_Na); + AV_i_L = AC_g_L * (y_ptrs[0][j] - AC_E_L); + fy_ptrs[0][j] = -scale * (AV_i_Na + AV_i_K + AV_i_L); + } +} + +void HodgkinHuxley::lmbda_exp(py::array_t &y_list, py::array_t &lmbda_list) +{ + double *y_ptrs[size]; + double *lmbda_ptrs[size]; + size_t N; + size_t n_dofs; + get_raw_data(y_list, y_ptrs, N, n_dofs); + get_raw_data(lmbda_list, lmbda_ptrs, N, n_dofs); + + double AV_alpha_n, AV_beta_n, AV_alpha_h, AV_beta_h, AV_alpha_m, AV_beta_m; + for (unsigned j = 0; j < n_dofs; j++) + { + AV_alpha_n = (-0.01) * (y_ptrs[0][j] + 65.0) / (exp((-(y_ptrs[0][j] + 65.0)) / 10.0) - 1.0); + AV_beta_n = 0.125 * exp((y_ptrs[0][j] + 75.0) / 80.0); + lmbda_ptrs[3][j] = -(AV_alpha_n + AV_beta_n); + + AV_alpha_h = 0.07 * exp((-(y_ptrs[0][j] + 75.0)) / 20.0); + AV_beta_h = 1.0 / (exp((-(y_ptrs[0][j] + 45.0)) / 10.0) + 1.0); + lmbda_ptrs[2][j] = -(AV_alpha_h + AV_beta_h); + + AV_alpha_m = (-0.1) * (y_ptrs[0][j] + 50.0) / (exp((-(y_ptrs[0][j] + 50.0)) / 10.0) - 1.0); + AV_beta_m = 4.0 * exp((-(y_ptrs[0][j] + 75.0)) / 18.0); + lmbda_ptrs[1][j] = -(AV_alpha_m + AV_beta_m); + } +} + +void HodgkinHuxley::lmbda_yinf_exp(py::array_t &y_list, py::array_t &lmbda_list, py::array_t &yinf_list) +{ + double *y_ptrs[size]; + double *lmbda_ptrs[size]; + double *yinf_ptrs[size]; + size_t N; + size_t n_dofs; + get_raw_data(y_list, y_ptrs, N, n_dofs); + get_raw_data(lmbda_list, lmbda_ptrs, N, n_dofs); + get_raw_data(yinf_list, yinf_ptrs, N, n_dofs); + + double AV_alpha_n, AV_beta_n, AV_alpha_h, AV_beta_h, AV_alpha_m, AV_beta_m; + for (unsigned j = 0; j < n_dofs; j++) + { + AV_alpha_n = (-0.01) * (y_ptrs[0][j] + 65.0) / (exp((-(y_ptrs[0][j] + 65.0)) / 10.0) - 1.0); + AV_beta_n = 0.125 * exp((y_ptrs[0][j] + 75.0) / 80.0); + lmbda_ptrs[3][j] = -(AV_alpha_n + AV_beta_n); + yinf_ptrs[3][j] = -AV_alpha_n / 
lmbda_ptrs[3][j]; + + AV_alpha_h = 0.07 * exp((-(y_ptrs[0][j] + 75.0)) / 20.0); + AV_beta_h = 1.0 / (exp((-(y_ptrs[0][j] + 45.0)) / 10.0) + 1.0); + lmbda_ptrs[2][j] = -(AV_alpha_h + AV_beta_h); + yinf_ptrs[2][j] = -AV_alpha_h / lmbda_ptrs[2][j]; + + AV_alpha_m = (-0.1) * (y_ptrs[0][j] + 50.0) / (exp((-(y_ptrs[0][j] + 50.0)) / 10.0) - 1.0); + AV_beta_m = 4.0 * exp((-(y_ptrs[0][j] + 75.0)) / 18.0); + lmbda_ptrs[1][j] = -(AV_alpha_m + AV_beta_m); + yinf_ptrs[1][j] = -AV_alpha_m / lmbda_ptrs[1][j]; + } +} + +double HodgkinHuxley::rho_f_expl() +{ + return 40.; +} + +#endif \ No newline at end of file diff --git a/pySDC/projects/Monodomain/problem_classes/ionicmodels/cpp/ionicmodel.h b/pySDC/projects/Monodomain/problem_classes/ionicmodels/cpp/ionicmodel.h new file mode 100644 index 0000000000..0cdee8648a --- /dev/null +++ b/pySDC/projects/Monodomain/problem_classes/ionicmodels/cpp/ionicmodel.h @@ -0,0 +1,61 @@ +#ifndef IONICMODEL +#define IONICMODEL + +// The ionic model codes are partially generated by myokit, which uses this NV_Ith_S function to access the elements of the array in their data type. +// In our implementation we use the [] operator to access the elements of the array so we define this function as a wrapper and avoid changing the original code. +double inline NV_Ith_S(double *y, unsigned i) +{ + return y[i]; +} + +double inline phi_f_from_lmbda_yinf(double y, double lmbda, double yinf, double dt) +{ + return ((exp(dt * lmbda) - 1.) / dt) * (y - yinf); +} + +double inline phi_f_from_tau_yinf(double y, double tau, double yinf, double dt) +{ + return ((exp(-dt / tau) - 1.) 
/ dt) * (y - yinf); +} + +void get_raw_data(py::array_t &x, double **array_ptrs, size_t &N, size_t &n_dofs) +{ + auto r = x.unchecked<2>(); + N = r.shape(0); + n_dofs = r.shape(1); + for (py::ssize_t i = 0; i < r.shape(0); i++) + array_ptrs[i] = (double *)r.data(i, 0); +}; + +void assign(py::list l, std::initializer_list a) +{ + for (auto a_el : a) + l.append(a_el); +}; + +class IonicModel +{ +public: + IonicModel(const double scale_); + + py::list f_expl_args; + py::list f_exp_args; + py::list f_expl_indeces; + py::list f_exp_indeces; + py::list get_f_expl_args() { return f_expl_args; }; + py::list get_f_exp_args() { return f_exp_args; }; + py::list get_f_expl_indeces() { return f_expl_indeces; }; + py::list get_f_exp_indeces() { return f_exp_indeces; }; + size_t get_size() { return size; }; + +protected: + double scale; + size_t size; +}; + +IonicModel::IonicModel(const double scale_) +{ + scale = scale_; +} + +#endif \ No newline at end of file diff --git a/pySDC/projects/Monodomain/problem_classes/ionicmodels/cpp/tentusscher.h b/pySDC/projects/Monodomain/problem_classes/ionicmodels/cpp/tentusscher.h new file mode 100644 index 0000000000..2b6ea5eace --- /dev/null +++ b/pySDC/projects/Monodomain/problem_classes/ionicmodels/cpp/tentusscher.h @@ -0,0 +1,542 @@ +#include +#include + +#include +#include +#include + +#include "ionicmodel.h" + +#ifndef TENTUSSCHER +#define TENTUSSCHER + +class TenTusscher2006_epi : public IonicModel +{ +public: + TenTusscher2006_epi(const double scale_); + ~TenTusscher2006_epi(){}; + void f(py::array_t &y, py::array_t &fy); + void f_expl(py::array_t &y, py::array_t &fy); + void lmbda_exp(py::array_t &y_list, py::array_t &lmbda_list); + void lmbda_yinf_exp(py::array_t &y_list, py::array_t &lmbda_list, py::array_t &yinf_list); + py::list initial_values(); + double rho_f_expl(); + +private: + double AC_Cm, AC_K_pCa, AC_g_pCa, AC_g_CaL, AC_g_bca, AC_Buf_c, AC_Buf_sr, AC_Buf_ss, AC_Ca_o, AC_EC, AC_K_buf_c, AC_K_buf_sr, AC_K_buf_ss, AC_K_up, 
AC_V_leak, AC_V_rel, AC_V_sr, AC_V_ss, AC_V_xfer, AC_Vmax_up, AC_k1_prime, AC_k2_prime, AC_k3, AC_k4, AC_max_sr, AC_min_sr, AC_g_Na, AC_g_K1, AC_F, AC_R, AC_T, AC_V_c, AC_stim_amplitude, AC_K_o, AC_g_pK, AC_g_Kr, AC_P_kna, AC_g_Ks, AC_g_bna, AC_K_NaCa, AC_K_sat, AC_Km_Ca, AC_Km_Nai, AC_alpha, AC_sodium_calcium_exchanger_current_gamma, AC_Na_o, AC_K_mNa, AC_K_mk, AC_P_NaK, AC_g_to; +}; + +TenTusscher2006_epi::TenTusscher2006_epi(const double scale_) + : IonicModel(scale_) +{ + size = 19; + + AC_Cm = 1.0; // 185.0; + AC_K_pCa = 0.0005; + AC_g_pCa = 0.1238; + AC_g_CaL = 0.0398; + AC_g_bca = 0.000592; + AC_Buf_c = 0.2; + AC_Buf_sr = 10.0; + AC_Buf_ss = 0.4; + AC_Ca_o = 2.0; + AC_EC = 1.5; + AC_K_buf_c = 0.001; + AC_K_buf_sr = 0.3; + AC_K_buf_ss = 0.00025; + AC_K_up = 0.00025; + AC_V_leak = 0.00036; + AC_V_rel = 0.102; + AC_V_sr = 1094.0; + AC_V_ss = 54.68; + AC_V_xfer = 0.0038; + AC_Vmax_up = 0.006375; + AC_k1_prime = 0.15; + AC_k2_prime = 0.045; + AC_k3 = 0.06; + AC_k4 = 0.005; + AC_max_sr = 2.5; + AC_min_sr = 1.0; + AC_g_Na = 14.838; + AC_g_K1 = 5.405; + AC_F = 96.485; + AC_R = 8.314; + AC_T = 310.0; + AC_V_c = 16404.0; + AC_stim_amplitude = (-52.0); + AC_K_o = 5.4; + AC_g_pK = 0.0146; + AC_g_Kr = 0.153; + AC_P_kna = 0.03; + AC_g_Ks = 0.392; + AC_g_bna = 0.00029; + AC_K_NaCa = 1000.0; + AC_K_sat = 0.1; + AC_Km_Ca = 1.38; + AC_Km_Nai = 87.5; + AC_alpha = 2.5; + AC_sodium_calcium_exchanger_current_gamma = 0.35; + AC_Na_o = 140.0; + AC_K_mNa = 40.0; + AC_K_mk = 1.0; + AC_P_NaK = 2.724; + AC_g_to = 0.294; + + assign(f_expl_args, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18}); + assign(f_exp_args, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 15}); + assign(f_expl_indeces, {0, 13, 14, 15, 16, 17, 18}); + assign(f_exp_indeces, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}); +} + +py::list TenTusscher2006_epi::initial_values() +{ + py::list y0(size); + y0[0] = -85.23; + y0[1] = 0.00621; + y0[2] = 0.4712; + y0[3] = 0.0095; + y0[4] = 0.00172; + y0[5] = 0.7444; + 
y0[6] = 0.7045; + y0[7] = 3.373e-05; + y0[8] = 0.7888; + y0[9] = 0.9755; + y0[10] = 0.9953; + y0[11] = 0.999998; + y0[12] = 2.42e-08; + y0[13] = 0.000126; + y0[14] = 3.64; + y0[15] = 0.00036; + y0[16] = 0.9073; + y0[17] = 8.604; + y0[18] = 136.89; + + return y0; +} + +void TenTusscher2006_epi::f(py::array_t &y_list, py::array_t &fy_list) +{ + double *y_ptrs[size]; + double *fy_ptrs[size]; + size_t N; + size_t n_dofs; + get_raw_data(y_list, y_ptrs, N, n_dofs); + get_raw_data(fy_list, fy_ptrs, N, n_dofs); + + double y[size]; + // # needed for linear in gating variables + double AV_alpha_d, AV_beta_d, AV_d_inf, AV_gamma_d, AV_tau_d, AV_f2_inf, AV_tau_f2, AV_fCass_inf, AV_tau_fCass, AV_f_inf, AV_tau_f; + double AV_alpha_h, AV_beta_h, AV_h_inf, AV_tau_h, AV_alpha_j, AV_beta_j, AV_j_inf, AV_tau_j, AV_alpha_m, AV_beta_m, AV_m_inf, AV_tau_m; + double AV_alpha_xr1, AV_beta_xr1, AV_xr1_inf, AV_tau_xr1, AV_alpha_xr2, AV_beta_xr2, AV_xr2_inf, AV_tau_xr2, AV_alpha_xs, AV_beta_xs, AV_xs_inf, AV_tau_xs; + double AV_r_inf, AV_tau_r, AV_s_inf, AV_tau_s; + // # needed for nonlinear in gating variables + double AV_f_JCa_i_free, AV_f_JCa_sr_free, AV_f_JCa_ss_free, AV_i_leak, AV_i_up, AV_i_xfer, AV_kcasr, AV_k1, AV_k2, AV_O, AV_i_rel, AV_ddt_Ca_sr_total; + double AV_E_Ca, AV_E_K, AV_i_NaK, AV_i_to, AV_i_p_Ca, AV_i_CaL, AV_i_b_Ca, AV_alpha_K1, AV_beta_K1, AV_i_p_K, AV_i_Kr, AV_E_Ks, AV_E_Na, AV_i_NaCa; + double AV_ddt_Ca_i_total, AV_ddt_Ca_ss_total, AV_i_Na, AV_i_K1, AV_xK1_inf, AV_i_Ks, AV_i_b_Na; + + for (unsigned j = 0; j < n_dofs; j++) + { + for (unsigned i = 0; i < size; i++) + y[i] = y_ptrs[i][j]; + + // # Linear in gating variables + + // # /* L_type_Ca_current_d_gate */ + AV_alpha_d = 1.4 / (1.0 + exp(((-35.0) - NV_Ith_S(y, 0)) / 13.0)) + 0.25; + AV_beta_d = 1.4 / (1.0 + exp((NV_Ith_S(y, 0) + 5.0) / 5.0)); + AV_d_inf = 1.0 / (1.0 + exp(((-8.0) - NV_Ith_S(y, 0)) / 7.5)); + AV_gamma_d = 1.0 / (1.0 + exp((50.0 - NV_Ith_S(y, 0)) / 20.0)); + AV_tau_d = 1.0 * AV_alpha_d * AV_beta_d + 
AV_gamma_d; + fy_ptrs[7][j] = (AV_d_inf - NV_Ith_S(y, 7)) / AV_tau_d; + + // # /* L_type_Ca_current_f2_gate */ + AV_f2_inf = 0.67 / (1.0 + exp((NV_Ith_S(y, 0) + 35.0) / 7.0)) + 0.33; + AV_tau_f2 = 562.0 * exp((-pow(NV_Ith_S(y, 0) + 27.0, 2.0)) / 240.0) + 31.0 / (1.0 + exp((25.0 - NV_Ith_S(y, 0)) / 10.0)) + 80.0 / (1.0 + exp((NV_Ith_S(y, 0) + 30.0) / 10.0)); + fy_ptrs[9][j] = (AV_f2_inf - NV_Ith_S(y, 9)) / AV_tau_f2; + + // # /* L_type_Ca_current_fCass_gate */ + AV_fCass_inf = 0.6 / (1.0 + pow(NV_Ith_S(y, 15) / 0.05, 2.0)) + 0.4; + AV_tau_fCass = 80.0 / (1.0 + pow(NV_Ith_S(y, 15) / 0.05, 2.0)) + 2.0; + fy_ptrs[10][j] = (AV_fCass_inf - NV_Ith_S(y, 10)) / AV_tau_fCass; + + // # /* L_type_Ca_current_f_gate */ + AV_f_inf = 1.0 / (1.0 + exp((NV_Ith_S(y, 0) + 20.0) / 7.0)); + AV_tau_f = 1102.5 * exp((-pow(NV_Ith_S(y, 0) + 27.0, 2.0)) / 225.0) + 200.0 / (1.0 + exp((13.0 - NV_Ith_S(y, 0)) / 10.0)) + 180.0 / (1.0 + exp((NV_Ith_S(y, 0) + 30.0) / 10.0)) + 20.0; + fy_ptrs[8][j] = (AV_f_inf - NV_Ith_S(y, 8)) / AV_tau_f; + + // # /* fast_sodium_current_h_gate */ + AV_alpha_h = NV_Ith_S(y, 0) < (-40.0) ? 0.057 * exp((-(NV_Ith_S(y, 0) + 80.0)) / 6.8) : 0.0; + AV_beta_h = NV_Ith_S(y, 0) < (-40.0) ? 2.7 * exp(0.079 * NV_Ith_S(y, 0)) + 310000.0 * exp(0.3485 * NV_Ith_S(y, 0)) : 0.77 / (0.13 * (1.0 + exp((NV_Ith_S(y, 0) + 10.66) / (-11.1)))); + AV_h_inf = 1.0 / pow(1.0 + exp((NV_Ith_S(y, 0) + 71.55) / 7.43), 2.0); + AV_tau_h = 1.0 / (AV_alpha_h + AV_beta_h); + fy_ptrs[5][j] = (AV_h_inf - NV_Ith_S(y, 5)) / AV_tau_h; + + // # /* fast_sodium_current_j_gate */ + AV_alpha_j = NV_Ith_S(y, 0) < (-40.0) ? ((-25428.0) * exp(0.2444 * NV_Ith_S(y, 0)) - 6.948e-06 * exp((-0.04391) * NV_Ith_S(y, 0))) * (NV_Ith_S(y, 0) + 37.78) / 1.0 / (1.0 + exp(0.311 * (NV_Ith_S(y, 0) + 79.23))) : 0.0; + AV_beta_j = NV_Ith_S(y, 0) < (-40.0) ? 
0.02424 * exp((-0.01052) * NV_Ith_S(y, 0)) / (1.0 + exp((-0.1378) * (NV_Ith_S(y, 0) + 40.14))) : 0.6 * exp(0.057 * NV_Ith_S(y, 0)) / (1.0 + exp((-0.1) * (NV_Ith_S(y, 0) + 32.0))); + AV_j_inf = 1.0 / pow(1.0 + exp((NV_Ith_S(y, 0) + 71.55) / 7.43), 2.0); + AV_tau_j = 1.0 / (AV_alpha_j + AV_beta_j); + fy_ptrs[6][j] = (AV_j_inf - NV_Ith_S(y, 6)) / AV_tau_j; + + // # /* fast_sodium_current_m_gate */ + AV_alpha_m = 1.0 / (1.0 + exp(((-60.0) - NV_Ith_S(y, 0)) / 5.0)); + AV_beta_m = 0.1 / (1.0 + exp((NV_Ith_S(y, 0) + 35.0) / 5.0)) + 0.1 / (1.0 + exp((NV_Ith_S(y, 0) - 50.0) / 200.0)); + AV_m_inf = 1.0 / pow(1.0 + exp(((-56.86) - NV_Ith_S(y, 0)) / 9.03), 2.0); + AV_tau_m = 1.0 * AV_alpha_m * AV_beta_m; + fy_ptrs[4][j] = (AV_m_inf - NV_Ith_S(y, 4)) / AV_tau_m; + + // # /* rapid_time_dependent_potassium_current_Xr1_gate */ + AV_alpha_xr1 = 450.0 / (1.0 + exp(((-45.0) - NV_Ith_S(y, 0)) / 10.0)); + AV_beta_xr1 = 6.0 / (1.0 + exp((NV_Ith_S(y, 0) + 30.0) / 11.5)); + AV_xr1_inf = 1.0 / (1.0 + exp(((-26.0) - NV_Ith_S(y, 0)) / 7.0)); + AV_tau_xr1 = 1.0 * AV_alpha_xr1 * AV_beta_xr1; + fy_ptrs[1][j] = (AV_xr1_inf - NV_Ith_S(y, 1)) / AV_tau_xr1; + + // # /* rapid_time_dependent_potassium_current_Xr2_gate */ + AV_alpha_xr2 = 3.0 / (1.0 + exp(((-60.0) - NV_Ith_S(y, 0)) / 20.0)); + AV_beta_xr2 = 1.12 / (1.0 + exp((NV_Ith_S(y, 0) - 60.0) / 20.0)); + AV_xr2_inf = 1.0 / (1.0 + exp((NV_Ith_S(y, 0) + 88.0) / 24.0)); + AV_tau_xr2 = 1.0 * AV_alpha_xr2 * AV_beta_xr2; + fy_ptrs[2][j] = (AV_xr2_inf - NV_Ith_S(y, 2)) / AV_tau_xr2; + + // # /* slow_time_dependent_potassium_current_Xs_gate */ + AV_alpha_xs = 1400.0 / sqrt(1.0 + exp((5.0 - NV_Ith_S(y, 0)) / 6.0)); + AV_beta_xs = 1.0 / (1.0 + exp((NV_Ith_S(y, 0) - 35.0) / 15.0)); + AV_xs_inf = 1.0 / (1.0 + exp(((-5.0) - NV_Ith_S(y, 0)) / 14.0)); + AV_tau_xs = 1.0 * AV_alpha_xs * AV_beta_xs + 80.0; + fy_ptrs[3][j] = (AV_xs_inf - NV_Ith_S(y, 3)) / AV_tau_xs; + + // # /* transient_outward_current_r_gate */ + AV_r_inf = 1.0 / (1.0 + exp((20.0 - NV_Ith_S(y, 
0)) / 6.0)); + AV_tau_r = 9.5 * exp((-pow(NV_Ith_S(y, 0) + 40.0, 2.0)) / 1800.0) + 0.8; + fy_ptrs[12][j] = (AV_r_inf - NV_Ith_S(y, 12)) / AV_tau_r; + + // # /* transient_outward_current_s_gate */ + AV_s_inf = 1.0 / (1.0 + exp((NV_Ith_S(y, 0) + 20.0) / 5.0)); + AV_tau_s = 85.0 * exp((-pow(NV_Ith_S(y, 0) + 45.0, 2.0)) / 320.0) + 5.0 / (1.0 + exp((NV_Ith_S(y, 0) - 20.0) / 5.0)) + 3.0; + fy_ptrs[11][j] = (AV_s_inf - NV_Ith_S(y, 11)) / AV_tau_s; + + // # Non linear in gating variables + + // # /* calcium_dynamics */ + AV_f_JCa_i_free = 1.0 / (1.0 + AC_Buf_c * AC_K_buf_c / pow(NV_Ith_S(y, 13) + AC_K_buf_c, 2.0)); + AV_f_JCa_sr_free = 1.0 / (1.0 + AC_Buf_sr * AC_K_buf_sr / pow(NV_Ith_S(y, 14) + AC_K_buf_sr, 2.0)); + AV_f_JCa_ss_free = 1.0 / (1.0 + AC_Buf_ss * AC_K_buf_ss / pow(NV_Ith_S(y, 15) + AC_K_buf_ss, 2.0)); + AV_i_leak = AC_V_leak * (NV_Ith_S(y, 14) - NV_Ith_S(y, 13)); + AV_i_up = AC_Vmax_up / (1.0 + pow(AC_K_up, 2.0) / pow(NV_Ith_S(y, 13), 2.0)); + AV_i_xfer = AC_V_xfer * (NV_Ith_S(y, 15) - NV_Ith_S(y, 13)); + AV_kcasr = AC_max_sr - (AC_max_sr - AC_min_sr) / (1.0 + pow(AC_EC / NV_Ith_S(y, 14), 2.0)); + AV_k1 = AC_k1_prime / AV_kcasr; + AV_k2 = AC_k2_prime * AV_kcasr; + AV_O = AV_k1 * pow(NV_Ith_S(y, 15), 2.0) * NV_Ith_S(y, 16) / (AC_k3 + AV_k1 * pow(NV_Ith_S(y, 15), 2.0)); + fy_ptrs[16][j] = (-AV_k2) * NV_Ith_S(y, 15) * NV_Ith_S(y, 16) + AC_k4 * (1.0 - NV_Ith_S(y, 16)); + AV_i_rel = AC_V_rel * AV_O * (NV_Ith_S(y, 14) - NV_Ith_S(y, 15)); + AV_ddt_Ca_sr_total = AV_i_up - (AV_i_rel + AV_i_leak); + fy_ptrs[14][j] = AV_ddt_Ca_sr_total * AV_f_JCa_sr_free; + + // # /* reversal_potentials */ + AV_E_Ca = 0.5 * AC_R * AC_T / AC_F * log(AC_Ca_o / NV_Ith_S(y, 13)); + AV_E_K = AC_R * AC_T / AC_F * log(AC_K_o / NV_Ith_S(y, 18)); + + // # /* sodium_potassium_pump_current */ + AV_i_NaK = AC_P_NaK * AC_K_o / (AC_K_o + AC_K_mk) * NV_Ith_S(y, 17) / (NV_Ith_S(y, 17) + AC_K_mNa) / (1.0 + 0.1245 * exp((-0.1) * NV_Ith_S(y, 0) * AC_F / (AC_R * AC_T)) + 0.0353 * exp((-NV_Ith_S(y, 0)) * 
AC_F / (AC_R * AC_T))); + + // # /* transient_outward_current */ + AV_i_to = AC_g_to * NV_Ith_S(y, 12) * NV_Ith_S(y, 11) * (NV_Ith_S(y, 0) - AV_E_K); + + // # /* calcium_pump_current */ + AV_i_p_Ca = AC_g_pCa * NV_Ith_S(y, 13) / (NV_Ith_S(y, 13) + AC_K_pCa); + + // # /* *remaining* */ + AV_i_CaL = AC_g_CaL * NV_Ith_S(y, 7) * NV_Ith_S(y, 8) * NV_Ith_S(y, 9) * NV_Ith_S(y, 10) * 4.0 * (NV_Ith_S(y, 0) - 15.0) * pow(AC_F, 2.0) / (AC_R * AC_T) * (0.25 * NV_Ith_S(y, 15) * exp(2.0 * (NV_Ith_S(y, 0) - 15.0) * AC_F / (AC_R * AC_T)) - AC_Ca_o) / (exp(2.0 * (NV_Ith_S(y, 0) - 15.0) * AC_F / (AC_R * AC_T)) - 1.0); + AV_i_b_Ca = AC_g_bca * (NV_Ith_S(y, 0) - AV_E_Ca); + AV_alpha_K1 = 0.1 / (1.0 + exp(0.06 * (NV_Ith_S(y, 0) - AV_E_K - 200.0))); + AV_beta_K1 = (3.0 * exp(0.0002 * (NV_Ith_S(y, 0) - AV_E_K + 100.0)) + exp(0.1 * (NV_Ith_S(y, 0) - AV_E_K - 10.0))) / (1.0 + exp((-0.5) * (NV_Ith_S(y, 0) - AV_E_K))); + AV_i_p_K = AC_g_pK * (NV_Ith_S(y, 0) - AV_E_K) / (1.0 + exp((25.0 - NV_Ith_S(y, 0)) / 5.98)); + AV_i_Kr = AC_g_Kr * sqrt(AC_K_o / 5.4) * NV_Ith_S(y, 1) * NV_Ith_S(y, 2) * (NV_Ith_S(y, 0) - AV_E_K); + AV_E_Ks = AC_R * AC_T / AC_F * log((AC_K_o + AC_P_kna * AC_Na_o) / (NV_Ith_S(y, 18) + AC_P_kna * NV_Ith_S(y, 17))); + AV_E_Na = AC_R * AC_T / AC_F * log(AC_Na_o / NV_Ith_S(y, 17)); + AV_i_NaCa = AC_K_NaCa * (exp(AC_sodium_calcium_exchanger_current_gamma * NV_Ith_S(y, 0) * AC_F / (AC_R * AC_T)) * pow(NV_Ith_S(y, 17), 3.0) * AC_Ca_o - exp((AC_sodium_calcium_exchanger_current_gamma - 1.0) * NV_Ith_S(y, 0) * AC_F / (AC_R * AC_T)) * pow(AC_Na_o, 3.0) * NV_Ith_S(y, 13) * AC_alpha) / ((pow(AC_Km_Nai, 3.0) + pow(AC_Na_o, 3.0)) * (AC_Km_Ca + AC_Ca_o) * (1.0 + AC_K_sat * exp((AC_sodium_calcium_exchanger_current_gamma - 1.0) * NV_Ith_S(y, 0) * AC_F / (AC_R * AC_T)))); + AV_ddt_Ca_i_total = (-(AV_i_b_Ca + AV_i_p_Ca - 2.0 * AV_i_NaCa)) * AC_Cm / (2.0 * AC_V_c * AC_F) + (AV_i_leak - AV_i_up) * AC_V_sr / AC_V_c + AV_i_xfer; + AV_ddt_Ca_ss_total = (-AV_i_CaL) * AC_Cm / (2.0 * AC_V_ss * AC_F) + 
AV_i_rel * AC_V_sr / AC_V_ss - AV_i_xfer * AC_V_c / AC_V_ss; + AV_i_Na = AC_g_Na * pow(NV_Ith_S(y, 4), 3.0) * NV_Ith_S(y, 5) * NV_Ith_S(y, 6) * (NV_Ith_S(y, 0) - AV_E_Na); + AV_xK1_inf = AV_alpha_K1 / (AV_alpha_K1 + AV_beta_K1); + AV_i_Ks = AC_g_Ks * pow(NV_Ith_S(y, 3), 2.0) * (NV_Ith_S(y, 0) - AV_E_Ks); + AV_i_b_Na = AC_g_bna * (NV_Ith_S(y, 0) - AV_E_Na); + fy_ptrs[13][j] = AV_ddt_Ca_i_total * AV_f_JCa_i_free; + fy_ptrs[15][j] = AV_ddt_Ca_ss_total * AV_f_JCa_ss_free; + AV_i_K1 = AC_g_K1 * AV_xK1_inf * sqrt(AC_K_o / 5.4) * (NV_Ith_S(y, 0) - AV_E_K); + fy_ptrs[17][j] = (-(AV_i_Na + AV_i_b_Na + 3.0 * AV_i_NaK + 3.0 * AV_i_NaCa)) / (AC_V_c * AC_F) * AC_Cm; + fy_ptrs[0][j] = scale * (-(AV_i_K1 + AV_i_to + AV_i_Kr + AV_i_Ks + AV_i_CaL + AV_i_NaK + AV_i_Na + AV_i_b_Na + AV_i_NaCa + AV_i_b_Ca + AV_i_p_K + AV_i_p_Ca)); + fy_ptrs[18][j] = (-(AV_i_K1 + AV_i_to + AV_i_Kr + AV_i_Ks + AV_i_p_K - 2.0 * AV_i_NaK)) / (AC_V_c * AC_F) * AC_Cm; + } +} + +void TenTusscher2006_epi::f_expl(py::array_t &y_list, py::array_t &fy_list) +{ + double *y_ptrs[size]; + double *fy_ptrs[size]; + size_t N; + size_t n_dofs; + get_raw_data(y_list, y_ptrs, N, n_dofs); + get_raw_data(fy_list, fy_ptrs, N, n_dofs); + + double y[size]; + double AV_f_JCa_i_free, AV_f_JCa_sr_free, AV_f_JCa_ss_free, AV_i_leak, AV_i_up, AV_i_xfer, AV_kcasr, AV_k1, AV_k2, AV_O, AV_i_rel, AV_ddt_Ca_sr_total; + double AV_E_Ca, AV_E_K, AV_i_NaK, AV_i_to, AV_i_p_Ca, AV_i_CaL, AV_i_b_Ca, AV_alpha_K1, AV_beta_K1, AV_i_p_K, AV_i_Kr, AV_E_Ks, AV_E_Na, AV_i_NaCa; + double AV_ddt_Ca_i_total, AV_ddt_Ca_ss_total, AV_i_Na, AV_i_K1, AV_xK1_inf, AV_i_Ks, AV_i_b_Na; + + for (unsigned j = 0; j < n_dofs; j++) + { + + for (unsigned i = 0; i < size; i++) + y[i] = y_ptrs[i][j]; + + AV_f_JCa_i_free = 1.0 / (1.0 + AC_Buf_c * AC_K_buf_c / pow(NV_Ith_S(y, 13) + AC_K_buf_c, 2.0)); + AV_f_JCa_sr_free = 1.0 / (1.0 + AC_Buf_sr * AC_K_buf_sr / pow(NV_Ith_S(y, 14) + AC_K_buf_sr, 2.0)); + AV_f_JCa_ss_free = 1.0 / (1.0 + AC_Buf_ss * AC_K_buf_ss / 
pow(NV_Ith_S(y, 15) + AC_K_buf_ss, 2.0)); + AV_i_leak = AC_V_leak * (NV_Ith_S(y, 14) - NV_Ith_S(y, 13)); + AV_i_up = AC_Vmax_up / (1.0 + pow(AC_K_up, 2.0) / pow(NV_Ith_S(y, 13), 2.0)); + AV_i_xfer = AC_V_xfer * (NV_Ith_S(y, 15) - NV_Ith_S(y, 13)); + AV_kcasr = AC_max_sr - (AC_max_sr - AC_min_sr) / (1.0 + pow(AC_EC / NV_Ith_S(y, 14), 2.0)); + AV_k1 = AC_k1_prime / AV_kcasr; + AV_k2 = AC_k2_prime * AV_kcasr; + AV_O = AV_k1 * pow(NV_Ith_S(y, 15), 2.0) * NV_Ith_S(y, 16) / (AC_k3 + AV_k1 * pow(NV_Ith_S(y, 15), 2.0)); + fy_ptrs[16][j] = (-AV_k2) * NV_Ith_S(y, 15) * NV_Ith_S(y, 16) + AC_k4 * (1.0 - NV_Ith_S(y, 16)); + AV_i_rel = AC_V_rel * AV_O * (NV_Ith_S(y, 14) - NV_Ith_S(y, 15)); + AV_ddt_Ca_sr_total = AV_i_up - (AV_i_rel + AV_i_leak); + fy_ptrs[14][j] = AV_ddt_Ca_sr_total * AV_f_JCa_sr_free; + + // # /* reversal_potentials */ + AV_E_Ca = 0.5 * AC_R * AC_T / AC_F * log(AC_Ca_o / NV_Ith_S(y, 13)); + AV_E_K = AC_R * AC_T / AC_F * log(AC_K_o / NV_Ith_S(y, 18)); + + // # /* sodium_potassium_pump_current */ + AV_i_NaK = AC_P_NaK * AC_K_o / (AC_K_o + AC_K_mk) * NV_Ith_S(y, 17) / (NV_Ith_S(y, 17) + AC_K_mNa) / (1.0 + 0.1245 * exp((-0.1) * NV_Ith_S(y, 0) * AC_F / (AC_R * AC_T)) + 0.0353 * exp((-NV_Ith_S(y, 0)) * AC_F / (AC_R * AC_T))); + + // # /* transient_outward_current */ + AV_i_to = AC_g_to * NV_Ith_S(y, 12) * NV_Ith_S(y, 11) * (NV_Ith_S(y, 0) - AV_E_K); + + // # /* calcium_pump_current */ + AV_i_p_Ca = AC_g_pCa * NV_Ith_S(y, 13) / (NV_Ith_S(y, 13) + AC_K_pCa); + + // # /* *remaining* */ + AV_i_CaL = AC_g_CaL * NV_Ith_S(y, 7) * NV_Ith_S(y, 8) * NV_Ith_S(y, 9) * NV_Ith_S(y, 10) * 4.0 * (NV_Ith_S(y, 0) - 15.0) * pow(AC_F, 2.0) / (AC_R * AC_T) * (0.25 * NV_Ith_S(y, 15) * exp(2.0 * (NV_Ith_S(y, 0) - 15.0) * AC_F / (AC_R * AC_T)) - AC_Ca_o) / (exp(2.0 * (NV_Ith_S(y, 0) - 15.0) * AC_F / (AC_R * AC_T)) - 1.0); + AV_i_b_Ca = AC_g_bca * (NV_Ith_S(y, 0) - AV_E_Ca); + AV_alpha_K1 = 0.1 / (1.0 + exp(0.06 * (NV_Ith_S(y, 0) - AV_E_K - 200.0))); + AV_beta_K1 = (3.0 * exp(0.0002 * 
(NV_Ith_S(y, 0) - AV_E_K + 100.0)) + exp(0.1 * (NV_Ith_S(y, 0) - AV_E_K - 10.0))) / (1.0 + exp((-0.5) * (NV_Ith_S(y, 0) - AV_E_K))); + AV_i_p_K = AC_g_pK * (NV_Ith_S(y, 0) - AV_E_K) / (1.0 + exp((25.0 - NV_Ith_S(y, 0)) / 5.98)); + AV_i_Kr = AC_g_Kr * sqrt(AC_K_o / 5.4) * NV_Ith_S(y, 1) * NV_Ith_S(y, 2) * (NV_Ith_S(y, 0) - AV_E_K); + AV_E_Ks = AC_R * AC_T / AC_F * log((AC_K_o + AC_P_kna * AC_Na_o) / (NV_Ith_S(y, 18) + AC_P_kna * NV_Ith_S(y, 17))); + AV_E_Na = AC_R * AC_T / AC_F * log(AC_Na_o / NV_Ith_S(y, 17)); + AV_i_NaCa = AC_K_NaCa * (exp(AC_sodium_calcium_exchanger_current_gamma * NV_Ith_S(y, 0) * AC_F / (AC_R * AC_T)) * pow(NV_Ith_S(y, 17), 3.0) * AC_Ca_o - exp((AC_sodium_calcium_exchanger_current_gamma - 1.0) * NV_Ith_S(y, 0) * AC_F / (AC_R * AC_T)) * pow(AC_Na_o, 3.0) * NV_Ith_S(y, 13) * AC_alpha) / ((pow(AC_Km_Nai, 3.0) + pow(AC_Na_o, 3.0)) * (AC_Km_Ca + AC_Ca_o) * (1.0 + AC_K_sat * exp((AC_sodium_calcium_exchanger_current_gamma - 1.0) * NV_Ith_S(y, 0) * AC_F / (AC_R * AC_T)))); + AV_ddt_Ca_i_total = (-(AV_i_b_Ca + AV_i_p_Ca - 2.0 * AV_i_NaCa)) * AC_Cm / (2.0 * AC_V_c * AC_F) + (AV_i_leak - AV_i_up) * AC_V_sr / AC_V_c + AV_i_xfer; + AV_ddt_Ca_ss_total = (-AV_i_CaL) * AC_Cm / (2.0 * AC_V_ss * AC_F) + AV_i_rel * AC_V_sr / AC_V_ss - AV_i_xfer * AC_V_c / AC_V_ss; + AV_i_Na = AC_g_Na * pow(NV_Ith_S(y, 4), 3.0) * NV_Ith_S(y, 5) * NV_Ith_S(y, 6) * (NV_Ith_S(y, 0) - AV_E_Na); + AV_xK1_inf = AV_alpha_K1 / (AV_alpha_K1 + AV_beta_K1); + AV_i_Ks = AC_g_Ks * pow(NV_Ith_S(y, 3), 2.0) * (NV_Ith_S(y, 0) - AV_E_Ks); + AV_i_b_Na = AC_g_bna * (NV_Ith_S(y, 0) - AV_E_Na); + fy_ptrs[13][j] = AV_ddt_Ca_i_total * AV_f_JCa_i_free; + fy_ptrs[15][j] = AV_ddt_Ca_ss_total * AV_f_JCa_ss_free; + AV_i_K1 = AC_g_K1 * AV_xK1_inf * sqrt(AC_K_o / 5.4) * (NV_Ith_S(y, 0) - AV_E_K); + fy_ptrs[17][j] = (-(AV_i_Na + AV_i_b_Na + 3.0 * AV_i_NaK + 3.0 * AV_i_NaCa)) / (AC_V_c * AC_F) * AC_Cm; + fy_ptrs[0][j] = scale * (-(AV_i_K1 + AV_i_to + AV_i_Kr + AV_i_Ks + AV_i_CaL + AV_i_NaK + AV_i_Na + AV_i_b_Na 
+ AV_i_NaCa + AV_i_b_Ca + AV_i_p_K + AV_i_p_Ca)); + fy_ptrs[18][j] = (-(AV_i_K1 + AV_i_to + AV_i_Kr + AV_i_Ks + AV_i_p_K - 2.0 * AV_i_NaK)) / (AC_V_c * AC_F) * AC_Cm; + } +} + +void TenTusscher2006_epi::lmbda_yinf_exp(py::array_t &y_list, py::array_t &lmbda_list, py::array_t &yinf_list) +{ + double *y_ptrs[size]; + double *lmbda_ptrs[size]; + double *yinf_ptrs[size]; + size_t N; + size_t n_dofs; + get_raw_data(y_list, y_ptrs, N, n_dofs); + get_raw_data(lmbda_list, lmbda_ptrs, N, n_dofs); + get_raw_data(yinf_list, yinf_ptrs, N, n_dofs); + + double y[size]; + double AV_alpha_d, AV_beta_d, AV_gamma_d, AV_tau_d, AV_tau_f2, AV_tau_fCass, AV_tau_f; + double AV_alpha_h, AV_beta_h, AV_tau_h, AV_alpha_j, AV_beta_j, AV_tau_j, AV_alpha_m, AV_beta_m, AV_tau_m; + double AV_alpha_xr1, AV_beta_xr1, AV_tau_xr1, AV_alpha_xr2, AV_beta_xr2, AV_tau_xr2, AV_alpha_xs, AV_beta_xs, AV_tau_xs; + double AV_tau_r, AV_tau_s; + + // Remember to scale the first variable!!! + for (unsigned j = 0; j < n_dofs; j++) + { + + for (unsigned i = 0; i < size; i++) + y[i] = y_ptrs[i][j]; + + // # /* L_type_Ca_current_d_gate */ + AV_alpha_d = 1.4 / (1.0 + exp(((-35.0) - NV_Ith_S(y, 0)) / 13.0)) + 0.25; + AV_beta_d = 1.4 / (1.0 + exp((NV_Ith_S(y, 0) + 5.0) / 5.0)); + yinf_ptrs[7][j] = 1.0 / (1.0 + exp(((-8.0) - NV_Ith_S(y, 0)) / 7.5)); + AV_gamma_d = 1.0 / (1.0 + exp((50.0 - NV_Ith_S(y, 0)) / 20.0)); + AV_tau_d = 1.0 * AV_alpha_d * AV_beta_d + AV_gamma_d; + lmbda_ptrs[7][j] = -1. / AV_tau_d; + + // # /* L_type_Ca_current_f2_gate */ + yinf_ptrs[9][j] = 0.67 / (1.0 + exp((NV_Ith_S(y, 0) + 35.0) / 7.0)) + 0.33; + AV_tau_f2 = 562.0 * exp((-pow(NV_Ith_S(y, 0) + 27.0, 2.0)) / 240.0) + 31.0 / (1.0 + exp((25.0 - NV_Ith_S(y, 0)) / 10.0)) + 80.0 / (1.0 + exp((NV_Ith_S(y, 0) + 30.0) / 10.0)); + lmbda_ptrs[9][j] = -1. 
/ AV_tau_f2; + + // # /* L_type_Ca_current_fCass_gate */ + yinf_ptrs[10][j] = 0.6 / (1.0 + pow(NV_Ith_S(y, 15) / 0.05, 2.0)) + 0.4; + AV_tau_fCass = 80.0 / (1.0 + pow(NV_Ith_S(y, 15) / 0.05, 2.0)) + 2.0; + lmbda_ptrs[10][j] = -1. / AV_tau_fCass; + + // # /* L_type_Ca_current_f_gate */ + yinf_ptrs[8][j] = 1.0 / (1.0 + exp((NV_Ith_S(y, 0) + 20.0) / 7.0)); + AV_tau_f = 1102.5 * exp((-pow(NV_Ith_S(y, 0) + 27.0, 2.0)) / 225.0) + 200.0 / (1.0 + exp((13.0 - NV_Ith_S(y, 0)) / 10.0)) + 180.0 / (1.0 + exp((NV_Ith_S(y, 0) + 30.0) / 10.0)) + 20.0; + lmbda_ptrs[8][j] = -1. / AV_tau_f; + + // # /* fast_sodium_current_h_gate */ + AV_alpha_h = NV_Ith_S(y, 0) < (-40.0) ? 0.057 * exp((-(NV_Ith_S(y, 0) + 80.0)) / 6.8) : 0.0; + AV_beta_h = NV_Ith_S(y, 0) < (-40.0) ? 2.7 * exp(0.079 * NV_Ith_S(y, 0)) + 310000.0 * exp(0.3485 * NV_Ith_S(y, 0)) : 0.77 / (0.13 * (1.0 + exp((NV_Ith_S(y, 0) + 10.66) / (-11.1)))); + yinf_ptrs[5][j] = 1.0 / pow(1.0 + exp((NV_Ith_S(y, 0) + 71.55) / 7.43), 2.0); + AV_tau_h = 1.0 / (AV_alpha_h + AV_beta_h); + lmbda_ptrs[5][j] = -1. / AV_tau_h; + + // # /* fast_sodium_current_j_gate */ + AV_alpha_j = NV_Ith_S(y, 0) < (-40.0) ? ((-25428.0) * exp(0.2444 * NV_Ith_S(y, 0)) - 6.948e-06 * exp((-0.04391) * NV_Ith_S(y, 0))) * (NV_Ith_S(y, 0) + 37.78) / 1.0 / (1.0 + exp(0.311 * (NV_Ith_S(y, 0) + 79.23))) : 0.0; + AV_beta_j = NV_Ith_S(y, 0) < (-40.0) ? 0.02424 * exp((-0.01052) * NV_Ith_S(y, 0)) / (1.0 + exp((-0.1378) * (NV_Ith_S(y, 0) + 40.14))) : 0.6 * exp(0.057 * NV_Ith_S(y, 0)) / (1.0 + exp((-0.1) * (NV_Ith_S(y, 0) + 32.0))); + yinf_ptrs[6][j] = 1.0 / pow(1.0 + exp((NV_Ith_S(y, 0) + 71.55) / 7.43), 2.0); + AV_tau_j = 1.0 / (AV_alpha_j + AV_beta_j); + lmbda_ptrs[6][j] = -1. 
/ AV_tau_j; + + // # /* fast_sodium_current_m_gate */ + AV_alpha_m = 1.0 / (1.0 + exp(((-60.0) - NV_Ith_S(y, 0)) / 5.0)); + AV_beta_m = 0.1 / (1.0 + exp((NV_Ith_S(y, 0) + 35.0) / 5.0)) + 0.1 / (1.0 + exp((NV_Ith_S(y, 0) - 50.0) / 200.0)); + yinf_ptrs[4][j] = 1.0 / pow(1.0 + exp(((-56.86) - NV_Ith_S(y, 0)) / 9.03), 2.0); + AV_tau_m = 1.0 * AV_alpha_m * AV_beta_m; + lmbda_ptrs[4][j] = -1. / AV_tau_m; + + // # /* rapid_time_dependent_potassium_current_Xr1_gate */ + AV_alpha_xr1 = 450.0 / (1.0 + exp(((-45.0) - NV_Ith_S(y, 0)) / 10.0)); + AV_beta_xr1 = 6.0 / (1.0 + exp((NV_Ith_S(y, 0) + 30.0) / 11.5)); + yinf_ptrs[1][j] = 1.0 / (1.0 + exp(((-26.0) - NV_Ith_S(y, 0)) / 7.0)); + AV_tau_xr1 = 1.0 * AV_alpha_xr1 * AV_beta_xr1; + lmbda_ptrs[1][j] = -1. / AV_tau_xr1; + + // # /* rapid_time_dependent_potassium_current_Xr2_gate */ + AV_alpha_xr2 = 3.0 / (1.0 + exp(((-60.0) - NV_Ith_S(y, 0)) / 20.0)); + AV_beta_xr2 = 1.12 / (1.0 + exp((NV_Ith_S(y, 0) - 60.0) / 20.0)); + yinf_ptrs[2][j] = 1.0 / (1.0 + exp((NV_Ith_S(y, 0) + 88.0) / 24.0)); + AV_tau_xr2 = 1.0 * AV_alpha_xr2 * AV_beta_xr2; + lmbda_ptrs[2][j] = -1. / AV_tau_xr2; + + // # /* slow_time_dependent_potassium_current_Xs_gate */ + AV_alpha_xs = 1400.0 / sqrt(1.0 + exp((5.0 - NV_Ith_S(y, 0)) / 6.0)); + AV_beta_xs = 1.0 / (1.0 + exp((NV_Ith_S(y, 0) - 35.0) / 15.0)); + yinf_ptrs[3][j] = 1.0 / (1.0 + exp(((-5.0) - NV_Ith_S(y, 0)) / 14.0)); + AV_tau_xs = 1.0 * AV_alpha_xs * AV_beta_xs + 80.0; + lmbda_ptrs[3][j] = -1. / AV_tau_xs; + + // # /* transient_outward_current_r_gate */ + yinf_ptrs[12][j] = 1.0 / (1.0 + exp((20.0 - NV_Ith_S(y, 0)) / 6.0)); + AV_tau_r = 9.5 * exp((-pow(NV_Ith_S(y, 0) + 40.0, 2.0)) / 1800.0) + 0.8; + lmbda_ptrs[12][j] = -1. / AV_tau_r; + + // # /* transient_outward_current_s_gate */ + yinf_ptrs[11][j] = 1.0 / (1.0 + exp((NV_Ith_S(y, 0) + 20.0) / 5.0)); + AV_tau_s = 85.0 * exp((-pow(NV_Ith_S(y, 0) + 45.0, 2.0)) / 320.0) + 5.0 / (1.0 + exp((NV_Ith_S(y, 0) - 20.0) / 5.0)) + 3.0; + lmbda_ptrs[11][j] = -1. 
/ AV_tau_s; + } +} + +void TenTusscher2006_epi::lmbda_exp(py::array_t &y_list, py::array_t &lmbda_list) +{ + double *y_ptrs[size]; + double *lmbda_ptrs[size]; + size_t N; + size_t n_dofs; + get_raw_data(y_list, y_ptrs, N, n_dofs); + get_raw_data(lmbda_list, lmbda_ptrs, N, n_dofs); + + double y[size]; + double AV_alpha_d, AV_beta_d, AV_gamma_d, AV_tau_d, AV_tau_f2, AV_tau_fCass, AV_tau_f; + double AV_alpha_h, AV_beta_h, AV_tau_h, AV_alpha_j, AV_beta_j, AV_tau_j, AV_alpha_m, AV_beta_m, AV_tau_m; + double AV_alpha_xr1, AV_beta_xr1, AV_tau_xr1, AV_alpha_xr2, AV_beta_xr2, AV_tau_xr2, AV_alpha_xs, AV_beta_xs, AV_tau_xs; + double AV_tau_r, AV_tau_s; + + // Remember to scale the first variable!!! + + for (unsigned j = 0; j < n_dofs; j++) + { + + for (unsigned i = 0; i < size; i++) + y[i] = y_ptrs[i][j]; + + // # /* L_type_Ca_current_d_gate */ + AV_alpha_d = 1.4 / (1.0 + exp(((-35.0) - NV_Ith_S(y, 0)) / 13.0)) + 0.25; + AV_beta_d = 1.4 / (1.0 + exp((NV_Ith_S(y, 0) + 5.0) / 5.0)); + AV_gamma_d = 1.0 / (1.0 + exp((50.0 - NV_Ith_S(y, 0)) / 20.0)); + AV_tau_d = 1.0 * AV_alpha_d * AV_beta_d + AV_gamma_d; + lmbda_ptrs[7][j] = -1. / AV_tau_d; + + // # /* L_type_Ca_current_f2_gate */ + AV_tau_f2 = 562.0 * exp((-pow(NV_Ith_S(y, 0) + 27.0, 2.0)) / 240.0) + 31.0 / (1.0 + exp((25.0 - NV_Ith_S(y, 0)) / 10.0)) + 80.0 / (1.0 + exp((NV_Ith_S(y, 0) + 30.0) / 10.0)); + lmbda_ptrs[9][j] = -1. / AV_tau_f2; + + // # /* L_type_Ca_current_fCass_gate */ + AV_tau_fCass = 80.0 / (1.0 + pow(NV_Ith_S(y, 15) / 0.05, 2.0)) + 2.0; + lmbda_ptrs[10][j] = -1. / AV_tau_fCass; + + // # /* L_type_Ca_current_f_gate */ + AV_tau_f = 1102.5 * exp((-pow(NV_Ith_S(y, 0) + 27.0, 2.0)) / 225.0) + 200.0 / (1.0 + exp((13.0 - NV_Ith_S(y, 0)) / 10.0)) + 180.0 / (1.0 + exp((NV_Ith_S(y, 0) + 30.0) / 10.0)) + 20.0; + lmbda_ptrs[8][j] = -1. / AV_tau_f; + + // # /* fast_sodium_current_h_gate */ + AV_alpha_h = NV_Ith_S(y, 0) < (-40.0) ? 0.057 * exp((-(NV_Ith_S(y, 0) + 80.0)) / 6.8) : 0.0; + AV_beta_h = NV_Ith_S(y, 0) < (-40.0) ? 
2.7 * exp(0.079 * NV_Ith_S(y, 0)) + 310000.0 * exp(0.3485 * NV_Ith_S(y, 0)) : 0.77 / (0.13 * (1.0 + exp((NV_Ith_S(y, 0) + 10.66) / (-11.1)))); + AV_tau_h = 1.0 / (AV_alpha_h + AV_beta_h); + lmbda_ptrs[5][j] = -1. / AV_tau_h; + + // # /* fast_sodium_current_j_gate */ + AV_alpha_j = NV_Ith_S(y, 0) < (-40.0) ? ((-25428.0) * exp(0.2444 * NV_Ith_S(y, 0)) - 6.948e-06 * exp((-0.04391) * NV_Ith_S(y, 0))) * (NV_Ith_S(y, 0) + 37.78) / 1.0 / (1.0 + exp(0.311 * (NV_Ith_S(y, 0) + 79.23))) : 0.0; + AV_beta_j = NV_Ith_S(y, 0) < (-40.0) ? 0.02424 * exp((-0.01052) * NV_Ith_S(y, 0)) / (1.0 + exp((-0.1378) * (NV_Ith_S(y, 0) + 40.14))) : 0.6 * exp(0.057 * NV_Ith_S(y, 0)) / (1.0 + exp((-0.1) * (NV_Ith_S(y, 0) + 32.0))); + AV_tau_j = 1.0 / (AV_alpha_j + AV_beta_j); + lmbda_ptrs[6][j] = -1. / AV_tau_j; + + // # /* fast_sodium_current_m_gate */ + AV_alpha_m = 1.0 / (1.0 + exp(((-60.0) - NV_Ith_S(y, 0)) / 5.0)); + AV_beta_m = 0.1 / (1.0 + exp((NV_Ith_S(y, 0) + 35.0) / 5.0)) + 0.1 / (1.0 + exp((NV_Ith_S(y, 0) - 50.0) / 200.0)); + AV_tau_m = 1.0 * AV_alpha_m * AV_beta_m; + lmbda_ptrs[4][j] = -1. / AV_tau_m; + + // # /* rapid_time_dependent_potassium_current_Xr1_gate */ + AV_alpha_xr1 = 450.0 / (1.0 + exp(((-45.0) - NV_Ith_S(y, 0)) / 10.0)); + AV_beta_xr1 = 6.0 / (1.0 + exp((NV_Ith_S(y, 0) + 30.0) / 11.5)); + AV_tau_xr1 = 1.0 * AV_alpha_xr1 * AV_beta_xr1; + lmbda_ptrs[1][j] = -1. / AV_tau_xr1; + + // # /* rapid_time_dependent_potassium_current_Xr2_gate */ + AV_alpha_xr2 = 3.0 / (1.0 + exp(((-60.0) - NV_Ith_S(y, 0)) / 20.0)); + AV_beta_xr2 = 1.12 / (1.0 + exp((NV_Ith_S(y, 0) - 60.0) / 20.0)); + AV_tau_xr2 = 1.0 * AV_alpha_xr2 * AV_beta_xr2; + lmbda_ptrs[2][j] = -1. / AV_tau_xr2; + + // # /* slow_time_dependent_potassium_current_Xs_gate */ + AV_alpha_xs = 1400.0 / sqrt(1.0 + exp((5.0 - NV_Ith_S(y, 0)) / 6.0)); + AV_beta_xs = 1.0 / (1.0 + exp((NV_Ith_S(y, 0) - 35.0) / 15.0)); + AV_tau_xs = 1.0 * AV_alpha_xs * AV_beta_xs + 80.0; + lmbda_ptrs[3][j] = -1. 
/ AV_tau_xs; + + // # /* transient_outward_current_r_gate */ + AV_tau_r = 9.5 * exp((-pow(NV_Ith_S(y, 0) + 40.0, 2.0)) / 1800.0) + 0.8; + lmbda_ptrs[12][j] = -1. / AV_tau_r; + + // # /* transient_outward_current_s_gate */ + AV_tau_s = 85.0 * exp((-pow(NV_Ith_S(y, 0) + 45.0, 2.0)) / 320.0) + 5.0 / (1.0 + exp((NV_Ith_S(y, 0) - 20.0) / 5.0)) + 3.0; + lmbda_ptrs[11][j] = -1. / AV_tau_s; + } +} + +double TenTusscher2006_epi::rho_f_expl() +{ + return 6.5; +} + +#endif \ No newline at end of file diff --git a/pySDC/projects/Monodomain/problem_classes/ionicmodels/cpp/tentusscher_smooth.h b/pySDC/projects/Monodomain/problem_classes/ionicmodels/cpp/tentusscher_smooth.h new file mode 100644 index 0000000000..7469e94d2c --- /dev/null +++ b/pySDC/projects/Monodomain/problem_classes/ionicmodels/cpp/tentusscher_smooth.h @@ -0,0 +1,550 @@ +#include +#include + +#include +#include +#include + +#include "ionicmodel.h" + +#ifndef TENTUSSCHER_SMOOTH +#define TENTUSSCHER_SMOOTH + +/* +The original TenTusscher2006_epi model has if clauses in the right hand side, which makes it non-smooth. +This is the original TenTusscher2006_epi model, but where if clauses are removed in order to obtain a smooth right hand side. +This model is used only for convergence tests with high order methods, since with the original one the relative error (typically) stagnates at 1e-8. 
+*/ + +class TenTusscher2006_epi_smooth : public IonicModel +{ +public: + TenTusscher2006_epi_smooth(const double scale_); + ~TenTusscher2006_epi_smooth(){}; + void f(py::array_t &y, py::array_t &fy); + void f_expl(py::array_t &y, py::array_t &fy); + void lmbda_exp(py::array_t &y_list, py::array_t &lmbda_list); + void lmbda_yinf_exp(py::array_t &y_list, py::array_t &lmbda_list, py::array_t &yinf_list); + py::list initial_values(); + double rho_f_expl(); + +private: + double AC_Cm, AC_K_pCa, AC_g_pCa, AC_g_CaL, AC_g_bca, AC_Buf_c, AC_Buf_sr, AC_Buf_ss, AC_Ca_o, AC_EC, AC_K_buf_c, AC_K_buf_sr, AC_K_buf_ss, AC_K_up, AC_V_leak, AC_V_rel, AC_V_sr, AC_V_ss, AC_V_xfer, AC_Vmax_up, AC_k1_prime, AC_k2_prime, AC_k3, AC_k4, AC_max_sr, AC_min_sr, AC_g_Na, AC_g_K1, AC_F, AC_R, AC_T, AC_V_c, AC_stim_amplitude, AC_K_o, AC_g_pK, AC_g_Kr, AC_P_kna, AC_g_Ks, AC_g_bna, AC_K_NaCa, AC_K_sat, AC_Km_Ca, AC_Km_Nai, AC_alpha, AC_sodium_calcium_exchanger_current_gamma, AC_Na_o, AC_K_mNa, AC_K_mk, AC_P_NaK, AC_g_to; +}; + +TenTusscher2006_epi_smooth::TenTusscher2006_epi_smooth(const double scale_) + : IonicModel(scale_) +{ + size = 19; + + AC_Cm = 1.0; // 185.0; + AC_K_pCa = 0.0005; + AC_g_pCa = 0.1238; + AC_g_CaL = 0.0398; + AC_g_bca = 0.000592; + AC_Buf_c = 0.2; + AC_Buf_sr = 10.0; + AC_Buf_ss = 0.4; + AC_Ca_o = 2.0; + AC_EC = 1.5; + AC_K_buf_c = 0.001; + AC_K_buf_sr = 0.3; + AC_K_buf_ss = 0.00025; + AC_K_up = 0.00025; + AC_V_leak = 0.00036; + AC_V_rel = 0.102; + AC_V_sr = 1094.0; + AC_V_ss = 54.68; + AC_V_xfer = 0.0038; + AC_Vmax_up = 0.006375; + AC_k1_prime = 0.15; + AC_k2_prime = 0.045; + AC_k3 = 0.06; + AC_k4 = 0.005; + AC_max_sr = 2.5; + AC_min_sr = 1.0; + AC_g_Na = 14.838; + AC_g_K1 = 5.405; + AC_F = 96.485; + AC_R = 8.314; + AC_T = 310.0; + AC_V_c = 16404.0; + AC_stim_amplitude = (-52.0); + AC_K_o = 5.4; + AC_g_pK = 0.0146; + AC_g_Kr = 0.153; + AC_P_kna = 0.03; + AC_g_Ks = 0.392; + AC_g_bna = 0.00029; + AC_K_NaCa = 1000.0; + AC_K_sat = 0.1; + AC_Km_Ca = 1.38; + AC_Km_Nai = 87.5; + 
AC_alpha = 2.5; + AC_sodium_calcium_exchanger_current_gamma = 0.35; + AC_Na_o = 140.0; + AC_K_mNa = 40.0; + AC_K_mk = 1.0; + AC_P_NaK = 2.724; + AC_g_to = 0.294; + + assign(f_expl_args, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18}); + assign(f_exp_args, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 15}); + assign(f_expl_indeces, {0, 13, 14, 15, 16, 17, 18}); + assign(f_exp_indeces, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}); +} + +py::list TenTusscher2006_epi_smooth::initial_values() +{ + py::list y0(size); + y0[0] = -85.23; + y0[1] = 0.00621; + y0[2] = 0.4712; + y0[3] = 0.0095; + y0[4] = 0.00172; + y0[5] = 0.7444; + y0[6] = 0.7045; + y0[7] = 3.373e-05; + y0[8] = 0.7888; + y0[9] = 0.9755; + y0[10] = 0.9953; + y0[11] = 0.999998; + y0[12] = 2.42e-08; + y0[13] = 0.000126; + y0[14] = 3.64; + y0[15] = 0.00036; + y0[16] = 0.9073; + y0[17] = 8.604; + y0[18] = 136.89; + + return y0; +} + +void TenTusscher2006_epi_smooth::f(py::array_t &y_list, py::array_t &fy_list) +{ + double *y_ptrs[size]; + double *fy_ptrs[size]; + size_t N; + size_t n_dofs; + get_raw_data(y_list, y_ptrs, N, n_dofs); + get_raw_data(fy_list, fy_ptrs, N, n_dofs); + + double y[size]; + // # needed for linear in gating variables + double AV_alpha_d, AV_beta_d, AV_d_inf, AV_gamma_d, AV_tau_d, AV_f2_inf, AV_tau_f2, AV_fCass_inf, AV_tau_fCass, AV_f_inf, AV_tau_f; + double AV_alpha_h, AV_beta_h, AV_h_inf, AV_tau_h, AV_alpha_j, AV_beta_j, AV_j_inf, AV_tau_j, AV_alpha_m, AV_beta_m, AV_m_inf, AV_tau_m; + double AV_alpha_xr1, AV_beta_xr1, AV_xr1_inf, AV_tau_xr1, AV_alpha_xr2, AV_beta_xr2, AV_xr2_inf, AV_tau_xr2, AV_alpha_xs, AV_beta_xs, AV_xs_inf, AV_tau_xs; + double AV_r_inf, AV_tau_r, AV_s_inf, AV_tau_s; + // # needed for nonlinear in gating variables + double AV_f_JCa_i_free, AV_f_JCa_sr_free, AV_f_JCa_ss_free, AV_i_leak, AV_i_up, AV_i_xfer, AV_kcasr, AV_k1, AV_k2, AV_O, AV_i_rel, AV_ddt_Ca_sr_total; + double AV_E_Ca, AV_E_K, AV_i_NaK, AV_i_to, AV_i_p_Ca, AV_i_CaL, AV_i_b_Ca, AV_alpha_K1, 
AV_beta_K1, AV_i_p_K, AV_i_Kr, AV_E_Ks, AV_E_Na, AV_i_NaCa; + double AV_ddt_Ca_i_total, AV_ddt_Ca_ss_total, AV_i_Na, AV_i_K1, AV_xK1_inf, AV_i_Ks, AV_i_b_Na; + // Remember to scale the first variable!!! + for (unsigned j = 0; j < n_dofs; j++) + { + for (unsigned i = 0; i < size; i++) + y[i] = y_ptrs[i][j]; + + // # Linear in gating variables + + // # /* L_type_Ca_current_d_gate */ + AV_alpha_d = 1.4 / (1.0 + exp(((-35.0) - NV_Ith_S(y, 0)) / 13.0)) + 0.25; + AV_beta_d = 1.4 / (1.0 + exp((NV_Ith_S(y, 0) + 5.0) / 5.0)); + AV_d_inf = 1.0 / (1.0 + exp(((-8.0) - NV_Ith_S(y, 0)) / 7.5)); + AV_gamma_d = 1.0 / (1.0 + exp((50.0 - NV_Ith_S(y, 0)) / 20.0)); + AV_tau_d = 1.0 * AV_alpha_d * AV_beta_d + AV_gamma_d; + fy_ptrs[7][j] = (AV_d_inf - NV_Ith_S(y, 7)) / AV_tau_d; + + // # /* L_type_Ca_current_f2_gate */ + AV_f2_inf = 0.67 / (1.0 + exp((NV_Ith_S(y, 0) + 35.0) / 7.0)) + 0.33; + AV_tau_f2 = 562.0 * exp((-pow(NV_Ith_S(y, 0) + 27.0, 2.0)) / 240.0) + 31.0 / (1.0 + exp((25.0 - NV_Ith_S(y, 0)) / 10.0)) + 80.0 / (1.0 + exp((NV_Ith_S(y, 0) + 30.0) / 10.0)); + fy_ptrs[9][j] = (AV_f2_inf - NV_Ith_S(y, 9)) / AV_tau_f2; + + // # /* L_type_Ca_current_fCass_gate */ + AV_fCass_inf = 0.6 / (1.0 + pow(NV_Ith_S(y, 15) / 0.05, 2.0)) + 0.4; + AV_tau_fCass = 80.0 / (1.0 + pow(NV_Ith_S(y, 15) / 0.05, 2.0)) + 2.0; + fy_ptrs[10][j] = (AV_fCass_inf - NV_Ith_S(y, 10)) / AV_tau_fCass; + + // # /* L_type_Ca_current_f_gate */ + AV_f_inf = 1.0 / (1.0 + exp((NV_Ith_S(y, 0) + 20.0) / 7.0)); + AV_tau_f = 1102.5 * exp((-pow(NV_Ith_S(y, 0) + 27.0, 2.0)) / 225.0) + 200.0 / (1.0 + exp((13.0 - NV_Ith_S(y, 0)) / 10.0)) + 180.0 / (1.0 + exp((NV_Ith_S(y, 0) + 30.0) / 10.0)) + 20.0; + fy_ptrs[8][j] = (AV_f_inf - NV_Ith_S(y, 8)) / AV_tau_f; + + // # /* fast_sodium_current_h_gate */ + AV_alpha_h = 0.0; + AV_beta_h = 0.77 / (0.13 * (1.0 + exp((NV_Ith_S(y, 0) + 10.66) / (-11.1)))); + AV_h_inf = 1.0 / pow(1.0 + exp((NV_Ith_S(y, 0) + 71.55) / 7.43), 2.0); + AV_tau_h = 1.0 / (AV_alpha_h + AV_beta_h); + fy_ptrs[5][j] = 
(AV_h_inf - NV_Ith_S(y, 5)) / AV_tau_h; + + // # /* fast_sodium_current_j_gate */ + AV_alpha_j = 0.0; + AV_beta_j = 0.6 * exp(0.057 * NV_Ith_S(y, 0)) / (1.0 + exp((-0.1) * (NV_Ith_S(y, 0) + 32.0))); + AV_j_inf = 1.0 / pow(1.0 + exp((NV_Ith_S(y, 0) + 71.55) / 7.43), 2.0); + AV_tau_j = 1.0 / (AV_alpha_j + AV_beta_j); + fy_ptrs[6][j] = (AV_j_inf - NV_Ith_S(y, 6)) / AV_tau_j; + + // # /* fast_sodium_current_m_gate */ + AV_alpha_m = 1.0 / (1.0 + exp(((-60.0) - NV_Ith_S(y, 0)) / 5.0)); + AV_beta_m = 0.1 / (1.0 + exp((NV_Ith_S(y, 0) + 35.0) / 5.0)) + 0.1 / (1.0 + exp((NV_Ith_S(y, 0) - 50.0) / 200.0)); + AV_m_inf = 1.0 / pow(1.0 + exp(((-56.86) - NV_Ith_S(y, 0)) / 9.03), 2.0); + AV_tau_m = 1.0 * AV_alpha_m * AV_beta_m; + fy_ptrs[4][j] = (AV_m_inf - NV_Ith_S(y, 4)) / AV_tau_m; + + // # /* rapid_time_dependent_potassium_current_Xr1_gate */ + AV_alpha_xr1 = 450.0 / (1.0 + exp(((-45.0) - NV_Ith_S(y, 0)) / 10.0)); + AV_beta_xr1 = 6.0 / (1.0 + exp((NV_Ith_S(y, 0) + 30.0) / 11.5)); + AV_xr1_inf = 1.0 / (1.0 + exp(((-26.0) - NV_Ith_S(y, 0)) / 7.0)); + AV_tau_xr1 = 1.0 * AV_alpha_xr1 * AV_beta_xr1; + fy_ptrs[1][j] = (AV_xr1_inf - NV_Ith_S(y, 1)) / AV_tau_xr1; + + // # /* rapid_time_dependent_potassium_current_Xr2_gate */ + AV_alpha_xr2 = 3.0 / (1.0 + exp(((-60.0) - NV_Ith_S(y, 0)) / 20.0)); + AV_beta_xr2 = 1.12 / (1.0 + exp((NV_Ith_S(y, 0) - 60.0) / 20.0)); + AV_xr2_inf = 1.0 / (1.0 + exp((NV_Ith_S(y, 0) + 88.0) / 24.0)); + AV_tau_xr2 = 1.0 * AV_alpha_xr2 * AV_beta_xr2; + fy_ptrs[2][j] = (AV_xr2_inf - NV_Ith_S(y, 2)) / AV_tau_xr2; + + // # /* slow_time_dependent_potassium_current_Xs_gate */ + AV_alpha_xs = 1400.0 / sqrt(1.0 + exp((5.0 - NV_Ith_S(y, 0)) / 6.0)); + AV_beta_xs = 1.0 / (1.0 + exp((NV_Ith_S(y, 0) - 35.0) / 15.0)); + AV_xs_inf = 1.0 / (1.0 + exp(((-5.0) - NV_Ith_S(y, 0)) / 14.0)); + AV_tau_xs = 1.0 * AV_alpha_xs * AV_beta_xs + 80.0; + fy_ptrs[3][j] = (AV_xs_inf - NV_Ith_S(y, 3)) / AV_tau_xs; + + // # /* transient_outward_current_r_gate */ + AV_r_inf = 1.0 / (1.0 + 
exp((20.0 - NV_Ith_S(y, 0)) / 6.0)); + AV_tau_r = 9.5 * exp((-pow(NV_Ith_S(y, 0) + 40.0, 2.0)) / 1800.0) + 0.8; + fy_ptrs[12][j] = (AV_r_inf - NV_Ith_S(y, 12)) / AV_tau_r; + + // # /* transient_outward_current_s_gate */ + AV_s_inf = 1.0 / (1.0 + exp((NV_Ith_S(y, 0) + 20.0) / 5.0)); + AV_tau_s = 85.0 * exp((-pow(NV_Ith_S(y, 0) + 45.0, 2.0)) / 320.0) + 5.0 / (1.0 + exp((NV_Ith_S(y, 0) - 20.0) / 5.0)) + 3.0; + fy_ptrs[11][j] = (AV_s_inf - NV_Ith_S(y, 11)) / AV_tau_s; + + // # Non linear in gating variables + + // # /* calcium_dynamics */ + AV_f_JCa_i_free = 1.0 / (1.0 + AC_Buf_c * AC_K_buf_c / pow(NV_Ith_S(y, 13) + AC_K_buf_c, 2.0)); + AV_f_JCa_sr_free = 1.0 / (1.0 + AC_Buf_sr * AC_K_buf_sr / pow(NV_Ith_S(y, 14) + AC_K_buf_sr, 2.0)); + AV_f_JCa_ss_free = 1.0 / (1.0 + AC_Buf_ss * AC_K_buf_ss / pow(NV_Ith_S(y, 15) + AC_K_buf_ss, 2.0)); + AV_i_leak = AC_V_leak * (NV_Ith_S(y, 14) - NV_Ith_S(y, 13)); + AV_i_up = AC_Vmax_up / (1.0 + pow(AC_K_up, 2.0) / pow(NV_Ith_S(y, 13), 2.0)); + AV_i_xfer = AC_V_xfer * (NV_Ith_S(y, 15) - NV_Ith_S(y, 13)); + AV_kcasr = AC_max_sr - (AC_max_sr - AC_min_sr) / (1.0 + pow(AC_EC / NV_Ith_S(y, 14), 2.0)); + AV_k1 = AC_k1_prime / AV_kcasr; + AV_k2 = AC_k2_prime * AV_kcasr; + AV_O = AV_k1 * pow(NV_Ith_S(y, 15), 2.0) * NV_Ith_S(y, 16) / (AC_k3 + AV_k1 * pow(NV_Ith_S(y, 15), 2.0)); + fy_ptrs[16][j] = (-AV_k2) * NV_Ith_S(y, 15) * NV_Ith_S(y, 16) + AC_k4 * (1.0 - NV_Ith_S(y, 16)); + AV_i_rel = AC_V_rel * AV_O * (NV_Ith_S(y, 14) - NV_Ith_S(y, 15)); + AV_ddt_Ca_sr_total = AV_i_up - (AV_i_rel + AV_i_leak); + fy_ptrs[14][j] = AV_ddt_Ca_sr_total * AV_f_JCa_sr_free; + + // # /* reversal_potentials */ + AV_E_Ca = 0.5 * AC_R * AC_T / AC_F * log(AC_Ca_o / NV_Ith_S(y, 13)); + AV_E_K = AC_R * AC_T / AC_F * log(AC_K_o / NV_Ith_S(y, 18)); + + // # /* sodium_potassium_pump_current */ + AV_i_NaK = AC_P_NaK * AC_K_o / (AC_K_o + AC_K_mk) * NV_Ith_S(y, 17) / (NV_Ith_S(y, 17) + AC_K_mNa) / (1.0 + 0.1245 * exp((-0.1) * NV_Ith_S(y, 0) * AC_F / (AC_R * AC_T)) + 0.0353 * 
exp((-NV_Ith_S(y, 0)) * AC_F / (AC_R * AC_T))); + + // # /* transient_outward_current */ + AV_i_to = AC_g_to * NV_Ith_S(y, 12) * NV_Ith_S(y, 11) * (NV_Ith_S(y, 0) - AV_E_K); + + // # /* calcium_pump_current */ + AV_i_p_Ca = AC_g_pCa * NV_Ith_S(y, 13) / (NV_Ith_S(y, 13) + AC_K_pCa); + + // # /* *remaining* */ + AV_i_CaL = AC_g_CaL * NV_Ith_S(y, 7) * NV_Ith_S(y, 8) * NV_Ith_S(y, 9) * NV_Ith_S(y, 10) * 4.0 * (NV_Ith_S(y, 0) - 15.0) * pow(AC_F, 2.0) / (AC_R * AC_T) * (0.25 * NV_Ith_S(y, 15) * exp(2.0 * (NV_Ith_S(y, 0) - 15.0) * AC_F / (AC_R * AC_T)) - AC_Ca_o) / (exp(2.0 * (NV_Ith_S(y, 0) - 15.0) * AC_F / (AC_R * AC_T)) - 1.0); + AV_i_b_Ca = AC_g_bca * (NV_Ith_S(y, 0) - AV_E_Ca); + AV_alpha_K1 = 0.1 / (1.0 + exp(0.06 * (NV_Ith_S(y, 0) - AV_E_K - 200.0))); + AV_beta_K1 = (3.0 * exp(0.0002 * (NV_Ith_S(y, 0) - AV_E_K + 100.0)) + exp(0.1 * (NV_Ith_S(y, 0) - AV_E_K - 10.0))) / (1.0 + exp((-0.5) * (NV_Ith_S(y, 0) - AV_E_K))); + AV_i_p_K = AC_g_pK * (NV_Ith_S(y, 0) - AV_E_K) / (1.0 + exp((25.0 - NV_Ith_S(y, 0)) / 5.98)); + AV_i_Kr = AC_g_Kr * sqrt(AC_K_o / 5.4) * NV_Ith_S(y, 1) * NV_Ith_S(y, 2) * (NV_Ith_S(y, 0) - AV_E_K); + AV_E_Ks = AC_R * AC_T / AC_F * log((AC_K_o + AC_P_kna * AC_Na_o) / (NV_Ith_S(y, 18) + AC_P_kna * NV_Ith_S(y, 17))); + AV_E_Na = AC_R * AC_T / AC_F * log(AC_Na_o / NV_Ith_S(y, 17)); + AV_i_NaCa = AC_K_NaCa * (exp(AC_sodium_calcium_exchanger_current_gamma * NV_Ith_S(y, 0) * AC_F / (AC_R * AC_T)) * pow(NV_Ith_S(y, 17), 3.0) * AC_Ca_o - exp((AC_sodium_calcium_exchanger_current_gamma - 1.0) * NV_Ith_S(y, 0) * AC_F / (AC_R * AC_T)) * pow(AC_Na_o, 3.0) * NV_Ith_S(y, 13) * AC_alpha) / ((pow(AC_Km_Nai, 3.0) + pow(AC_Na_o, 3.0)) * (AC_Km_Ca + AC_Ca_o) * (1.0 + AC_K_sat * exp((AC_sodium_calcium_exchanger_current_gamma - 1.0) * NV_Ith_S(y, 0) * AC_F / (AC_R * AC_T)))); + AV_ddt_Ca_i_total = (-(AV_i_b_Ca + AV_i_p_Ca - 2.0 * AV_i_NaCa)) * AC_Cm / (2.0 * AC_V_c * AC_F) + (AV_i_leak - AV_i_up) * AC_V_sr / AC_V_c + AV_i_xfer; + AV_ddt_Ca_ss_total = (-AV_i_CaL) * AC_Cm / 
(2.0 * AC_V_ss * AC_F) + AV_i_rel * AC_V_sr / AC_V_ss - AV_i_xfer * AC_V_c / AC_V_ss; + AV_i_Na = AC_g_Na * pow(NV_Ith_S(y, 4), 3.0) * NV_Ith_S(y, 5) * NV_Ith_S(y, 6) * (NV_Ith_S(y, 0) - AV_E_Na); + AV_xK1_inf = AV_alpha_K1 / (AV_alpha_K1 + AV_beta_K1); + AV_i_Ks = AC_g_Ks * pow(NV_Ith_S(y, 3), 2.0) * (NV_Ith_S(y, 0) - AV_E_Ks); + AV_i_b_Na = AC_g_bna * (NV_Ith_S(y, 0) - AV_E_Na); + fy_ptrs[13][j] = AV_ddt_Ca_i_total * AV_f_JCa_i_free; + fy_ptrs[15][j] = AV_ddt_Ca_ss_total * AV_f_JCa_ss_free; + AV_i_K1 = AC_g_K1 * AV_xK1_inf * sqrt(AC_K_o / 5.4) * (NV_Ith_S(y, 0) - AV_E_K); + fy_ptrs[17][j] = (-(AV_i_Na + AV_i_b_Na + 3.0 * AV_i_NaK + 3.0 * AV_i_NaCa)) / (AC_V_c * AC_F) * AC_Cm; + fy_ptrs[0][j] = scale * (-(AV_i_K1 + AV_i_to + AV_i_Kr + AV_i_Ks + AV_i_CaL + AV_i_NaK + AV_i_Na + AV_i_b_Na + AV_i_NaCa + AV_i_b_Ca + AV_i_p_K + AV_i_p_Ca)); + fy_ptrs[18][j] = (-(AV_i_K1 + AV_i_to + AV_i_Kr + AV_i_Ks + AV_i_p_K - 2.0 * AV_i_NaK)) / (AC_V_c * AC_F) * AC_Cm; + } +} + +void TenTusscher2006_epi_smooth::f_expl(py::array_t &y_list, py::array_t &fy_list) +{ + double *y_ptrs[size]; + double *fy_ptrs[size]; + size_t N; + size_t n_dofs; + get_raw_data(y_list, y_ptrs, N, n_dofs); + get_raw_data(fy_list, fy_ptrs, N, n_dofs); + + double y[size]; + // # needed for nonlinear in gating variables + double AV_f_JCa_i_free, AV_f_JCa_sr_free, AV_f_JCa_ss_free, AV_i_leak, AV_i_up, AV_i_xfer, AV_kcasr, AV_k1, AV_k2, AV_O, AV_i_rel, AV_ddt_Ca_sr_total; + double AV_E_Ca, AV_E_K, AV_i_NaK, AV_i_to, AV_i_p_Ca, AV_i_CaL, AV_i_b_Ca, AV_alpha_K1, AV_beta_K1, AV_i_p_K, AV_i_Kr, AV_E_Ks, AV_E_Na, AV_i_NaCa; + double AV_ddt_Ca_i_total, AV_ddt_Ca_ss_total, AV_i_Na, AV_i_K1, AV_xK1_inf, AV_i_Ks, AV_i_b_Na; + // Remember to scale the first variable!!! 
+ for (unsigned j = 0; j < n_dofs; j++) + { + for (unsigned i = 0; i < size; i++) + y[i] = y_ptrs[i][j]; + + // # Non linear in gating variables + + // # /* calcium_dynamics */ + AV_f_JCa_i_free = 1.0 / (1.0 + AC_Buf_c * AC_K_buf_c / pow(NV_Ith_S(y, 13) + AC_K_buf_c, 2.0)); + AV_f_JCa_sr_free = 1.0 / (1.0 + AC_Buf_sr * AC_K_buf_sr / pow(NV_Ith_S(y, 14) + AC_K_buf_sr, 2.0)); + AV_f_JCa_ss_free = 1.0 / (1.0 + AC_Buf_ss * AC_K_buf_ss / pow(NV_Ith_S(y, 15) + AC_K_buf_ss, 2.0)); + AV_i_leak = AC_V_leak * (NV_Ith_S(y, 14) - NV_Ith_S(y, 13)); + AV_i_up = AC_Vmax_up / (1.0 + pow(AC_K_up, 2.0) / pow(NV_Ith_S(y, 13), 2.0)); + AV_i_xfer = AC_V_xfer * (NV_Ith_S(y, 15) - NV_Ith_S(y, 13)); + AV_kcasr = AC_max_sr - (AC_max_sr - AC_min_sr) / (1.0 + pow(AC_EC / NV_Ith_S(y, 14), 2.0)); + AV_k1 = AC_k1_prime / AV_kcasr; + AV_k2 = AC_k2_prime * AV_kcasr; + AV_O = AV_k1 * pow(NV_Ith_S(y, 15), 2.0) * NV_Ith_S(y, 16) / (AC_k3 + AV_k1 * pow(NV_Ith_S(y, 15), 2.0)); + fy_ptrs[16][j] = (-AV_k2) * NV_Ith_S(y, 15) * NV_Ith_S(y, 16) + AC_k4 * (1.0 - NV_Ith_S(y, 16)); + AV_i_rel = AC_V_rel * AV_O * (NV_Ith_S(y, 14) - NV_Ith_S(y, 15)); + AV_ddt_Ca_sr_total = AV_i_up - (AV_i_rel + AV_i_leak); + fy_ptrs[14][j] = AV_ddt_Ca_sr_total * AV_f_JCa_sr_free; + + // # /* reversal_potentials */ + AV_E_Ca = 0.5 * AC_R * AC_T / AC_F * log(AC_Ca_o / NV_Ith_S(y, 13)); + AV_E_K = AC_R * AC_T / AC_F * log(AC_K_o / NV_Ith_S(y, 18)); + + // # /* sodium_potassium_pump_current */ + AV_i_NaK = AC_P_NaK * AC_K_o / (AC_K_o + AC_K_mk) * NV_Ith_S(y, 17) / (NV_Ith_S(y, 17) + AC_K_mNa) / (1.0 + 0.1245 * exp((-0.1) * NV_Ith_S(y, 0) * AC_F / (AC_R * AC_T)) + 0.0353 * exp((-NV_Ith_S(y, 0)) * AC_F / (AC_R * AC_T))); + + // # /* transient_outward_current */ + AV_i_to = AC_g_to * NV_Ith_S(y, 12) * NV_Ith_S(y, 11) * (NV_Ith_S(y, 0) - AV_E_K); + + // # /* calcium_pump_current */ + AV_i_p_Ca = AC_g_pCa * NV_Ith_S(y, 13) / (NV_Ith_S(y, 13) + AC_K_pCa); + + // # /* *remaining* */ + AV_i_CaL = AC_g_CaL * NV_Ith_S(y, 7) * NV_Ith_S(y, 8) 
* NV_Ith_S(y, 9) * NV_Ith_S(y, 10) * 4.0 * (NV_Ith_S(y, 0) - 15.0) * pow(AC_F, 2.0) / (AC_R * AC_T) * (0.25 * NV_Ith_S(y, 15) * exp(2.0 * (NV_Ith_S(y, 0) - 15.0) * AC_F / (AC_R * AC_T)) - AC_Ca_o) / (exp(2.0 * (NV_Ith_S(y, 0) - 15.0) * AC_F / (AC_R * AC_T)) - 1.0); + AV_i_b_Ca = AC_g_bca * (NV_Ith_S(y, 0) - AV_E_Ca); + AV_alpha_K1 = 0.1 / (1.0 + exp(0.06 * (NV_Ith_S(y, 0) - AV_E_K - 200.0))); + AV_beta_K1 = (3.0 * exp(0.0002 * (NV_Ith_S(y, 0) - AV_E_K + 100.0)) + exp(0.1 * (NV_Ith_S(y, 0) - AV_E_K - 10.0))) / (1.0 + exp((-0.5) * (NV_Ith_S(y, 0) - AV_E_K))); + AV_i_p_K = AC_g_pK * (NV_Ith_S(y, 0) - AV_E_K) / (1.0 + exp((25.0 - NV_Ith_S(y, 0)) / 5.98)); + AV_i_Kr = AC_g_Kr * sqrt(AC_K_o / 5.4) * NV_Ith_S(y, 1) * NV_Ith_S(y, 2) * (NV_Ith_S(y, 0) - AV_E_K); + AV_E_Ks = AC_R * AC_T / AC_F * log((AC_K_o + AC_P_kna * AC_Na_o) / (NV_Ith_S(y, 18) + AC_P_kna * NV_Ith_S(y, 17))); + AV_E_Na = AC_R * AC_T / AC_F * log(AC_Na_o / NV_Ith_S(y, 17)); + AV_i_NaCa = AC_K_NaCa * (exp(AC_sodium_calcium_exchanger_current_gamma * NV_Ith_S(y, 0) * AC_F / (AC_R * AC_T)) * pow(NV_Ith_S(y, 17), 3.0) * AC_Ca_o - exp((AC_sodium_calcium_exchanger_current_gamma - 1.0) * NV_Ith_S(y, 0) * AC_F / (AC_R * AC_T)) * pow(AC_Na_o, 3.0) * NV_Ith_S(y, 13) * AC_alpha) / ((pow(AC_Km_Nai, 3.0) + pow(AC_Na_o, 3.0)) * (AC_Km_Ca + AC_Ca_o) * (1.0 + AC_K_sat * exp((AC_sodium_calcium_exchanger_current_gamma - 1.0) * NV_Ith_S(y, 0) * AC_F / (AC_R * AC_T)))); + AV_ddt_Ca_i_total = (-(AV_i_b_Ca + AV_i_p_Ca - 2.0 * AV_i_NaCa)) * AC_Cm / (2.0 * AC_V_c * AC_F) + (AV_i_leak - AV_i_up) * AC_V_sr / AC_V_c + AV_i_xfer; + AV_ddt_Ca_ss_total = (-AV_i_CaL) * AC_Cm / (2.0 * AC_V_ss * AC_F) + AV_i_rel * AC_V_sr / AC_V_ss - AV_i_xfer * AC_V_c / AC_V_ss; + AV_i_Na = AC_g_Na * pow(NV_Ith_S(y, 4), 3.0) * NV_Ith_S(y, 5) * NV_Ith_S(y, 6) * (NV_Ith_S(y, 0) - AV_E_Na); + AV_xK1_inf = AV_alpha_K1 / (AV_alpha_K1 + AV_beta_K1); + AV_i_Ks = AC_g_Ks * pow(NV_Ith_S(y, 3), 2.0) * (NV_Ith_S(y, 0) - AV_E_Ks); + AV_i_b_Na = AC_g_bna * 
(NV_Ith_S(y, 0) - AV_E_Na); + fy_ptrs[13][j] = AV_ddt_Ca_i_total * AV_f_JCa_i_free; + fy_ptrs[15][j] = AV_ddt_Ca_ss_total * AV_f_JCa_ss_free; + AV_i_K1 = AC_g_K1 * AV_xK1_inf * sqrt(AC_K_o / 5.4) * (NV_Ith_S(y, 0) - AV_E_K); + fy_ptrs[17][j] = (-(AV_i_Na + AV_i_b_Na + 3.0 * AV_i_NaK + 3.0 * AV_i_NaCa)) / (AC_V_c * AC_F) * AC_Cm; + fy_ptrs[0][j] = scale * (-(AV_i_K1 + AV_i_to + AV_i_Kr + AV_i_Ks + AV_i_CaL + AV_i_NaK + AV_i_Na + AV_i_b_Na + AV_i_NaCa + AV_i_b_Ca + AV_i_p_K + AV_i_p_Ca)); + fy_ptrs[18][j] = (-(AV_i_K1 + AV_i_to + AV_i_Kr + AV_i_Ks + AV_i_p_K - 2.0 * AV_i_NaK)) / (AC_V_c * AC_F) * AC_Cm; + } +} + +void TenTusscher2006_epi_smooth::lmbda_yinf_exp(py::array_t &y_list, py::array_t &lmbda_list, py::array_t &yinf_list) +{ + double *y_ptrs[size]; + double *lmbda_ptrs[size]; + double *yinf_ptrs[size]; + size_t N; + size_t n_dofs; + get_raw_data(y_list, y_ptrs, N, n_dofs); + get_raw_data(lmbda_list, lmbda_ptrs, N, n_dofs); + get_raw_data(yinf_list, yinf_ptrs, N, n_dofs); + + double y[size]; + double AV_alpha_d, AV_beta_d, AV_gamma_d, AV_tau_d, AV_tau_f2, AV_tau_fCass, AV_tau_f; + double AV_alpha_h, AV_beta_h, AV_tau_h, AV_alpha_j, AV_beta_j, AV_tau_j, AV_alpha_m, AV_beta_m, AV_tau_m; + double AV_alpha_xr1, AV_beta_xr1, AV_tau_xr1, AV_alpha_xr2, AV_beta_xr2, AV_tau_xr2, AV_alpha_xs, AV_beta_xs, AV_tau_xs; + double AV_tau_r, AV_tau_s; + // Remember to scale the first variable!!! + for (unsigned j = 0; j < n_dofs; j++) + { + for (unsigned i = 0; i < size; i++) + y[i] = y_ptrs[i][j]; + + // # Linear in gating variables + + // # /* L_type_Ca_current_d_gate */ + AV_alpha_d = 1.4 / (1.0 + exp(((-35.0) - NV_Ith_S(y, 0)) / 13.0)) + 0.25; + AV_beta_d = 1.4 / (1.0 + exp((NV_Ith_S(y, 0) + 5.0) / 5.0)); + yinf_ptrs[7][j] = 1.0 / (1.0 + exp(((-8.0) - NV_Ith_S(y, 0)) / 7.5)); + AV_gamma_d = 1.0 / (1.0 + exp((50.0 - NV_Ith_S(y, 0)) / 20.0)); + AV_tau_d = 1.0 * AV_alpha_d * AV_beta_d + AV_gamma_d; + lmbda_ptrs[7][j] = -1. 
/ AV_tau_d; + + // # /* L_type_Ca_current_f2_gate */ + yinf_ptrs[9][j] = 0.67 / (1.0 + exp((NV_Ith_S(y, 0) + 35.0) / 7.0)) + 0.33; + AV_tau_f2 = 562.0 * exp((-pow(NV_Ith_S(y, 0) + 27.0, 2.0)) / 240.0) + 31.0 / (1.0 + exp((25.0 - NV_Ith_S(y, 0)) / 10.0)) + 80.0 / (1.0 + exp((NV_Ith_S(y, 0) + 30.0) / 10.0)); + lmbda_ptrs[9][j] = -1. / AV_tau_f2; + + // # /* L_type_Ca_current_fCass_gate */ + yinf_ptrs[10][j] = 0.6 / (1.0 + pow(NV_Ith_S(y, 15) / 0.05, 2.0)) + 0.4; + AV_tau_fCass = 80.0 / (1.0 + pow(NV_Ith_S(y, 15) / 0.05, 2.0)) + 2.0; + lmbda_ptrs[10][j] = -1. / AV_tau_fCass; + + // # /* L_type_Ca_current_f_gate */ + yinf_ptrs[8][j] = 1.0 / (1.0 + exp((NV_Ith_S(y, 0) + 20.0) / 7.0)); + AV_tau_f = 1102.5 * exp((-pow(NV_Ith_S(y, 0) + 27.0, 2.0)) / 225.0) + 200.0 / (1.0 + exp((13.0 - NV_Ith_S(y, 0)) / 10.0)) + 180.0 / (1.0 + exp((NV_Ith_S(y, 0) + 30.0) / 10.0)) + 20.0; + lmbda_ptrs[8][j] = -1. / AV_tau_f; + + // # /* fast_sodium_current_h_gate */ + AV_alpha_h = 0.0; + AV_beta_h = 0.77 / (0.13 * (1.0 + exp((NV_Ith_S(y, 0) + 10.66) / (-11.1)))); + yinf_ptrs[5][j] = 1.0 / pow(1.0 + exp((NV_Ith_S(y, 0) + 71.55) / 7.43), 2.0); + AV_tau_h = 1.0 / (AV_alpha_h + AV_beta_h); + lmbda_ptrs[5][j] = -1. / AV_tau_h; + + // # /* fast_sodium_current_j_gate */ + AV_alpha_j = 0.0; + AV_beta_j = 0.6 * exp(0.057 * NV_Ith_S(y, 0)) / (1.0 + exp((-0.1) * (NV_Ith_S(y, 0) + 32.0))); + yinf_ptrs[6][j] = 1.0 / pow(1.0 + exp((NV_Ith_S(y, 0) + 71.55) / 7.43), 2.0); + AV_tau_j = 1.0 / (AV_alpha_j + AV_beta_j); + lmbda_ptrs[6][j] = -1. / AV_tau_j; + + // # /* fast_sodium_current_m_gate */ + AV_alpha_m = 1.0 / (1.0 + exp(((-60.0) - NV_Ith_S(y, 0)) / 5.0)); + AV_beta_m = 0.1 / (1.0 + exp((NV_Ith_S(y, 0) + 35.0) / 5.0)) + 0.1 / (1.0 + exp((NV_Ith_S(y, 0) - 50.0) / 200.0)); + yinf_ptrs[4][j] = 1.0 / pow(1.0 + exp(((-56.86) - NV_Ith_S(y, 0)) / 9.03), 2.0); + AV_tau_m = 1.0 * AV_alpha_m * AV_beta_m; + lmbda_ptrs[4][j] = -1. 
/ AV_tau_m; + + // # /* rapid_time_dependent_potassium_current_Xr1_gate */ + AV_alpha_xr1 = 450.0 / (1.0 + exp(((-45.0) - NV_Ith_S(y, 0)) / 10.0)); + AV_beta_xr1 = 6.0 / (1.0 + exp((NV_Ith_S(y, 0) + 30.0) / 11.5)); + yinf_ptrs[1][j] = 1.0 / (1.0 + exp(((-26.0) - NV_Ith_S(y, 0)) / 7.0)); + AV_tau_xr1 = 1.0 * AV_alpha_xr1 * AV_beta_xr1; + lmbda_ptrs[1][j] = -1. / AV_tau_xr1; + + // # /* rapid_time_dependent_potassium_current_Xr2_gate */ + AV_alpha_xr2 = 3.0 / (1.0 + exp(((-60.0) - NV_Ith_S(y, 0)) / 20.0)); + AV_beta_xr2 = 1.12 / (1.0 + exp((NV_Ith_S(y, 0) - 60.0) / 20.0)); + yinf_ptrs[2][j] = 1.0 / (1.0 + exp((NV_Ith_S(y, 0) + 88.0) / 24.0)); + AV_tau_xr2 = 1.0 * AV_alpha_xr2 * AV_beta_xr2; + lmbda_ptrs[2][j] = -1. / AV_tau_xr2; + + // # /* slow_time_dependent_potassium_current_Xs_gate */ + AV_alpha_xs = 1400.0 / sqrt(1.0 + exp((5.0 - NV_Ith_S(y, 0)) / 6.0)); + AV_beta_xs = 1.0 / (1.0 + exp((NV_Ith_S(y, 0) - 35.0) / 15.0)); + yinf_ptrs[3][j] = 1.0 / (1.0 + exp(((-5.0) - NV_Ith_S(y, 0)) / 14.0)); + AV_tau_xs = 1.0 * AV_alpha_xs * AV_beta_xs + 80.0; + lmbda_ptrs[3][j] = -1. / AV_tau_xs; + + // # /* transient_outward_current_r_gate */ + yinf_ptrs[12][j] = 1.0 / (1.0 + exp((20.0 - NV_Ith_S(y, 0)) / 6.0)); + AV_tau_r = 9.5 * exp((-pow(NV_Ith_S(y, 0) + 40.0, 2.0)) / 1800.0) + 0.8; + lmbda_ptrs[12][j] = -1. / AV_tau_r; + + // # /* transient_outward_current_s_gate */ + yinf_ptrs[11][j] = 1.0 / (1.0 + exp((NV_Ith_S(y, 0) + 20.0) / 5.0)); + AV_tau_s = 85.0 * exp((-pow(NV_Ith_S(y, 0) + 45.0, 2.0)) / 320.0) + 5.0 / (1.0 + exp((NV_Ith_S(y, 0) - 20.0) / 5.0)) + 3.0; + lmbda_ptrs[11][j] = -1. 
/ AV_tau_s; + } +} + +void TenTusscher2006_epi_smooth::lmbda_exp(py::array_t &y_list, py::array_t &lmbda_list) +{ + double *y_ptrs[size]; + double *lmbda_ptrs[size]; + size_t N; + size_t n_dofs; + get_raw_data(y_list, y_ptrs, N, n_dofs); + get_raw_data(lmbda_list, lmbda_ptrs, N, n_dofs); + + double y[size]; + double AV_alpha_d, AV_beta_d, AV_gamma_d, AV_tau_d, AV_tau_f2, AV_tau_fCass, AV_tau_f; + double AV_alpha_h, AV_beta_h, AV_tau_h, AV_alpha_j, AV_beta_j, AV_tau_j, AV_alpha_m, AV_beta_m, AV_tau_m; + double AV_alpha_xr1, AV_beta_xr1, AV_tau_xr1, AV_alpha_xr2, AV_beta_xr2, AV_tau_xr2, AV_alpha_xs, AV_beta_xs, AV_tau_xs; + double AV_tau_r, AV_tau_s; + // Remember to scale the first variable!!! + for (unsigned j = 0; j < n_dofs; j++) + { + for (unsigned i = 0; i < size; i++) + y[i] = y_ptrs[i][j]; + + // # Linear in gating variables + + // # /* L_type_Ca_current_d_gate */ + AV_alpha_d = 1.4 / (1.0 + exp(((-35.0) - NV_Ith_S(y, 0)) / 13.0)) + 0.25; + AV_beta_d = 1.4 / (1.0 + exp((NV_Ith_S(y, 0) + 5.0) / 5.0)); + AV_gamma_d = 1.0 / (1.0 + exp((50.0 - NV_Ith_S(y, 0)) / 20.0)); + AV_tau_d = 1.0 * AV_alpha_d * AV_beta_d + AV_gamma_d; + lmbda_ptrs[7][j] = -1. / AV_tau_d; + + // # /* L_type_Ca_current_f2_gate */ + AV_tau_f2 = 562.0 * exp((-pow(NV_Ith_S(y, 0) + 27.0, 2.0)) / 240.0) + 31.0 / (1.0 + exp((25.0 - NV_Ith_S(y, 0)) / 10.0)) + 80.0 / (1.0 + exp((NV_Ith_S(y, 0) + 30.0) / 10.0)); + lmbda_ptrs[9][j] = -1. / AV_tau_f2; + + // # /* L_type_Ca_current_fCass_gate */ + AV_tau_fCass = 80.0 / (1.0 + pow(NV_Ith_S(y, 15) / 0.05, 2.0)) + 2.0; + lmbda_ptrs[10][j] = -1. / AV_tau_fCass; + + // # /* L_type_Ca_current_f_gate */ + AV_tau_f = 1102.5 * exp((-pow(NV_Ith_S(y, 0) + 27.0, 2.0)) / 225.0) + 200.0 / (1.0 + exp((13.0 - NV_Ith_S(y, 0)) / 10.0)) + 180.0 / (1.0 + exp((NV_Ith_S(y, 0) + 30.0) / 10.0)) + 20.0; + lmbda_ptrs[8][j] = -1. 
/ AV_tau_f; + + // # /* fast_sodium_current_h_gate */ + AV_alpha_h = 0.0; + AV_beta_h = 0.77 / (0.13 * (1.0 + exp((NV_Ith_S(y, 0) + 10.66) / (-11.1)))); + AV_tau_h = 1.0 / (AV_alpha_h + AV_beta_h); + lmbda_ptrs[5][j] = -1. / AV_tau_h; + + // # /* fast_sodium_current_j_gate */ + AV_alpha_j = 0.0; + AV_beta_j = 0.6 * exp(0.057 * NV_Ith_S(y, 0)) / (1.0 + exp((-0.1) * (NV_Ith_S(y, 0) + 32.0))); + AV_tau_j = 1.0 / (AV_alpha_j + AV_beta_j); + lmbda_ptrs[6][j] = -1. / AV_tau_j; + + // # /* fast_sodium_current_m_gate */ + AV_alpha_m = 1.0 / (1.0 + exp(((-60.0) - NV_Ith_S(y, 0)) / 5.0)); + AV_beta_m = 0.1 / (1.0 + exp((NV_Ith_S(y, 0) + 35.0) / 5.0)) + 0.1 / (1.0 + exp((NV_Ith_S(y, 0) - 50.0) / 200.0)); + AV_tau_m = 1.0 * AV_alpha_m * AV_beta_m; + lmbda_ptrs[4][j] = -1. / AV_tau_m; + + // # /* rapid_time_dependent_potassium_current_Xr1_gate */ + AV_alpha_xr1 = 450.0 / (1.0 + exp(((-45.0) - NV_Ith_S(y, 0)) / 10.0)); + AV_beta_xr1 = 6.0 / (1.0 + exp((NV_Ith_S(y, 0) + 30.0) / 11.5)); + AV_tau_xr1 = 1.0 * AV_alpha_xr1 * AV_beta_xr1; + lmbda_ptrs[1][j] = -1. / AV_tau_xr1; + + // # /* rapid_time_dependent_potassium_current_Xr2_gate */ + AV_alpha_xr2 = 3.0 / (1.0 + exp(((-60.0) - NV_Ith_S(y, 0)) / 20.0)); + AV_beta_xr2 = 1.12 / (1.0 + exp((NV_Ith_S(y, 0) - 60.0) / 20.0)); + AV_tau_xr2 = 1.0 * AV_alpha_xr2 * AV_beta_xr2; + lmbda_ptrs[2][j] = -1. / AV_tau_xr2; + + // # /* slow_time_dependent_potassium_current_Xs_gate */ + AV_alpha_xs = 1400.0 / sqrt(1.0 + exp((5.0 - NV_Ith_S(y, 0)) / 6.0)); + AV_beta_xs = 1.0 / (1.0 + exp((NV_Ith_S(y, 0) - 35.0) / 15.0)); + AV_tau_xs = 1.0 * AV_alpha_xs * AV_beta_xs + 80.0; + lmbda_ptrs[3][j] = -1. / AV_tau_xs; + + // # /* transient_outward_current_r_gate */ + AV_tau_r = 9.5 * exp((-pow(NV_Ith_S(y, 0) + 40.0, 2.0)) / 1800.0) + 0.8; + lmbda_ptrs[12][j] = -1. 
/ AV_tau_r; + + // # /* transient_outward_current_s_gate */ + AV_tau_s = 85.0 * exp((-pow(NV_Ith_S(y, 0) + 45.0, 2.0)) / 320.0) + 5.0 / (1.0 + exp((NV_Ith_S(y, 0) - 20.0) / 5.0)) + 3.0; + lmbda_ptrs[11][j] = -1. / AV_tau_s; + } +} + +double TenTusscher2006_epi_smooth::rho_f_expl() +{ + return 6.5; +} + +#endif \ No newline at end of file diff --git a/pySDC/projects/Monodomain/problem_classes/space_discretizazions/Parabolic_DCT.py b/pySDC/projects/Monodomain/problem_classes/space_discretizazions/Parabolic_DCT.py new file mode 100644 index 0000000000..20156b95c7 --- /dev/null +++ b/pySDC/projects/Monodomain/problem_classes/space_discretizazions/Parabolic_DCT.py @@ -0,0 +1,340 @@ +import numpy as np +import scipy as sp +from pySDC.core.Common import RegisterParams +from pySDC.implementations.datatype_classes.mesh import mesh +from pathlib import Path +import os + + +class Parabolic_DCT(RegisterParams): + """ + A class for the spatial discretization of the parabolic part of the monodomain equation. + Here we discretize the spatial domain with a uniform mesh and use the discrete cosine transform (DCT) + to discretize the Laplacian operator. The DCT is a real-to-real type of Fourier transform that is well suited for + Neumann boundary conditions. 
+ + Parameters: + ----------- + problem_params: dict containing the problem parameters + + Attributes: + ----------- + chi: float + Surface-to-volume ratio of the cell membrane + Cm: float + Membrane capacitance + si_l: float + Longitudinal intracellular conductivity + se_l: float + Longitudinal extracellular conductivity + si_t: float + Transversal intracellular conductivity + se_t: float + Transversal extracellular conductivity + sigma_l: float + Longitudinal conductivity + sigma_t: float + Transversal conductivity + diff_l: float + Longitudinal diffusion coefficient + diff_t: float + Transversal diffusion coefficient + diff: tuple of floats + Tuple containing the diffusion coefficients + dom_size: tuple of floats + Tuple containing the domain size + n_elems: tuple of ints + Tuple containing the number of elements in each direction + refinements: int + Number of refinements with respect to a baseline mesh. Can be both positive (to get finer meshes) and negative (to get coarser meshes). + grids: tuple of 1D arrays + Tuple containing the grid points in each direction + dx: tuple of floats + Tuple containing the grid spacings in each direction + shape: tuple of ints + Tuple containing the number of grid points in each direction. Same as n_elems with reversed order. + n_dofs: int + Total number of degrees of freedom + dim: int + Dimension of the spatial domain. Can be 1, 2 or 3. + init: tuple + Shape of the mesh, None and data type of the mesh (np.double) + mesh_name: str + Name of the mesh. Can be cube_ND, cuboid_ND, cuboid_ND_smaller, cuboid_ND_small, cuboid_ND_large, cuboid_ND_very_large. Where N=1,2,3. + diff_dct: array + Array containing the discrete Laplacian operator + output_folder: Path + Path to the output folder + output_file_path: Path + Path to the output file + output_file: str + Name of the output file + enable_output: bool + If True, the solution is written to file. Else not. 
+ t_out: list + List containing the output times + order: int + Order of the spatial discretization. Can be 2 or 4 + zero_stim_vec: float + Used to apply zero stimili. + """ + + def __init__(self, **problem_params): + self._makeAttributeAndRegister(*problem_params.keys(), localVars=problem_params, readOnly=True) + + self.define_domain() + self.define_coefficients() + self.define_diffusion() + self.define_stimulus() + + def __del__(self): + if self.enable_output: + # Close the output file + self.output_file.close() + # Save the output times and the grid points + with open( + self.output_file_path.parent / Path(self.output_file_name + '_txyz').with_suffix(".npy"), 'wb' + ) as f: + np.save(f, np.array(self.t_out)) + xyz = self.grids + for i in range(self.dim): + np.save(f, xyz[i]) + + @property + def mesh_name(self): + return "ref_" + str(self.refinements) + + def define_coefficients(self): + self.chi = 140.0 # mm^-1 + self.Cm = 0.01 # uF/mm^2 + self.si_l = 0.17 # mS/mm + self.se_l = 0.62 # mS/mm + self.si_t = 0.019 # mS/mm + self.se_t = 0.24 # mS/mm + + if "cube" in self.domain_name: + # For this domain we use isotropic conductivities + self.si_t = self.si_l + self.se_t = self.se_l + + self.sigma_l = self.si_l * self.se_l / (self.si_l + self.se_l) + self.sigma_t = self.si_t * self.se_t / (self.si_t + self.se_t) + self.diff_l = self.sigma_l / self.chi / self.Cm + self.diff_t = self.sigma_t / self.chi / self.Cm + + if self.dim == 1: + self.diff = (self.diff_l,) + elif self.dim == 2: + self.diff = (self.diff_l, self.diff_t) + else: + self.diff = (self.diff_l, self.diff_t, self.diff_t) + + def define_domain(self): + if "cube" in self.domain_name: + self.dom_size = (100.0, 100.0, 100.0) + self.dim = int(self.domain_name[5]) + else: # cuboid + if "smaller" in self.domain_name: + self.dom_size = (10.0, 4.5, 2.0) + elif "small" in self.domain_name: + self.dom_size = (5.0, 3.0, 1.0) + elif "very_large" in self.domain_name: + self.dom_size = (280.0, 112.0, 48.0) + elif "large" 
in self.domain_name: + self.dom_size = (60.0, 21.0, 9.0) + else: + self.dom_size = (20.0, 7.0, 3.0) + self.dim = int(self.domain_name[7]) + + self.dom_size = self.dom_size[: self.dim] + self.n_elems = [int(2 ** np.round(np.log2(5.0 * L * 2**self.refinements))) for L in self.dom_size] + self.grids, self.dx = self.get_grids_dx(self.dom_size, self.n_elems) + + self.shape = tuple(np.flip([x.size for x in self.grids])) + self.n_dofs = int(np.prod(self.shape)) + self.init = ((self.n_dofs,), None, np.dtype('float64')) + + def define_diffusion(self): + N = self.n_elems + dx = self.dx + dim = len(N) + if self.order == 2: + diff_dct = self.diff[0] * (2.0 * np.cos(np.pi * np.arange(N[0]) / N[0]) - 2.0) / dx[0] ** 2 + if dim >= 2: + diff_dct = ( + diff_dct[None, :] + + self.diff[1] + * np.array((2.0 * np.cos(np.pi * np.arange(N[1]) / N[1]) - 2.0) / dx[1] ** 2)[:, None] + ) + if dim >= 3: + diff_dct = ( + diff_dct[None, :, :] + + self.diff[2] + * np.array((2.0 * np.cos(np.pi * np.arange(N[2]) / N[2]) - 2.0) / dx[2] ** 2)[:, None, None] + ) + elif self.order == 4: + diff_dct = ( + self.diff[0] + * ( + (-1.0 / 6.0) * np.cos(2.0 * np.pi * np.arange(N[0]) / N[0]) + + (8.0 / 3.0) * np.cos(np.pi * np.arange(N[0]) / N[0]) + - 2.5 + ) + / dx[0] ** 2 + ) + if dim >= 2: + diff_dct = ( + diff_dct[None, :] + + self.diff[1] + * np.array( + ( + (-1.0 / 6.0) * np.cos(2.0 * np.pi * np.arange(N[1]) / N[1]) + + (8.0 / 3.0) * np.cos(np.pi * np.arange(N[1]) / N[1]) + - 2.5 + ) + / dx[1] ** 2 + )[:, None] + ) + if dim >= 3: + diff_dct = ( + diff_dct[None, :, :] + + self.diff[2] + * np.array( + ( + (-1.0 / 6.0) * np.cos(2.0 * np.pi * np.arange(N[2]) / N[2]) + + (8.0 / 3.0) * np.cos(np.pi * np.arange(N[2]) / N[2]) + - 2.5 + ) + / dx[2] ** 2 + )[:, None, None] + ) + else: + raise NotImplementedError("Only order 2 and 4 are implemented for Parabolic_DCT.") + + self.diff_dct = diff_dct + + def grids_from_x(self, x): + dim = len(x) + if dim == 1: + return (x[0],) + elif dim == 2: + return (x[0][None, :], 
x[1][:, None]) + elif dim == 3: + return (x[0][None, None, :], x[1][None, :, None], x[2][:, None, None]) + + def get_grids_dx(self, dom_size, N): + # The grid points are the midpoints of the elements, hence x_{n+1/2}=(n+1/2)*dx + # This is needed for the DCT. + x = [np.linspace(0, dom_size[i], 2 * N[i] + 1) for i in range(len(N))] + x = [xi[1::2] for xi in x] + dx = [xi[1] - xi[0] for xi in x] + return self.grids_from_x(x), dx + + def define_stimulus(self): + self.zero_stim_vec = 0.0 + # all remaining stimulus parameters are set in MonodomainODE + + def solve_system(self, rhs, factor, u0, t, u_sol): + """ + Solve the linear system: u_sol = (I - factor * A)^{-1} rhs + + Arguments: + ---------- + rhs: mesh + The right-hand side of the linear system + factor: float + The factor in the linear system multiplying the Laplacian + u0: mesh + The initial guess for the solution. Not used here since we use a direct solver. + t: float + The current time. Not used here since the Laplacian is time-independent. + u_sol: mesh + The vector to store the solution in. + """ + rhs_hat = sp.fft.dctn(rhs.reshape(self.shape)) + u_sol_hat = rhs_hat / (1.0 - factor * self.diff_dct) + u_sol[:] = sp.fft.idctn(u_sol_hat).ravel() + + return u_sol + + def add_disc_laplacian(self, uh, res): + """ + Add the discrete Laplacian operator to res: res += A * uh + """ + res[:] += sp.fft.idctn(self.diff_dct * sp.fft.dctn(uh.reshape(self.shape))).ravel() + + def init_output(self, output_folder): + # Initialize the output parameters and file + self.output_folder = output_folder + self.output_file_path = self.output_folder / Path(self.output_file_name).with_suffix(".npy") + if self.enable_output: + if self.output_file_path.is_file(): + os.remove(self.output_file_path) + if not self.output_folder.is_dir(): + os.makedirs(self.output_folder) + self.output_file = open(self.output_file_path, 'wb') + self.t_out = [] + + def write_solution(self, uh, t): + # Write the solution to file. 
Meant for visualization, hence we print the time stamp too. + # Usually we only write the first component of the solution, hence the electric potential V and not the ionic model state variables. + if self.enable_output: + np.save(self.output_file, uh.reshape(self.shape)) + self.t_out.append(t) + + def write_reference_solution(self, uh, indeces): + """ + Write the solution to file. This is meant to print solutions that will be used as reference solutions + and compute errors or as well solutions that will be used as initial values for other simulations. + Here we write the model variables listed in indeces. + """ + if self.output_file_path.is_file(): + os.remove(self.output_file_path) + if not self.output_file_path.parent.is_dir(): + os.makedirs(self.output_file_path.parent) + with open(self.output_file_path, 'wb') as file: + [np.save(file, uh[i].reshape(self.shape)) for i in indeces] + + def read_reference_solution(self, uh, indeces, ref_file_name): + """ + Read a reference solution from file. It can be used to set the initial values of a simulation + or to compute errors. + We read the model variables listed in indeces. + """ + if ref_file_name == "": + return False + ref_sol_path = Path(self.output_folder) / Path(ref_file_name).with_suffix(".npy") + if ref_sol_path.is_file(): + with open(ref_sol_path, 'rb') as f: + for i in indeces: + uh[i][:] = np.load(f).ravel() + return True + else: + return False + + def stim_region(self, stim_center, stim_radius): + """ + Define the region where the stimulus is applied, given the center and the radius of the stimulus. + Returns a vector of the same size as the grid, with 1s inside the stimulus region and 0s outside. 
+ """ + grids = self.grids + coord_inside_stim_box = [] + for i in range(len(grids)): + coord_inside_stim_box.append(abs(grids[i] - stim_center[i]) < stim_radius[i]) + + inside_stim_box = True + for i in range(len(grids)): + inside_stim_box = np.logical_and(inside_stim_box, coord_inside_stim_box[i]) + + stim = mesh(self.init) + stim[:] = inside_stim_box.ravel().astype(float) + + return stim + + def compute_errors(self, uh, ref_sol): + # Compute L2 errors with respect to the reference solution + error_L2 = np.linalg.norm(uh - ref_sol) + sol_norm_L2 = np.linalg.norm(ref_sol) + rel_error_L2 = error_L2 / sol_norm_L2 + + return error_L2, rel_error_L2 diff --git a/pySDC/projects/Monodomain/run_scripts/run_MonodomainODE.py b/pySDC/projects/Monodomain/run_scripts/run_MonodomainODE.py new file mode 100644 index 0000000000..c1aa47db27 --- /dev/null +++ b/pySDC/projects/Monodomain/run_scripts/run_MonodomainODE.py @@ -0,0 +1,418 @@ +from pathlib import Path +import numpy as np +from mpi4py import MPI +import logging +import os + +from pySDC.core.Errors import ParameterError + +from pySDC.projects.Monodomain.problem_classes.MonodomainODE import MultiscaleMonodomainODE +from pySDC.projects.Monodomain.hooks.HookClass_pde import pde_hook +from pySDC.projects.Monodomain.hooks.HookClass_post_iter_info import post_iter_info_hook + +from pySDC.helpers.stats_helper import get_sorted + +from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI +from pySDC.implementations.controller_classes.controller_MPI import controller_MPI + +from pySDC.projects.Monodomain.sweeper_classes.exponential_runge_kutta.imexexp_1st_order import ( + imexexp_1st_order as imexexp_1st_order_ExpRK, +) +from pySDC.projects.Monodomain.sweeper_classes.runge_kutta.imexexp_1st_order import imexexp_1st_order + +from pySDC.projects.Monodomain.transfer_classes.TransferVectorOfDCTVectors import TransferVectorOfDCTVectors + +from pySDC.projects.Monodomain.utils.data_management import database 
+ + +def set_logger(controller_params): + logging.basicConfig(level=controller_params["logger_level"]) + hooks_logger = logging.getLogger("hooks") + hooks_logger.setLevel(controller_params["logger_level"]) + + +def get_controller(controller_params, description, time_comm, n_time_ranks, truly_time_parallel): + if truly_time_parallel: + controller = controller_MPI(controller_params=controller_params, description=description, comm=time_comm) + else: + controller = controller_nonMPI( + num_procs=n_time_ranks, controller_params=controller_params, description=description + ) + return controller + + +def print_dofs_stats(time_rank, controller, P, uinit): + tot_dofs = uinit.size + mesh_dofs = uinit.shape[1] + if time_rank == 0: + controller.logger.info(f"Total dofs: {tot_dofs}, mesh dofs = {mesh_dofs}") + + +def get_P_data(controller, truly_time_parallel): + if truly_time_parallel: + P = controller.S.levels[0].prob + else: + P = controller.MS[0].levels[0].prob + # set time parameters + t0 = P.t0 + Tend = P.Tend + uinit = P.initial_value() + return t0, Tend, uinit, P + + +def get_comms(n_time_ranks, truly_time_parallel): + if truly_time_parallel: + time_comm = MPI.COMM_WORLD + time_rank = time_comm.Get_rank() + assert time_comm.Get_size() == n_time_ranks, "Number of time ranks does not match the number of MPI ranks" + else: + time_comm = None + time_rank = 0 + return time_comm, time_rank + + +def get_base_transfer_params(finter): + base_transfer_params = dict() + base_transfer_params["finter"] = finter + return base_transfer_params + + +def get_controller_params(problem_params, n_time_ranks): + controller_params = dict() + controller_params["predict_type"] = "pfasst_burnin" if n_time_ranks > 1 else None + controller_params["log_to_file"] = False + controller_params["fname"] = problem_params["output_root"] + "controller" + controller_params["logger_level"] = 20 + controller_params["dump_setup"] = False + if n_time_ranks == 1: + controller_params["hook_class"] = 
[post_iter_info_hook, pde_hook] + else: + controller_params["hook_class"] = [post_iter_info_hook] + return controller_params + + +def get_description( + integrator, problem_params, sweeper_params, level_params, step_params, base_transfer_params, space_transfer_class +): + description = dict() + + problem = MultiscaleMonodomainODE + + if integrator == "IMEXEXP": + # implicit-explicit-exponential integrators in the preconditioner and standard SDC + description["sweeper_class"] = imexexp_1st_order + elif integrator == "IMEXEXP_EXPRK": + # implicit-explicit-exponential integrators in the preconditioner and exponential SDC + description["sweeper_class"] = imexexp_1st_order_ExpRK + else: + raise ParameterError("Unknown integrator.") + + description["problem_class"] = problem + description["problem_params"] = problem_params + description["sweeper_params"] = sweeper_params + description["level_params"] = level_params + description["step_params"] = step_params + description["base_transfer_params"] = base_transfer_params + description["space_transfer_class"] = space_transfer_class + + return description + + +def get_step_params(maxiter): + step_params = dict() + step_params["maxiter"] = maxiter + return step_params + + +def get_level_params(dt, nsweeps, restol, n_time_ranks): + # initialize level parameters + level_params = dict() + level_params["restol"] = restol + level_params["dt"] = dt + level_params["nsweeps"] = nsweeps + level_params["residual_type"] = "full_rel" + level_params["parallel"] = n_time_ranks > 1 + + return level_params + + +def get_sweeper_params(num_nodes, skip_residual_computation): + # initialize sweeper parameters + sweeper_params = dict() + sweeper_params["initial_guess"] = "spread" + sweeper_params["quad_type"] = "RADAU-RIGHT" + sweeper_params["num_nodes"] = num_nodes + sweeper_params["QI"] = "IE" + if skip_residual_computation: + sweeper_params["skip_residual_computation"] = ("IT_FINE", "IT_COARSE", "IT_DOWN", "IT_UP") + + return sweeper_params + + 
+def get_space_tranfer_params(): + + space_transfer_class = TransferVectorOfDCTVectors + + return space_transfer_class + + +def get_problem_params( + domain_name, + refinements, + ionic_model_name, + read_init_val, + init_time, + enable_output, + end_time, + order, + output_root, + output_file_name, + ref_sol, +): + # initialize problem parameters + problem_params = dict() + problem_params["order"] = order # order of the spatial discretization + problem_params["refinements"] = refinements # number of refinements with respect to a baseline + problem_params["domain_name"] = ( + domain_name # name of the domain: cube_1D, cube_2D, cube_3D, cuboid_1D, cuboid_2D, cuboid_3D, cuboid_1D_small, cuboid_2D_small, cuboid_3D_small + ) + problem_params["ionic_model_name"] = ( + ionic_model_name # name of the ionic model: HH, CRN, TTP, TTP_SMOOTH for Hodgkin-Huxley, Courtemanche-Ramirez-Nattel, Ten Tusscher-Panfilov and a smoothed version of Ten Tusscher-Panfilov + ) + problem_params["read_init_val"] = ( + read_init_val # read the initial value from file (True) or initiate an action potential with a stimulus (False) + ) + problem_params["init_time"] = ( + init_time # stimulus happpens at t=0 and t=1000 and lasts 2ms. If init_time>2 nothing happens up to t=1000. If init_time>1002 nothing happens, never. + ) + problem_params["init_val_name"] = "init_val_DCT" # name of the file containing the initial value + problem_params["enable_output"] = ( + enable_output # activate or deactivate output (that can be visualized with visualization/show_monodomain_sol.py) + ) + problem_params["output_V_only"] = ( + True # output only the transmembrane potential (V) and not the ionic model variables + ) + executed_file_dir = os.path.dirname(os.path.realpath(__file__)) + problem_params["output_root"] = ( + executed_file_dir + "/../../../../data/" + output_root + ) # output root folder. A hierarchy of folders is created in this folder, as root/domain_name/ref_+str(refinements)/ionic_model_name. 
Initial values are put here + problem_params["output_file_name"] = output_file_name + problem_params["ref_sol"] = ref_sol # reference solution file name + problem_params["end_time"] = end_time + Path(problem_params["output_root"]).mkdir(parents=True, exist_ok=True) + + return problem_params + + +def setup_and_run( + integrator, + num_nodes, + skip_residual_computation, + num_sweeps, + max_iter, + dt, + restol, + domain_name, + refinements, + order, + ionic_model_name, + read_init_val, + init_time, + enable_output, + write_as_reference_solution, + write_all_variables, + output_root, + output_file_name, + ref_sol, + end_time, + truly_time_parallel, + n_time_ranks, + finter, + write_database, +): + + # get time communicator + time_comm, time_rank = get_comms(n_time_ranks, truly_time_parallel) + + # get time integration parameters + # set maximum number of iterations in ESDC/MLESDC/PFASST + step_params = get_step_params(maxiter=max_iter) + # set number of collocation nodes in each level + sweeper_params = get_sweeper_params(num_nodes=num_nodes, skip_residual_computation=skip_residual_computation) + # set step size, number of sweeps per iteration, and residual tolerance for the stopping criterion + level_params = get_level_params( + dt=dt, + nsweeps=num_sweeps, + restol=restol, + n_time_ranks=n_time_ranks, + ) + + # fix enable output to that only finest level has output + n_levels = max(len(refinements), len(num_nodes)) + enable_output = [enable_output] + [False] * (n_levels - 1) + # get problem parameters + problem_params = get_problem_params( + domain_name=domain_name, + refinements=refinements, + ionic_model_name=ionic_model_name, + read_init_val=read_init_val, + init_time=init_time, + enable_output=enable_output, + end_time=end_time, + order=order, + output_root=output_root, + output_file_name=output_file_name, + ref_sol=ref_sol, + ) + + space_transfer_class = get_space_tranfer_params() + + # get remaining prams + base_transfer_params = 
get_base_transfer_params(finter) + controller_params = get_controller_params(problem_params, n_time_ranks) + description = get_description( + integrator, + problem_params, + sweeper_params, + level_params, + step_params, + base_transfer_params, + space_transfer_class, + ) + set_logger(controller_params) + controller = get_controller(controller_params, description, time_comm, n_time_ranks, truly_time_parallel) + + # get PDE data + t0, Tend, uinit, P = get_P_data(controller, truly_time_parallel) + + # print dofs stats + print_dofs_stats(time_rank, controller, P, uinit) + + # run + uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend) + + # write reference solution, to be used later for error computation + if write_as_reference_solution: + P.write_reference_solution(uend, write_all_variables) + + # compute errors, if a reference solution is found + error_availabe, error_L2, rel_error_L2 = P.compute_errors(uend) + + # get some stats + iter_counts = get_sorted(stats, type="niter", sortby="time") + residuals = get_sorted(stats, type="residual_post_iteration", sortby="time") + if time_comm is not None: + iter_counts = time_comm.gather(iter_counts, root=0) + residuals = time_comm.gather(residuals, root=0) + if time_rank == 0: + iter_counts = [item for sublist in iter_counts for item in sublist] + residuals = [item for sublist in residuals for item in sublist] + iter_counts = time_comm.bcast(iter_counts, root=0) + residuals = time_comm.bcast(residuals, root=0) + + iter_counts.sort() + times = [item[0] for item in iter_counts] + niters = [item[1] for item in iter_counts] + + residuals.sort() + residuals_new = [residuals[0][1]] + t = residuals[0][0] + for i in range(1, len(residuals)): + if residuals[i][0] > t + dt / 2.0: + residuals_new.append(residuals[i][1]) + t = residuals[i][0] + residuals = residuals_new + + avg_niters = np.mean(niters) + if time_rank == 0: + controller.logger.info("Mean number of iterations: %4.2f" % avg_niters) + controller.logger.info( + "Std and 
var for number of iterations: %4.2f -- %4.2f" % (float(np.std(niters)), float(np.var(niters))) + ) + + if write_database and time_rank == 0: + errors = dict() + errors["error_L2"] = error_L2 + errors["rel_error_L2"] = rel_error_L2 + iters_info = dict() + iters_info["avg_niters"] = avg_niters + iters_info["times"] = times + iters_info["niters"] = niters + iters_info["residuals"] = residuals + file_name = P.output_folder / Path(P.output_file_name) + if file_name.with_suffix('.db').is_file(): + os.remove(file_name.with_suffix('.db')) + data_man = database(file_name) + data_man.write_dictionary("errors", errors) + data_man.write_dictionary("iters_info", iters_info) + + return error_L2, rel_error_L2, avg_niters, times, niters, residuals + + +def main(): + # define sweeper parameters + # integrator = "IMEXEXP" + integrator = "IMEXEXP_EXPRK" + num_nodes = [4] + num_sweeps = [1] + + # set step parameters + max_iter = 100 + + # set level parameters + dt = 0.05 + restol = 5e-8 + + # set problem parameters + domain_name = "cube_2D" + refinements = [-1] + order = 4 # 2 or 4 + ionic_model_name = "TTP" + read_init_val = True + init_time = 3.0 + enable_output = False + write_as_reference_solution = False + write_all_variables = False + write_database = False + end_time = 0.05 + output_root = "results_tmp" + output_file_name = "ref_sol" if write_as_reference_solution else "monodomain" + ref_sol = "ref_sol" + skip_residual_computation = False + + finter = False + + # set time parallelism to True or emulated (False) + truly_time_parallel = False + n_time_ranks = 1 + + error_L2, rel_error_L2, avg_niters, times, niters, residuals = setup_and_run( + integrator, + num_nodes, + skip_residual_computation, + num_sweeps, + max_iter, + dt, + restol, + domain_name, + refinements, + order, + ionic_model_name, + read_init_val, + init_time, + enable_output, + write_as_reference_solution, + write_all_variables, + output_root, + output_file_name, + ref_sol, + end_time, + truly_time_parallel, + 
n_time_ranks, + finter, + write_database, + ) + + +if __name__ == "__main__": + main() diff --git a/pySDC/projects/Monodomain/run_scripts/run_MonodomainODE_cli.py b/pySDC/projects/Monodomain/run_scripts/run_MonodomainODE_cli.py new file mode 100644 index 0000000000..9fca058c78 --- /dev/null +++ b/pySDC/projects/Monodomain/run_scripts/run_MonodomainODE_cli.py @@ -0,0 +1,135 @@ +import argparse +from pySDC.projects.Monodomain.run_scripts.run_MonodomainODE import setup_and_run + + +def list_of_ints(arg): + arg = arg.replace(' ', '') + arg = arg.replace('_', '-') + arg = arg.split(',') + return list(map(int, arg)) + + +# This is to run the MonodomainODE example from the command line +# Pretty much all the parameters can be defined from the command line + +# For the refinements, it is possible to set negative values, which yield a mesh coarser than the baseline. +# To do so in the command line use an underscore _ insteaf of a minus sign -. +# For example, to solve a 3 level example with meshes refinements 1, 0 and -1, use the option --refinements 1,0,_1 + + +def main(): + parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) + # define sweeper parameters + parser.add_argument("--integrator", default="IMEXEXP_EXPRK", type=str, help="sweeper name") + parser.add_argument( + "--num_nodes", + default="4", + type=list_of_ints, + help="list of ints (as '5,3', i.e. 
no brackets): number of collocation nodes per level", + ) + parser.add_argument("--num_sweeps", default="1", type=list_of_ints, help="list of ints: number of sweeps per level") + parser.add_argument( + "--skip_res", + default=False, + action=argparse.BooleanOptionalAction, + help="compute residual only when really needed", + ) + # set step parameters + parser.add_argument("--max_iter", default=100, type=int, help="maximal number of iterations") + # set level parameters + parser.add_argument("--dt", default=0.05, type=float, help="step size") + parser.add_argument("--restol", default=5e-8, type=float, help="residual tolerance") + # problem args + parser.add_argument( + "--domain_name", default="cuboid_2D_small", type=str, help="cuboid_2D, cuboid_3D, truncated_ellipsoid,..." + ) + parser.add_argument( + "--refinements", + default="0", + type=list_of_ints, + help="list of ints: number of refinements per level, with respect to a baseline mesh (negative values yield coarser meshes). For negative values use _ instead of -.", + ) + parser.add_argument( + "--order", default="4", type=list_of_ints, help="list of ints: order of FEM or FD discretization" + ) + parser.add_argument("--ionic_model_name", default="TTP", type=str, help="ionic_model: HH, CRN, TTP") + parser.add_argument( + "--read_init_val", default=False, action=argparse.BooleanOptionalAction, help="read the initial value from file" + ) + parser.add_argument("--init_time", default=0.0, type=float, help="duration of stimulus. 
-1 means default") + parser.add_argument( + "--enable_output", + default=False, + action=argparse.BooleanOptionalAction, + help="activate or deactivate xdmf output: True or False", + ) + parser.add_argument( + "--write_as_reference_solution", + default=False, + action=argparse.BooleanOptionalAction, + help="write as reference solution: True or False", + ) + parser.add_argument( + "--write_all_variables", + default=False, + action=argparse.BooleanOptionalAction, + help="when write_as_reference_solution=True, write write all variables (True) or only potential V (False)", + ) + parser.add_argument("--end_time", default=1.0, type=float, help="end time. If negative, a default one is used") + parser.add_argument("--output_file_name", default="monodomain", type=str, help="output file name") + parser.add_argument("--ref_sol", default="ref_sol", type=str, help="reference solution file name") + parser.add_argument("--output_root", default="results_tmp/", type=str, help="output root folder") + parser.add_argument( + "--finter", + default=False, + action=argparse.BooleanOptionalAction, + help="in prolong, re-evaluate f (false) or interpolate (true)", + ) + # controller args + parser.add_argument( + "--truly_time_parallel", + default=False, + action=argparse.BooleanOptionalAction, + help="truly time parallel or emulated", + ) + parser.add_argument("--n_time_ranks", default=1, type=int, help="number of time ranks") + + parser.add_argument( + "--write_database", + default=True, + action=argparse.BooleanOptionalAction, + help="save some simulation results in a database", + ) + + args = parser.parse_args() + + error_L2, rel_error_L2, avg_niters, times, niters, residuals = setup_and_run( + args.integrator, + args.num_nodes, + args.skip_res, + args.num_sweeps, + args.max_iter, + args.dt, + args.restol, + args.domain_name, + args.refinements, + args.order, + args.ionic_model_name, + args.read_init_val, + args.init_time, + args.enable_output, + args.write_as_reference_solution, + 
args.write_all_variables, + args.output_root, + args.output_file_name, + args.ref_sol, + args.end_time, + args.truly_time_parallel, + args.n_time_ranks, + args.finter, + args.write_database, + ) + + +if __name__ == "__main__": + main() diff --git a/pySDC/projects/Monodomain/run_scripts/run_TestODE.py b/pySDC/projects/Monodomain/run_scripts/run_TestODE.py new file mode 100644 index 0000000000..6b27b1aec2 --- /dev/null +++ b/pySDC/projects/Monodomain/run_scripts/run_TestODE.py @@ -0,0 +1,301 @@ +from pathlib import Path +import numpy as np +import logging +import os + +from tqdm import tqdm + +from pySDC.core.Errors import ParameterError + +from pySDC.projects.Monodomain.problem_classes.TestODE import MultiscaleTestODE +from pySDC.projects.Monodomain.transfer_classes.TransferVectorOfDCTVectors import TransferVectorOfDCTVectors + +from pySDC.projects.Monodomain.hooks.HookClass_post_iter_info import post_iter_info_hook + +from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI + +from pySDC.projects.Monodomain.sweeper_classes.exponential_runge_kutta.imexexp_1st_order import ( + imexexp_1st_order as imexexp_1st_order_ExpRK, +) +from pySDC.projects.Monodomain.sweeper_classes.runge_kutta.imexexp_1st_order import imexexp_1st_order + +""" +Run the multirate Dahlquist test equation and plot the stability domain of the method. +We vary only the exponential term and the stiff term, while the non stiff term is kept constant (to allow 2D plots). 
+""" + + +def set_logger(controller_params): + logging.basicConfig(level=controller_params["logger_level"]) + hooks_logger = logging.getLogger("hooks") + hooks_logger.setLevel(controller_params["logger_level"]) + + +def get_controller(controller_params, description, n_time_ranks): + controller = controller_nonMPI(num_procs=n_time_ranks, controller_params=controller_params, description=description) + return controller + + +def get_P_data(controller): + P = controller.MS[0].levels[0].prob + # set time parameters + t0 = P.t0 + Tend = P.Tend + uinit = P.initial_value() + return t0, Tend, uinit, P + + +def get_base_transfer_params(): + base_transfer_params = dict() + base_transfer_params["finter"] = False + return base_transfer_params + + +def get_controller_params(output_root, logger_level): + controller_params = dict() + controller_params["predict_type"] = "pfasst_burnin" + controller_params["log_to_file"] = False + controller_params["fname"] = output_root + "controller" + controller_params["logger_level"] = logger_level + controller_params["dump_setup"] = False + controller_params["hook_class"] = [post_iter_info_hook] + return controller_params + + +def get_description( + integrator, problem_params, sweeper_params, level_params, step_params, base_transfer_params, space_transfer_class +): + description = dict() + + problem = MultiscaleTestODE + + if integrator == "IMEXEXP": + description["sweeper_class"] = imexexp_1st_order + elif integrator == "IMEXEXP_EXPRK": + description["sweeper_class"] = imexexp_1st_order_ExpRK + else: + raise ParameterError("Unknown integrator.") + + description["problem_class"] = problem + description["problem_params"] = problem_params + description["sweeper_params"] = sweeper_params + description["level_params"] = level_params + description["step_params"] = step_params + description["base_transfer_params"] = base_transfer_params + description["space_transfer_class"] = space_transfer_class + return description + + +def 
get_step_params(maxiter): + step_params = dict() + step_params["maxiter"] = maxiter + return step_params + + +def get_level_params(dt, nsweeps, restol): + # initialize level parameters + level_params = dict() + level_params["restol"] = restol + level_params["dt"] = dt + level_params["nsweeps"] = nsweeps + level_params["residual_type"] = "full_rel" + return level_params + + +def get_sweeper_params(num_nodes): + # initialize sweeper parameters + sweeper_params = dict() + sweeper_params["initial_guess"] = "spread" + sweeper_params["quad_type"] = "RADAU-RIGHT" + sweeper_params["num_nodes"] = num_nodes + sweeper_params["QI"] = "IE" + + return sweeper_params + + +def get_output_root(): + executed_file_dir = os.path.dirname(os.path.realpath(__file__)) + output_root = executed_file_dir + "/../../../../data/Monodomain/results_tmp" + return output_root + + +def get_problem_params(lmbda_laplacian, lmbda_gating, lmbda_others, end_time): + # initialize problem parameters + problem_params = dict() + problem_params["output_file_name"] = "monodomain" + problem_params["output_root"] = get_output_root() + problem_params["end_time"] = end_time + problem_params["lmbda_laplacian"] = lmbda_laplacian + problem_params["lmbda_gating"] = lmbda_gating + problem_params["lmbda_others"] = lmbda_others + Path(problem_params["output_root"]).mkdir(parents=True, exist_ok=True) + return problem_params + + +def plot_stability_domain(lmbda_laplacian_list, lmbda_gating_list, R, integrator, num_nodes, n_time_ranks): + import matplotlib.pyplot as plt + from matplotlib.colors import LogNorm + import pySDC.helpers.plot_helper as plt_helper + + plt_helper.setup_mpl() + + # fig, ax = plt_helper.newfig(textwidth=400, scale=0.89, ratio=0.5) + # fig, ax = plt_helper.newfig(textwidth=238.96, scale=0.89) + fig, ax = plt_helper.plt.subplots( + figsize=plt_helper.figsize(textwidth=400, scale=1.0, ratio=0.78), layout='constrained' + ) + + fs_label = 14 + fs_ticks = 12 + fs_title = 16 + X, Y = 
np.meshgrid(lmbda_gating_list, lmbda_laplacian_list) + R = np.abs(R) + CS = ax.contourf(X, Y, R, cmap=plt.cm.viridis, levels=np.logspace(-6, 0, 13), norm=LogNorm()) + ax.plot(lmbda_gating_list, 0 * lmbda_gating_list, 'k--', linewidth=1.0) + ax.plot(0 * lmbda_laplacian_list, lmbda_laplacian_list, 'k--', linewidth=1.0) + ax.contour(CS, levels=CS.levels, colors='black') + ax.set_xlabel(r'$z_{e}$', fontsize=fs_label, labelpad=-5) + ax.set_ylabel(r'$z_{I}$', fontsize=fs_label, labelpad=-10) + ax.tick_params(axis='x', labelsize=fs_ticks) + ax.tick_params(axis='y', labelsize=fs_ticks) + if len(num_nodes) == 1 and n_time_ranks == 1: + prefix = "" + elif len(num_nodes) > 1 and n_time_ranks == 1: + prefix = "ML" + elif len(num_nodes) > 1 and n_time_ranks > 1: + prefix = "PFASST " + if integrator == "IMEXEXP": + ax.set_title(prefix + "SDC stability domain", fontsize=fs_title) + elif integrator == "IMEXEXP_EXPRK": + ax.set_title(prefix + "ESDC stability domain", fontsize=fs_title) + ax.yaxis.tick_right() + ax.yaxis.set_label_position("right") + cbar = fig.colorbar(CS) + cbar.ax.set_ylabel(r'$|R(z_e,z_{I})|$', fontsize=fs_label, labelpad=-20) + cbar.set_ticks([cbar.vmin, cbar.vmax]) # keep only the ticks at the ends + cbar.ax.tick_params(labelsize=fs_ticks) + # plt_helper.plt.show() + plt_helper.savefig("data/stability_domain_" + integrator, save_pdf=False, save_pgf=False, save_png=True) + + +def main(integrator, dl, l_min, openmp, n_time_ranks, end_time, num_nodes, check_stability): + + # get time integration parameters + # set maximum number of iterations in SDC/ESDC/MLSDC/etc + step_params = get_step_params(maxiter=5) + # set number of collocation nodes in each level + sweeper_params = get_sweeper_params(num_nodes=num_nodes) + # set step size, number of sweeps per iteration, and residual tolerance for the stopping criterion + level_params = get_level_params(dt=1.0, nsweeps=[1], restol=5e-8) + # set space transfer parameters + # space_transfer_class = Transfer_myfloat + 
space_transfer_class = TransferVectorOfDCTVectors + base_transfer_params = get_base_transfer_params() + controller_params = get_controller_params(get_output_root(), logger_level=40) + + # set stability test parameters + lmbda_others = -1.0 # the non stiff term + lmbda_laplacian_min = l_min # the stiff term + lmbda_laplacian_max = 0.0 + lmbda_gating_min = l_min # the exponential term + lmbda_gating_max = 0.0 + + # define the grid for the stability domain + n_lmbda_laplacian = np.round((lmbda_laplacian_max - lmbda_laplacian_min) / dl).astype(int) + 1 + n_lmbda_gating = np.round((lmbda_gating_max - lmbda_gating_min) / dl).astype(int) + 1 + lmbda_laplacian_list = np.linspace(lmbda_laplacian_min, lmbda_laplacian_max, n_lmbda_laplacian) + lmbda_gating_list = np.linspace(lmbda_gating_min, lmbda_gating_max, n_lmbda_gating) + + if not openmp: + R = np.zeros((n_lmbda_laplacian, n_lmbda_gating)) + for i in tqdm(range(n_lmbda_gating)): + for j in range(n_lmbda_laplacian): + lmbda_gating = lmbda_gating_list[i] + lmbda_laplacian = lmbda_laplacian_list[j] + + problem_params = get_problem_params( + lmbda_laplacian=lmbda_laplacian, + lmbda_gating=lmbda_gating, + lmbda_others=lmbda_others, + end_time=end_time, + ) + description = get_description( + integrator, + problem_params, + sweeper_params, + level_params, + step_params, + base_transfer_params, + space_transfer_class, + ) + set_logger(controller_params) + controller = get_controller(controller_params, description, n_time_ranks) + + t0, Tend, uinit, P = get_P_data(controller) + uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend) + + R[j, i] = abs(uend) + else: + import pymp + + R = pymp.shared.array((n_lmbda_laplacian, n_lmbda_gating), dtype=float) + with pymp.Parallel(12) as p: + for i in tqdm(p.range(0, n_lmbda_gating)): + for j in range(n_lmbda_laplacian): + lmbda_gating = lmbda_gating_list[i] + lmbda_laplacian = lmbda_laplacian_list[j] + + problem_params = get_problem_params( + lmbda_laplacian=lmbda_laplacian, + 
lmbda_gating=lmbda_gating, + lmbda_others=lmbda_others, + end_time=end_time, + ) + description = get_description( + integrator, + problem_params, + sweeper_params, + level_params, + step_params, + base_transfer_params, + space_transfer_class, + ) + set_logger(controller_params) + controller = get_controller(controller_params, description, n_time_ranks) + + t0, Tend, uinit, P = get_P_data(controller) + uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend) + + R[j, i] = abs(uend) + + plot_stability_domain(lmbda_laplacian_list, lmbda_gating_list, R, integrator, num_nodes, n_time_ranks) + + if check_stability: + assert ( + np.max(np.abs(R.ravel())) <= 1.0 + ), "The maximum absolute value of the stability function is greater than 1.0." + + +if __name__ == "__main__": + # Plot stability for exponential SDC coupled with the implicit-explicit-exponential integrator as preconditioner + main( + integrator="IMEXEXP_EXPRK", + dl=2, + l_min=-100, + openmp=True, + n_time_ranks=1, + end_time=1.0, + num_nodes=[5, 3], + check_stability=True, # check that the stability function is bounded by 1.0 + ) + # Plot stability for standard SDC coupled with the implicit-explicit-exponential integrator as preconditioner + main( + integrator="IMEXEXP", + dl=2, + l_min=-100, + openmp=True, + n_time_ranks=1, + end_time=1.0, + num_nodes=[5, 3], + check_stability=False, # do not check for stability since we already know that the method is not stable + ) diff --git a/pySDC/projects/Monodomain/sweeper_classes/exponential_runge_kutta/imexexp_1st_order.py b/pySDC/projects/Monodomain/sweeper_classes/exponential_runge_kutta/imexexp_1st_order.py new file mode 100644 index 0000000000..25c0813c14 --- /dev/null +++ b/pySDC/projects/Monodomain/sweeper_classes/exponential_runge_kutta/imexexp_1st_order.py @@ -0,0 +1,301 @@ +import numpy as np + +from pySDC.core.Sweeper import sweeper +from pySDC.core.Errors import CollocationError, ParameterError +from pySDC.core.Collocation import CollBase +import 
numdifftools.fornberg as fornberg
+import scipy
+
+
+class imexexp_1st_order(sweeper):
+    """
+    Custom sweeper class, implements Sweeper.py
+
+    First-order IMEXEXP sweeper using implicit/explicit/exponential Euler as base integrator
+    In the cardiac electrophysiology community this is known as Rush-Larsen scheme.
+
+    The underlying integrator is exponential Runge-Kutta, leading to exponential SDC (ESDC).
+    """
+
+    def __init__(self, params):
+        """
+        Initialization routine for the custom sweeper
+
+        Args:
+            params: parameters for the sweeper
+        """
+
+        if "QI" not in params:
+            params["QI"] = "IE"
+
+        # call parent's initialization routine
+        super(imexexp_1st_order, self).__init__(params)
+
+        # IMEX integration matrices
+        self.QI = self.get_Qdelta_implicit(coll=self.coll, qd_type=self.params.QI)
+        self.delta = np.diagonal(self.QI)[1:]
+
+        # Compute weights w such that PiQ^(k)(0) = sum_{j=0}^{M-1} w[k,j]*Q[j], k=0,...,M-1
+        # Used to express the derivatives of a polynomial in x=0 in terms of the values of the polynomial at the collocation nodes
+        M = self.coll.num_nodes
+        c = self.coll.nodes
+        self.w = fornberg.fd_weights_all(c, 0.0, M - 1).transpose()
+
+        # Define the quadrature rule for the evaluation of the phi_i(z) functions. Indeed, we evaluate them as integrals in order to avoid round off errors.
+        phi_num_nodes = 5  # seems to be enough in most cases
+        self.phi_coll = CollBase(num_nodes=phi_num_nodes, tleft=0, tright=1, node_type='LEGENDRE', quad_type='GAUSS')
+
+    def phi_eval(self, factors, indeces, phi, lmbda):
+        """
+        Evaluate the phi_k functions at the points factors[i]*lmbda, for all k in indeces
+
+        Arguments:
+            factors: list of factors to multiply lmbda with.
+            indeces: list of indeces k for the phi_k functions. Since we use the integral formulation, k=0 is not allowed (nor needed either).
+ phi: an instance of mesh with shape (len(factors),len(indeces),*lmbda.shape) (i.e., some space to store the results) + it will filled as: phi[i,k][:] = phi_{indeces[k]}(factor[i]*lmbda[:]) + lmbda: dtype_u: the value of lmbda + """ + + assert 0 not in indeces, "phi_0 is not implemented, since the integral definition is not valid for k=0." + + # the quadrature rule used to evaluate the phi functions as integrals. This is not the same as the one used in the ESDC method!!!! + c = self.phi_coll.nodes + b = self.phi_coll.weights + + k = np.array(indeces) + km1_fac = scipy.special.factorial(k - 1) # (k-1)! + + # Here we use the quadrature rule to approximate the integral + # phi_{k}(factor[i]*lmbda[:,:])= \int_0^1 e^{(1-s)*factor[i]*lambda[:,:]}*s^{k-1}/(k-1)! ds + + # First, compute e^((1-c[j])*factor[i]*lmbda[:]) for nodes c[j] on the quadrature rule and all factors[i] + exp_terms = np.exp(((1.0 - c[None, :, None, None]) * factors[:, None, None, None]) * lmbda[None, None, :, :]) + # Then, compute the terms c[j]^{k-1}/(k-1)! for all nodes c[j] and all k and multiply with the weights b[j] + wgt_tmp = (b[:, None] * c[:, None] ** (k[None, :] - 1)) / km1_fac[None, :] + # Finally, compute the integral by summing over the quadrature nodes + phi[:] = np.sum(wgt_tmp[None, :, :, None, None] * exp_terms[:, :, None, :, :], axis=1) + + def compute_lambda_phi_Qmat_exp(self): + + if not hasattr(self, "u_old"): + # make some space for the old value of u[0] + self.u_old = self.level.prob.dtype_u(init=self.level.prob.init, val=0.0) + + # everything that is computed in this if statement depends on u[0] only + # To save computations we recompute that only if u[0] has changed. + # Also, we check only for the first component u[0][0] of u[0] to save more computations. + # Remember that u[0][0] is a vector representing the electric potential on the whole mesh and is enough to check if the whole u[0] has changed. 
+ if not np.allclose(self.u_old[0], self.level.u[0][0], rtol=1e-10, atol=1e-10): + + self.u_old[:] = self.level.u[0] + + L = self.level + P = L.prob + M = self.coll.num_nodes + c = self.coll.nodes + + # compute lambda(u) of the exponential term f_exp(u)=lmbda(u)*(u-y_inf(u)) + # and select only the indeces with exponential terms (others are zeros) + self.lmbda = P.lmbda_eval(L.u[0], L.time)[P.rhs_exp_indeces] + + if not hasattr(self, "phi"): + # make some space + self.phi = P.dtype_u(init=P.init_exp_extruded((M, M)), val=0.0) + self.phi_one = P.dtype_u(init=P.init_exp_extruded((M, 1)), val=0.0) + + # evaluate the phi_k(dt*c_i*lambda) functions at the collocation nodes c_i for k=1,...,M + self.phi_eval(L.dt * c, list(range(1, M + 1)), self.phi, self.lmbda) + # evaluates phi_1(dt*delta_i*lambda) for delta_i = c_i - c_{i-1} + self.phi_eval(L.dt * self.delta, [1], self.phi_one, self.lmbda) + + # compute weight for the integration of \int_0^ci exp(dt*(ci-r)lmbda)*PiQ(r)dr, + # where PiQ(r) is a polynomial interpolating some nodal values Q(c_i)=Q[i]. 
+ # The integral of PiQ will be approximated as: + # \int_0^ci exp(dt*(ci-r)lmbda)*PiQ(r)dr ~= \sum_{j=0}^{M-1} Qmat_exp[i,j]*Q[j] + + k = np.arange(0, M) + wgt_tmp = self.w[None, :, :] * c[:, None, None] ** (k[None, None, :] + 1) + self.Qmat_exp = np.sum(wgt_tmp[:, :, :, None, None] * self.phi[:, None, :, :, :], axis=2) + + def integrate(self): + """ + Integrates the right-hand side (here impl + expl + exp) using exponential Runge-Kutta + + Returns: + list of dtype_u: containing the integral as values + """ + + # get current level and problem description + L = self.level + P = L.prob + M = self.coll.num_nodes + + self.compute_lambda_phi_Qmat_exp() + + if not hasattr(self, "Q"): + self.Q = P.dtype_u(init=P.init_exp_extruded((M,)), val=0.0) + + for k in range(M): + self.Q[k][:] = L.f[k + 1].exp[P.rhs_exp_indeces] + self.lmbda * ( + L.u[0][P.rhs_exp_indeces] - L.u[k + 1][P.rhs_exp_indeces] + ) + + # integrate RHS over all collocation nodes + me = [P.dtype_u(init=P.init, val=0.0) for _ in range(M)] + for m in range(1, M + 1): + for j in range(1, M + 1): + me[m - 1][P.rhs_stiff_indeces] += self.coll.Qmat[m, j] * L.f[j].impl[P.rhs_stiff_indeces] + me[m - 1][P.rhs_nonstiff_indeces] += self.coll.Qmat[m, j] * L.f[j].expl[P.rhs_nonstiff_indeces] + me[m - 1][P.rhs_exp_indeces] += np.sum(self.Qmat_exp[m - 1] * self.Q, axis=0) + + me[m - 1] *= L.dt + + return me + + def update_nodes(self): + """ + Update the u- and f-values at the collocation nodes -> corresponds to a single sweep over all nodes + + Returns: + None + """ + + # get current level and problem description + L = self.level + P = L.prob + + # only if the level has been touched before + assert L.status.unlocked + + # get number of collocation nodes for easier access + M = self.coll.num_nodes + + integral = self.integrate() + for m in range(M): + if L.tau[m] is not None: + integral[m] += L.tau[m] + for i in range(1, M): + integral[M - i] -= integral[M - i - 1] + + # prepare the integral term + for m in range(M): + 
integral[m][P.rhs_stiff_indeces] += -L.dt * self.delta[m] * L.f[m + 1].impl[P.rhs_stiff_indeces] + integral[m][P.rhs_nonstiff_indeces] += -L.dt * self.delta[m] * L.f[m].expl[P.rhs_nonstiff_indeces] + integral[m][P.rhs_exp_indeces] += ( + -L.dt + * self.delta[m] + * self.phi_one[m][0] + * (L.f[m].exp[P.rhs_exp_indeces] + self.lmbda * (L.u[0][P.rhs_exp_indeces] - L.u[m][P.rhs_exp_indeces])) + ) + + # do the sweep + for m in range(M): + + tmp = L.u[m] + integral[m] + tmp[P.rhs_exp_indeces] += ( + L.dt + * self.delta[m] + * self.phi_one[m][0] + * (L.f[m].exp[P.rhs_exp_indeces] + self.lmbda * (L.u[0][P.rhs_exp_indeces] - L.u[m][P.rhs_exp_indeces])) + ) + tmp[P.rhs_nonstiff_indeces] += L.dt * self.delta[m] * L.f[m].expl[P.rhs_nonstiff_indeces] + + # implicit solve with prefactor stemming from QI + L.u[m + 1] = P.solve_system( + tmp, L.dt * self.QI[m + 1, m + 1], L.u[m + 1], L.time + L.dt * self.coll.nodes[m], L.u[m + 1] + ) + + # update function values + P.eval_f(L.u[m + 1], L.time + L.dt * self.coll.nodes[m], fh=L.f[m + 1]) + + # indicate presence of new values at this level + L.status.updated = True + + return None + + def compute_end_point(self): + """ + Compute u at the right point of the interval + + The value uend computed here is a full evaluation of the Picard formulation unless do_full_update==False + + Returns: + None + """ + + # get current level and problem description + L = self.level + P = L.prob + + # check if Mth node is equal to right point and do_coll_update is false, perform a simple copy + if self.coll.right_is_node and not self.params.do_coll_update: + # a copy is sufficient + L.uend = P.dtype_u(L.u[-1]) + else: + raise CollocationError("This option is not implemented yet.") + + return None + + def rel_norm(self, a, b): + norms = [] + for i in range(len(a)): + norms.append(np.linalg.norm(a[i]) / np.linalg.norm(b[i])) + return np.average(norms) + + def compute_residual(self, stage=''): + """ + Computation of the residual using the collocation matrix Q 
+ + Args: + stage (str): The current stage of the step the level belongs to + """ + + # get current level and problem description + L = self.level + + # Check if we want to skip the residual computation to gain performance + # Keep in mind that skipping any residual computation is likely to give incorrect outputs of the residual! + if stage in self.params.skip_residual_computation: + L.status.residual = 0.0 if L.status.residual is None else L.status.residual + return None + + # check if there are new values (e.g. from a sweep) + # assert L.status.updated + + # compute the residual for each node + + # build QF(u) + res_norm = [] + rel_res_norm = [] + res = self.integrate() + for m in range(self.coll.num_nodes): + res[m] += L.u[0] + res[m] -= L.u[m + 1] + # add tau if associated + if L.tau[m] is not None: + res[m] += L.tau[m] + # use abs function from data type here + res_norm.append(abs(res[m])) + # the different components of the monodomain equation have very different magnitude therefore we use a tailored relative norm here to avoid the cancellation of the smaller components + rel_res_norm.append(self.rel_norm(res[m], L.u[0])) + + # find maximal residual over the nodes + if L.params.residual_type == 'full_abs': + L.status.residual = max(res_norm) + elif L.params.residual_type == 'last_abs': + L.status.residual = res_norm[-1] + elif L.params.residual_type == 'full_rel': + L.status.residual = max(rel_res_norm) + elif L.params.residual_type == 'last_rel': + L.status.residual = rel_res_norm[-1] + else: + raise ParameterError( + f'residual_type = {L.params.residual_type} not implemented, choose ' + f'full_abs, last_abs, full_rel or last_rel instead' + ) + + # indicate that the residual has seen the new values + L.status.updated = False + + return None diff --git a/pySDC/projects/Monodomain/sweeper_classes/runge_kutta/imexexp_1st_order.py b/pySDC/projects/Monodomain/sweeper_classes/runge_kutta/imexexp_1st_order.py new file mode 100644 index 0000000000..fbd6d45341 --- 
/dev/null
+++ b/pySDC/projects/Monodomain/sweeper_classes/runge_kutta/imexexp_1st_order.py
@@ -0,0 +1,145 @@
+import numpy as np
+
+from pySDC.core.Sweeper import sweeper
+from pySDC.core.Errors import CollocationError
+
+
+class imexexp_1st_order(sweeper):
+    """
+    Custom sweeper class, implements Sweeper.py
+
+    First-order IMEXEXP sweeper using implicit/explicit/exponential Euler as base integrator
+    In the cardiac electrophysiology community this is known as Rush-Larsen scheme.
+
+    """
+
+    def __init__(self, params):
+        """
+        Initialization routine for the custom sweeper
+
+        Args:
+            params: parameters for the sweeper
+        """
+
+        if "QI" not in params:
+            params["QI"] = "IE"
+
+        # call parent's initialization routine
+        super(imexexp_1st_order, self).__init__(params)
+
+        # IMEX integration matrices
+        self.QI = self.get_Qdelta_implicit(coll=self.coll, qd_type=self.params.QI)
+        self.delta = np.diagonal(self.QI)[1:]
+
+    def eval_phi_f_exp(self, u, factor):
+        """
+        Evaluates the exponential part of the right-hand side f_exp(u)=lambda(u)*(u-y_inf(u)) multiplied by the exponential factor phi_1(factor*lambda)
+        Since phi_1(z)=(e^z-1)/z then phi_1(factor*lambda) * f_exp(u) = ((e^(factor*lambda)-1)/factor) *(u-y_inf(u))
+        """
+        L = self.level
+        P = L.prob
+        self.lmbda = P.dtype_u(init=P.init, val=0.0)
+        self.yinf = P.dtype_u(init=P.init, val=0.0)
+        P.eval_lmbda_yinf_exp(u, self.lmbda, self.yinf)
+        phi_f_exp = P.dtype_u(init=P.init, val=0.0)
+        for i in P.rhs_exp_indeces:
+            phi_f_exp[i][:] = u[i] - self.yinf[i][:]
+            phi_f_exp[i][:] *= (np.exp(factor * self.lmbda[i]) - 1.0) / factor
+
+        return phi_f_exp
+
+    def integrate(self):
+        """
+        Integrates the right-hand side (here impl + expl + exp)
+
+        Returns:
+            list of dtype_u: containing the integral as values
+        """
+
+        # get current level and problem description
+        L = self.level
+
+        me = []
+
+        # integrate RHS over all collocation nodes
+        for m in range(1, self.coll.num_nodes + 1):
+            me.append(L.dt * self.coll.Qmat[m, 1] * (L.f[1].impl
+ L.f[1].expl + L.f[1].exp)) + for j in range(2, self.coll.num_nodes + 1): + me[m - 1] += L.dt * self.coll.Qmat[m, j] * (L.f[j].impl + L.f[j].expl + L.f[j].exp) + + return me + + def update_nodes(self): + """ + Update the u- and f-values at the collocation nodes -> corresponds to a single sweep over all nodes + + Returns: + None + """ + + # get current level and problem description + L = self.level + P = L.prob + + # only if the level has been touched before + assert L.status.unlocked + + # get number of collocation nodes for easier access + M = self.coll.num_nodes + + integral = self.integrate() + for m in range(M): + if L.tau[m] is not None: + integral[m] += L.tau[m] + for i in range(1, M): + integral[M - i] -= integral[M - i - 1] + + # do the sweep + for m in range(M): + integral[m] -= ( + L.dt + * self.delta[m] + * (L.f[m].expl + L.f[m + 1].impl + self.eval_phi_f_exp(L.u[m], L.dt * self.delta[m])) + ) + for m in range(M): + rhs = ( + L.u[m] + + integral[m] + + L.dt * self.delta[m] * (L.f[m].expl + self.eval_phi_f_exp(L.u[m], L.dt * self.delta[m])) + ) + + # implicit solve with prefactor stemming from QI + L.u[m + 1] = P.solve_system(rhs, L.dt * self.delta[m], L.u[m + 1], L.time + L.dt * self.coll.nodes[m]) + + # update function values + L.f[m + 1] = P.eval_f(L.u[m + 1], L.time + L.dt * self.coll.nodes[m]) + + # indicate presence of new values at this level + L.status.updated = True + + return None + + def compute_end_point(self): + """ + Compute u at the right point of the interval + + The value uend computed here is a full evaluation of the Picard formulation unless do_full_update==False + + Returns: + None + """ + + # get current level and problem description + L = self.level + P = L.prob + + # check if Mth node is equal to right point and do_coll_update is false, perform a simple copy + if self.coll.right_is_node and not self.params.do_coll_update: + # a copy is sufficient + L.uend = P.dtype_u(L.u[-1]) + else: + raise CollocationError( + "In this sweeper we 
expect the right point to be a collocation node and do_coll_update==False" + ) + + return None diff --git a/pySDC/projects/Monodomain/transfer_classes/TransferVectorOfDCTVectors.py b/pySDC/projects/Monodomain/transfer_classes/TransferVectorOfDCTVectors.py new file mode 100644 index 0000000000..3a21390c81 --- /dev/null +++ b/pySDC/projects/Monodomain/transfer_classes/TransferVectorOfDCTVectors.py @@ -0,0 +1,40 @@ +from pySDC.core.SpaceTransfer import space_transfer +from pySDC.projects.Monodomain.transfer_classes.Transfer_DCT_Vector import DCT_to_DCT +from pySDC.implementations.datatype_classes.mesh import mesh +from pySDC.projects.Monodomain.datatype_classes.my_mesh import imexexp_mesh + + +class TransferVectorOfDCTVectors(space_transfer): + """ + This implementation can restrict and prolong VectorOfVectors + """ + + def __init__(self, fine_prob, coarse_prob, params): + # invoke super initialization + super(TransferVectorOfDCTVectors, self).__init__(fine_prob, coarse_prob, params) + + self.DCT_to_DCT = DCT_to_DCT(fine_prob, coarse_prob, params) + + def restrict(self, F): + + u_coarse = mesh(self.coarse_prob.init) + + for i in range(self.coarse_prob.size): + u_coarse[i][:] = self.DCT_to_DCT.restrict(F[i]) + + return u_coarse + + def prolong(self, G): + + if isinstance(G, imexexp_mesh): + u_fine = imexexp_mesh(self.fine_prob.init) + for i in range(self.fine_prob.size): + u_fine.impl[i][:] = self.DCT_to_DCT.prolong(G.impl[i]) + u_fine.expl[i][:] = self.DCT_to_DCT.prolong(G.expl[i]) + u_fine.exp[i][:] = self.DCT_to_DCT.prolong(G.exp[i]) + elif isinstance(G, mesh): + u_fine = mesh(self.fine_prob.init) + for i in range(self.fine_prob.size): + u_fine[i][:] = self.DCT_to_DCT.prolong(G[i]) + + return u_fine diff --git a/pySDC/projects/Monodomain/transfer_classes/Transfer_DCT_Vector.py b/pySDC/projects/Monodomain/transfer_classes/Transfer_DCT_Vector.py new file mode 100644 index 0000000000..f0bf2ca98b --- /dev/null +++ 
b/pySDC/projects/Monodomain/transfer_classes/Transfer_DCT_Vector.py
@@ -0,0 +1,70 @@
+import scipy.fft as fft
+
+from pySDC.core.SpaceTransfer import space_transfer
+from pySDC.implementations.datatype_classes.mesh import mesh
+
+
+class DCT_to_DCT(space_transfer):
+    """
+    Class to transfer data between two meshes using DCT.
+    Restriction is performed by zeroing out high frequency modes, while prolongation is done by zero-padding.
+
+    Arguments:
+    ----------
+    fine_prob: fine problem
+    coarse_prob: coarse problem
+    params: parameters for the transfer operators
+    """
+
+    def __init__(self, fine_prob, coarse_prob, params):
+
+        # invoke super initialization
+        super(DCT_to_DCT, self).__init__(fine_prob, coarse_prob, params)
+
+        self.norm = "forward"
+
+        self.fine_shape = self.fine_prob.parabolic.shape
+        self.coarse_shape = self.coarse_prob.parabolic.shape
+
+        if self.fine_shape == self.coarse_shape:
+            self.same_grid = True
+        else:
+            self.same_grid = False
+
+    def restrict(self, F):
+        """
+        Restriction operator
+        Args:
+            F: the fine level data (easier to access than via the fine attribute)
+        """
+
+        G = mesh(self.coarse_prob.parabolic.init)
+
+        if self.same_grid:
+            G[:] = F
+        else:
+
+            G[:] = fft.idctn(
+                fft.dctn(F.reshape(self.fine_shape), norm=self.norm), s=self.coarse_shape, norm=self.norm
+            ).ravel()
+
+        return G
+
+    def prolong(self, G):
+        """
+        Prolongation operator
+        Args:
+            G: the coarse level data (easier to access than via the coarse attribute)
+        """
+
+        F = mesh(self.fine_prob.parabolic.init)
+
+        if self.same_grid:
+            F[:] = G
+        else:
+
+            F[:] = fft.idctn(
+                fft.dctn(G.reshape(self.coarse_shape), norm=self.norm), s=self.fine_shape, norm=self.norm
+            ).ravel()
+
+        return F
diff --git a/pySDC/projects/Monodomain/utils/data_management.py b/pySDC/projects/Monodomain/utils/data_management.py
new file mode 100644
index 0000000000..970c1ab6e7
--- /dev/null
+++ b/pySDC/projects/Monodomain/utils/data_management.py
@@ -0,0 +1,107 @@
+import sqlite3
+import json
+import
os + + +class database: + def __init__(self, name): + self.name = name + path = os.path.dirname(self.name) + if path != "" and not os.path.exists(path): + os.makedirs(path) + self.conn = sqlite3.connect(f'{self.name}.db') + self.cursor = self.conn.cursor() + + # def write_arrays(self, table, arrays, columns_names=None): + # if not isinstance(arrays, list): + # arrays = [arrays] + # n = len(arrays) + # if columns_names is None: + # columns_names = ["val_" + str(i) for i in range(n)] + + # self.cursor.execute(f'DROP TABLE IF EXISTS {table}') + # self.cursor.execute(f'CREATE TABLE {table} ({self._convert_list_str_to_arg(columns_names)})') + # self.cursor.execute(f'INSERT INTO {table} VALUES ({self._convert_list_str_to_arg(["?"]*n)})', arrays) + # self.conn.commit() + + def write_dictionary(self, table, dic): + self.cursor.execute(f'DROP TABLE IF EXISTS {table}') + self.cursor.execute(f'CREATE TABLE {table} (dic TEXT)') + self.cursor.execute(f'INSERT INTO {table} VALUES (?)', [json.dumps(dic)]) + self.conn.commit() + + # def read_arrays(self, table, columns_names=None, dtype=np.double): + # if columns_names is None: + # self.cursor.execute(f"SELECT * FROM {table}") + # else: + # self.cursor.execute(f"SELECT {self._convert_list_str_to_arg(columns_names)} FROM {table}") + # result = self.cursor.fetchone() + # result_new = list() + # for res in result: + # result_new.append(np.frombuffer(res, dtype=dtype)) + # if len(result_new) > 1: + # return result_new + # else: + # return result_new[0] + + def read_dictionary(self, table): + self.cursor.execute(f"SELECT dic FROM {table}") + (json_dic,) = self.cursor.fetchone() + return json.loads(json_dic) + + def _convert_list_str_to_arg(self, str_list): + return str(str_list).replace("'", "").replace("[", "").replace("]", "") + + def __del__(self): + self.conn.close() + + +# def main(): +# data = database("test") +# a = np.array([1.0, 2.0, 3.0]) +# b = np.array([10.0, 11.0, 12.0]) +# data.write_arrays("ab_table", [a, b], ['a', 'b']) 
+# data.write_arrays("a_table", [a], ['a']) +# data.write_arrays("b_table", b, 'b') +# data.write_arrays("ab_table_noname", [a, b]) +# data.write_arrays("b_table_noname", b) + +# a_new, b_new = data.read_arrays("ab_table", ['a', 'b']) +# print(a) +# print(a_new) +# print(b) +# print(b_new) + +# a_new = data.read_arrays("a_table", "a") +# print(a) +# print(a_new) +# b_new = data.read_arrays("b_table", "b") +# print(b) +# print(b_new) + +# a_new, b_new = data.read_arrays("ab_table_noname") +# print(a) +# print(a_new) +# print(b) +# print(b_new) + +# b_new = data.read_arrays("b_table_noname") +# print(b) +# print(b_new) + +# dic = {"name": "Giacomo", "age": 33} +# data.write_dictionary("dic_table", dic) +# dic_new = data.read_dictionary("dic_table") +# print(dic) +# print(dic_new) + +# data_read = database("test") +# a_new, b_new = data_read.read_arrays("ab_table", ['a', 'b']) +# print(a) +# print(a_new) +# print(b) +# print(b_new) + + +# if __name__ == "__main__": +# main() diff --git a/pySDC/projects/Monodomain/visualization/show_monodomain_sol.py b/pySDC/projects/Monodomain/visualization/show_monodomain_sol.py new file mode 100644 index 0000000000..47fba6aede --- /dev/null +++ b/pySDC/projects/Monodomain/visualization/show_monodomain_sol.py @@ -0,0 +1,99 @@ +import numpy as np +import matplotlib.pyplot as plt +import matplotlib.animation as animation +from pathlib import Path +import os + +# Script for displaying the solution of the monodomain equation + +executed_file_dir = os.path.dirname(os.path.realpath(__file__)) +output_root = executed_file_dir + "/../../../../data/Monodomain/results_tmp/" +domain_name = "cube_1D" +refinements = 2 +ionic_model = "TTP" +file_name = "monodomain" +file_path = Path(output_root + domain_name + "/" + "ref_" + str(refinements) + "/" + ionic_model + "/" + file_name) + + +# no need to modifiy below this line +# ------------------------------------------------------------------------------ +if not 
file_path.with_suffix(".npy").is_file(): + print(f"File {str(file_path)} does not exist") + exit() + +with open(str(file_path) + "_txyz.npy", "rb") as file: + t = np.load(file, allow_pickle=True) +n_dt = t.size +print(f"t_end = {t[-1]}, n_dt = {n_dt}") + +V = [] +with open(file_path.with_suffix(".npy"), "rb") as file: + for _ in range(n_dt): + V.append(np.load(file, allow_pickle=True)) + +Vmin = np.min(V[0].flatten()) +Vmax = np.max(V[0].flatten()) +for Vi in V: + Vmin = min(Vmin, np.min(Vi.flatten())) + Vmax = max(Vmax, np.max(Vi.flatten())) + +Vmin = 1.1 * max(Vmin, -100) +Vmax = 1.1 * min(Vmax, 200) +print(f"Vmin = {Vmin}, Vmax = {Vmax}") + +dim = len(V[0].shape) + +with open(str(file_path) + "_txyz.npy", "rb") as file: + t = np.load(file, allow_pickle=True) + xyz = [] + for _ in range(dim): + xyz.append(np.load(file, allow_pickle=True)) + +if dim == 1: + fig, ax = plt.subplots() + ax.set(ylim=[Vmin, Vmax], xlabel="x [mm]", ylabel="V [mV]") + line = ax.plot(xyz[0], V[0])[0] +elif dim == 2: + fig, ax = plt.subplots() + ax.set(xlabel="x [mm]", ylabel="y [mm]") + ax.set_aspect(aspect="equal") + line = ax.pcolormesh(xyz[0], xyz[1], V[0], cmap=plt.cm.jet, vmin=Vmin, vmax=Vmax) + fig.colorbar(line) +elif dim == 3: + Z, Y, X = np.meshgrid(xyz[2].ravel(), xyz[1].ravel(), xyz[0].ravel(), indexing="ij") + kw = {"vmin": Vmin, "vmax": Vmax, "levels": np.linspace(Vmin, Vmax, 10)} + # fig, ax = plt.subplots(projection="3d") + fig = plt.figure(figsize=(5, 4)) + ax = fig.add_subplot(111, projection="3d") + A = ax.contourf(V[0][:, :, 0], Y[:, :, 0], Z[:, :, 0], zdir="x", offset=0, **kw) + B = ax.contourf(X[:, 0, :], V[0][:, 0, :], Z[:, 0, :], zdir="y", offset=0, **kw) + C = ax.contourf(X[0, :, :], Y[0, :, :], V[0][0, :, :], zdir="z", offset=0, **kw) + # D = ax.contourf(V[0][:, :, -1], Y[:, :, -1], Z[:, :, -1], zdir="x", offset=X.max(), **kw) + + xmin, xmax = X.min(), X.max() + ymin, ymax = Y.min(), Y.max() + zmin, zmax = Z.min(), Z.max() + ax.set(xlim=[xmin, xmax], ylim=[ymin, 
ymax], zlim=[zmin, zmax]) + ax.set(xlabel="x [mm]", ylabel="y [mm]", zlabel="z [mm]") + fig.colorbar(A, ax=ax, fraction=0.02, pad=0.1, label="V [mV]") + + +def plot_V(k): + ax.set_title(f"V at t = {t[k]:.3f} [ms]") + if dim == 1: + line.set_ydata(V[k]) + return line + elif dim == 2: + line.set_array(V[k].flatten()) + return line + elif dim == 3: + A = ax.contourf(V[k][:, :, 0], Y[:, :, 0], Z[:, :, 0], zdir="x", offset=0, **kw) + B = ax.contourf(X[:, 0, :], V[k][:, 0, :], Z[:, 0, :], zdir="y", offset=0, **kw) + C = ax.contourf(X[0, :, :], Y[0, :, :], V[k][0, :, :], zdir="z", offset=0, **kw) + # D = ax.contourf(V[k][:, :, -1], Y[:, :, -1], Z[:, :, -1], zdir="x", offset=X.max(), **kw) + return A, B, C + + +anim = animation.FuncAnimation(fig=fig, func=plot_V, interval=1, frames=n_dt, repeat=False) +plt.show() +# anim.save(file_path.with_suffix(".gif")) diff --git a/pySDC/projects/PinTSimE/battery_model.py b/pySDC/projects/PinTSimE/battery_model.py index 20f2e11f1c..3c09be10b8 100644 --- a/pySDC/projects/PinTSimE/battery_model.py +++ b/pySDC/projects/PinTSimE/battery_model.py @@ -167,10 +167,13 @@ def generateDescription( 'convergence_controllers': convergence_controllers, } - return description, controller_params + # instantiate controller + controller = controller_nonMPI(num_procs=1, controller_params=controller_params, description=description) + + return description, controller_params, controller -def controllerRun(description, controller_params, t0, Tend, exact_event_time_avail=False): +def controllerRun(description, controller_params, controller, t0, Tend, exact_event_time_avail=False): """ Executes a controller run for a problem defined in the description. @@ -180,6 +183,8 @@ def controllerRun(description, controller_params, t0, Tend, exact_event_time_ava Contains all information for a controller run. controller_params : dict Parameters needed for a controller run. + controller : pySDC.core.Controller + Controller to do the stuff. 
t0 : float Starting time of simulation. Tend : float @@ -193,9 +198,6 @@ def controllerRun(description, controller_params, t0, Tend, exact_event_time_ava Raw statistics from a controller run. """ - # instantiate controller - controller = controller_nonMPI(num_procs=1, controller_params=controller_params, description=description) - # get initial values on finest level P = controller.MS[0].levels[0].prob uinit = P.u_exact(t0) @@ -233,7 +235,7 @@ def main(): 'max_restarts': 50, 'recomputed': False, 'tol_event': 1e-10, - 'alpha': 1.0, + 'alpha': 0.96, 'exact_event_time_avail': None, } @@ -244,8 +246,8 @@ def main(): hook_class = [LogSolution, LogEventBattery, LogEmbeddedErrorEstimate, LogStepSize] - use_detection = [True] - use_adaptivity = [True] + use_detection = [True, False] + use_adaptivity = [True, False] for problem, sweeper in zip([battery, battery_implicit], [imex_1st_order, generic_implicit]): for defaults in [False, True]: @@ -360,7 +362,7 @@ def runSimulation(problem, sweeper, all_params, use_adaptivity, use_detection, h restol = -1 if use_A else handling_params['restol'] - description, controller_params = generateDescription( + description, controller_params, controller = generateDescription( dt=dt, problem=problem, sweeper=sweeper, @@ -381,6 +383,7 @@ def runSimulation(problem, sweeper, all_params, use_adaptivity, use_detection, h stats, t_switch_exact = controllerRun( description=description, controller_params=controller_params, + controller=controller, t0=interval[0], Tend=interval[-1], exact_event_time_avail=handling_params['exact_event_time_avail'], @@ -492,7 +495,7 @@ def plotSolution(u_num, prob_cls_name, use_adaptivity, use_detection): # pragma ax.set_xlabel(r'$t$', fontsize=16) ax.set_ylabel(r'$u(t)$', fontsize=16) - fig.savefig('data/{}_model_solution.png'.format(prob_cls_name), dpi=300, bbox_inches='tight') + fig.savefig(f'data/{prob_cls_name}_model_solution.png', dpi=300, bbox_inches='tight') plt_helper.plt.close(fig) diff --git 
a/pySDC/projects/PinTSimE/buck_model.py b/pySDC/projects/PinTSimE/buck_model.py index 3a2423d339..1394d8f397 100644 --- a/pySDC/projects/PinTSimE/buck_model.py +++ b/pySDC/projects/PinTSimE/buck_model.py @@ -55,7 +55,7 @@ def main(): use_adaptivity=use_adaptivity, use_detection=use_detection, hook_class=hook_class, - interval=(0.0, 2e-2), + interval=(0.0, 1e-2), dt_list=[1e-5, 2e-5], nnodes=[M_fix], ) diff --git a/pySDC/projects/PinTSimE/discontinuous_test_ODE.py b/pySDC/projects/PinTSimE/discontinuous_test_ODE.py index 46bee5c4d9..22f36fe426 100644 --- a/pySDC/projects/PinTSimE/discontinuous_test_ODE.py +++ b/pySDC/projects/PinTSimE/discontinuous_test_ODE.py @@ -68,8 +68,9 @@ def main(): 'max_restarts': 50, 'recomputed': False, 'tol_event': 1e-12, - 'alpha': 1.0, + 'alpha': 0.96, 'exact_event_time_avail': True, + 'typeFD': 'backward', } # ---- all parameters are stored in this dictionary ---- diff --git a/pySDC/projects/PinTSimE/estimation_check.py b/pySDC/projects/PinTSimE/estimation_check.py index 61fb858584..de05b8d6b3 100644 --- a/pySDC/projects/PinTSimE/estimation_check.py +++ b/pySDC/projects/PinTSimE/estimation_check.py @@ -45,7 +45,7 @@ def run_estimation_check(): 'max_restarts': 50, 'recomputed': False, 'tol_event': 1e-10, - 'alpha': 1.0, + 'alpha': 0.96, 'exact_event_time_avail': None, } @@ -114,7 +114,7 @@ def run_estimation_check(): plotAccuracyCheck(u_num, prob_cls_name, M_fix) - plotStateFunctionAroundEvent(u_num, prob_cls_name, M_fix) + # plotStateFunctionAroundEvent(u_num, prob_cls_name, M_fix) plotStateFunctionOverTime(u_num, prob_cls_name, M_fix) @@ -187,6 +187,9 @@ def plotStateFunctionAroundEvent(u_num, prob_cls_name, M_fix): # pragma: no cov Routine that plots the state function at time before the event, exactly at the event, and after the event. Note that this routine does make sense only for a state function that remains constant after the event. + TODO: Function still does not work as expected. 
Every time when the switch estimator is adapted, the tolerances + does not suit anymore! + Parameters ---------- u_num : dict @@ -239,15 +242,18 @@ def plotStateFunctionAroundEvent(u_num, prob_cls_name, M_fix): # pragma: no cov if use_SE: t_switches = [u_num[dt][M_fix][use_SE][use_A]['t_switches'] for dt in dt_list] - t_switch = [t_event[i] for t_event in t_switches] + for t_switch_item in t_switches: + mask = np.append([True], np.abs(t_switch_item[1:] - t_switch_item[:-1]) > 1e-10) + t_switch_item = t_switch_item[mask] + t_switch = [t_event[i] for t_event in t_switches] ax[0, ind].plot( dt_list, [ h_item[m] for (t_item, h_item, t_switch_item) in zip(t, h, t_switch) for m in range(len(t_item)) - if abs(t_item[m] - t_switch_item) <= 1e-14 + if abs(t_item[m] - t_switch_item) <= 2.7961188919789493e-11 ], color='limegreen', marker='s', diff --git a/pySDC/projects/PinTSimE/hardcoded_solutions.py b/pySDC/projects/PinTSimE/hardcoded_solutions.py index 27733a12ec..3a8b7b93f6 100644 --- a/pySDC/projects/PinTSimE/hardcoded_solutions.py +++ b/pySDC/projects/PinTSimE/hardcoded_solutions.py @@ -43,23 +43,23 @@ def testSolution(u_num, prob_cls_name, dt, use_adaptivity, use_detection): msg = f"Error when using switch estimator and adaptivity for {prob_cls_name} for dt={dt:.1e}:" if dt == 1e-2: expected = { - 'iL': 0.5614559718189012, - 'vC': 1.0053361988800296, - 't_switches': [0.18232155679214296], - 'dt': 0.11767844320785703, - 'e_em': 7.811640223565064e-12, - 'sum_restarts': 3.0, - 'sum_niters': 56.0, + 'iL': 0.5393867577881641, + 'vC': 0.9999999999913842, + 't_switches': [0.1823215567921536, 0.18232155679215356, 0.18232155679173784], + 'dt': 0.09453745651144455, + 'e_em': 1.7587042933087105e-12, + 'sum_restarts': 15.0, + 'sum_niters': 280.0, } elif dt == 1e-3: expected = { - 'iL': 0.5393867578949986, - 'vC': 1.0000000000165197, - 't_switches': [0.18232155677793654], - 'dt': 0.015641173481932502, - 'e_em': 2.220446049250313e-16, - 'sum_restarts': 14.0, - 'sum_niters': 328.0, + 
'iL': 0.5393867577223005, + 'vC': 0.9999999999813279, + 't_switches': [0.18232155676894835, 0.1823215567897308, 0.18232155678877865], + 'dt': 0.06467602356229402, + 'e_em': 1.1468603844377867e-13, + 'sum_restarts': 17.0, + 'sum_niters': 368.0, } got.update( { @@ -75,19 +75,19 @@ def testSolution(u_num, prob_cls_name, dt, use_adaptivity, use_detection): msg = f"Error when using switch estimator for {prob_cls_name} for dt={dt:.1e}:" if dt == 1e-2: expected = { - 'iL': 0.549122133626298, - 'vC': 0.9999999999999998, - 't_switches': [0.1823215567939562], - 'sum_restarts': 4.0, - 'sum_niters': 296.0, + 'iL': 0.5456190026495924, + 'vC': 0.9991666666670524, + 't_switches': [0.18232155679395579, 0.18232155679395592, 0.18232155679356965], + 'sum_restarts': 14.0, + 'sum_niters': 416.0, } elif dt == 1e-3: expected = { - 'iL': 0.5408462989990014, - 'vC': 1.0, - 't_switches': [0.18232155679395023], - 'sum_restarts': 2.0, - 'sum_niters': 2424.0, + 'iL': 0.5403849766797957, + 'vC': 0.9999166666675774, + 't_switches': [0.18232155679395004, 0.18232155679303919], + 'sum_restarts': 11.0, + 'sum_niters': 2536.0, } got.update( { @@ -151,23 +151,23 @@ def testSolution(u_num, prob_cls_name, dt, use_adaptivity, use_detection): msg = f"Error when using switch estimator and adaptivity for {prob_cls_name} for dt={dt:.1e}:" if dt == 1e-2: expected = { - 'iL': 0.5614559717904407, - 'vC': 1.0053361988803866, - 't_switches': [0.18232155679736195], - 'dt': 0.11767844320263804, + 'iL': 0.5393867577468375, + 'vC': 0.9999999999980123, + 't_switches': [0.18232155680038617, 0.1823215568023739], + 'dt': 0.08896232033732146, 'e_em': 2.220446049250313e-16, - 'sum_restarts': 3.0, - 'sum_niters': 56.0, + 'sum_restarts': 15.0, + 'sum_niters': 280.0, } elif dt == 1e-3: expected = { - 'iL': 0.5393867577837699, - 'vC': 1.0000000000250129, - 't_switches': [0.1823215568036829], - 'dt': 0.015641237833012522, + 'iL': 0.5393867576415584, + 'vC': 0.9999999999802239, + 't_switches': [0.18232155678530526, 
0.1823215568066914, 0.1823215568057151], + 'dt': 0.06333183541149384, 'e_em': 2.220446049250313e-16, - 'sum_restarts': 14.0, - 'sum_niters': 328.0, + 'sum_restarts': 17.0, + 'sum_niters': 368.0, } got.update( { @@ -183,19 +183,19 @@ def testSolution(u_num, prob_cls_name, dt, use_adaptivity, use_detection): msg = f"Error when using switch estimator for {prob_cls_name} for dt={dt:.1e}:" if dt == 1e-2: expected = { - 'iL': 0.5456190026227917, - 'vC': 0.999166666666676, - 't_switches': [0.18232155679663525], - 'sum_restarts': 4.0, - 'sum_niters': 296.0, + 'iL': 0.5490992952561473, + 'vC': 0.9999999999982524, + 't_switches': [0.1823215567992934, 0.18232155680104123], + 'sum_restarts': 14.0, + 'sum_niters': 416.0, } elif dt == 1e-3: expected = { - 'iL': 0.5407340515794409, - 'vC': 1.0000000000010945, - 't_switches': [0.182321556796257], - 'sum_restarts': 3.0, - 'sum_niters': 2440.0, + 'iL': 0.5407340516779595, + 'vC': 0.9999999999936255, + 't_switches': [0.18232155676519302], + 'sum_restarts': 10.0, + 'sum_niters': 2536.0, } got.update( { @@ -259,25 +259,25 @@ def testSolution(u_num, prob_cls_name, dt, use_adaptivity, use_detection): msg = f"Error when using switch estimator and adaptivity for {prob_cls_name} for dt={dt:.1e}:" if dt == 1e-2: expected = { - 'iL': 0.6244130166029733, - 'vC1': 0.999647921822499, - 'vC2': 1.0000000000714673, - 't_switches': [0.18232155679216916, 0.3649951297770592], - 'dt': 0.01, - 'e_em': 2.220446049250313e-16, - 'sum_restarts': 19.0, - 'sum_niters': 400.0, + 'iL': 0.6125019898578352, + 'vC1': 1.0000000000471956, + 'vC2': 1.0000000000165106, + 't_switches': [0.18232155678158268, 0.36464311353802376], + 'dt': 0.0985931246953285, + 'e_em': 2.295386103412511e-12, + 'sum_restarts': 24.0, + 'sum_niters': 552.0, } elif dt == 1e-3: expected = { - 'iL': 0.6112496171462107, - 'vC1': 0.9996894956748836, - 'vC2': 1.0, - 't_switches': [0.1823215567907929, 0.3649535697059346], - 'dt': 0.07298158272977251, - 'e_em': 2.703393064962256e-13, - 
'sum_restarts': 11.0, - 'sum_niters': 216.0, + 'iL': 0.6125019901065321, + 'vC1': 1.0000000000787372, + 'vC2': 1.000000000028657, + 't_switches': [0.1823215567907939, 0.3646431134803315], + 'dt': 0.07154669986159717, + 'e_em': 2.3381296898605797e-13, + 'sum_restarts': 22.0, + 'sum_niters': 472.0, } got.update( { @@ -293,21 +293,21 @@ def testSolution(u_num, prob_cls_name, dt, use_adaptivity, use_detection): msg = f"Error when using switch estimator for {prob_cls_name} for dt={dt:.1e}:" if dt == 1e-2: expected = { - 'iL': 0.6314080101219072, - 'vC1': 0.9999999999999998, - 'vC2': 0.9999999999999996, - 't_switches': [0.1823215567939562, 0.3646431135879125], - 'sum_restarts': 8.0, - 'sum_niters': 512.0, + 'iL': 0.6313858468030417, + 'vC1': 1.0000000002414198, + 'vC2': 1.0000000000095093, + 't_switches': [0.18232155679395579, 0.3646431133464922], + 'sum_restarts': 19.0, + 'sum_niters': 664.0, } elif dt == 1e-3: expected = { - 'iL': 0.6152346866530549, - 'vC1': 1.0, - 'vC2': 1.0, - 't_switches': [0.18232155679395023, 0.3646431135879003], - 'sum_restarts': 4.0, - 'sum_niters': 4048.0, + 'iL': 0.6151254295045797, + 'vC1': 1.0000000000227713, + 'vC2': 1.0000000000329365, + 't_switches': [0.18232155680153855, 0.3646431135651182], + 'sum_restarts': 16.0, + 'sum_niters': 4224.0, } got.update( { @@ -375,21 +375,21 @@ def testSolution(u_num, prob_cls_name, dt, use_adaptivity, use_detection): msg = f"Error when using switch estimator for {prob_cls_name} for dt={dt:.1e}:" if dt == 1e-2: expected = { - 'u': 5.9941358952954955, - 't_switches': [1.6094379124671208], - 'sum_restarts': 25.0, - 'sum_niters': 710.0, - 'e_global': 8.195133460731086e-11, - 'e_event': [3.302047524300633e-11], + 'u': 5.998326995729771, + 'e_global': 0.0041911003550794135, + 't_switches': [1.6094379124660123], + 'e_event': [3.1912028575220575e-11], + 'sum_restarts': 22.0, + 'sum_niters': 675.0, } elif dt == 1e-3: expected = { - 'u': 5.971767837651004, - 't_switches': [1.6094379124247695], - 'sum_restarts': 
23.0, - 'sum_niters': 2388.0, - 'e_global': 2.3067769916451653e-11, - 'e_event': [9.330758388159666e-12], + 'u': 5.9721869476933005, + 'e_global': 0.0004191100622819022, + 't_switches': [1.6094379124262566, 1.6094379124260099], + 'e_event': [7.843725668976731e-12], + 'sum_restarts': 20.0, + 'sum_niters': 2352.0, } got.update( { @@ -406,14 +406,14 @@ def testSolution(u_num, prob_cls_name, dt, use_adaptivity, use_detection): if dt == 1e-2: expected = { 'u': 5.9805345175338225, - 'sum_niters': 527.0, 'e_global': 0.009855041056925806, + 'sum_niters': 527.0, } elif dt == 1e-3: expected = { 'u': 5.9737411566014105, - 'sum_niters': 2226.0, 'e_global': 0.0005763403865515215, + 'sum_niters': 2226.0, } got.update( { @@ -450,17 +450,17 @@ def testSolution(u_num, prob_cls_name, dt, use_adaptivity, use_detection): msg = f"Error for {prob_cls_name} for dt={dt:.1e}:" if dt == 2e-5: expected = { - 'vC1': 9.890997780767632, - 'vC2': 4.710415385551326, - 'iLp': -0.315406990615236, - 'sum_niters': 5036.0, + 'vC1': 9.781955920747619, + 'vC2': 6.396971204930281, + 'iLp': -1.1056614708409171, + 'sum_niters': 2519.0, } elif dt == 1e-5: expected = { - 'vC1': 9.891508522329485, - 'vC2': 4.70939963429714, - 'iLp': -0.32177442457657557, - 'sum_niters': 8262.0, + 'vC1': 9.782142840662102, + 'vC2': 6.388775533709242, + 'iLp': -1.0994027552202539, + 'sum_niters': 4242.0, } got.update( { @@ -474,7 +474,10 @@ def testSolution(u_num, prob_cls_name, dt, use_adaptivity, use_detection): for key in expected.keys(): if key == 't_switches' or key == 'e_event': err_msg = f'{msg} Expected {key}={expected[key]}, got {key}={got[key]}' - assert all(np.isclose(expected[key], got[key], atol=1e-4)) == True, err_msg + if len(expected[key]) == got[key]: + assert np.allclose(expected[key], got[key], atol=1e-4) == True, err_msg + else: + assert np.isclose(expected[key][-1], got[key][-1], atol=1e-4) == True, err_msg else: err_msg = f'{msg} Expected {key}={expected[key]:.4e}, got {key}={got[key]:.4e}' assert 
np.isclose(expected[key], got[key], atol=1e-4), err_msg diff --git a/pySDC/projects/PinTSimE/paper_PSCC2024/log_event.py b/pySDC/projects/PinTSimE/paper_PSCC2024/log_event.py index 6db702465a..02e724d209 100644 --- a/pySDC/projects/PinTSimE/paper_PSCC2024/log_event.py +++ b/pySDC/projects/PinTSimE/paper_PSCC2024/log_event.py @@ -8,7 +8,7 @@ class LogEventDiscontinuousTestDAE(hooks): """ def post_step(self, step, level_number): - super(LogEventDiscontinuousTestDAE, self).post_step(step, level_number) + super().post_step(step, level_number) L = step.levels[level_number] @@ -21,7 +21,7 @@ def post_step(self, step, level_number): iter=0, sweep=L.status.sweep, type='state_function', - value=2 * L.uend[0] - 100, + value=2 * L.uend.diff[0] - 100, ) @@ -32,7 +32,7 @@ class LogEventWSCC9(hooks): """ def post_step(self, step, level_number): - super(LogEventWSCC9, self).post_step(step, level_number) + super().post_step(step, level_number) L = step.levels[level_number] P = L.prob @@ -46,5 +46,5 @@ def post_step(self, step, level_number): iter=0, sweep=L.status.sweep, type='state_function', - value=L.uend[10 * P.m] - P.psv_max, + value=L.uend.diff[10 * P.m] - P.psv_max, ) diff --git a/pySDC/projects/PinTSimE/switch_estimator.py b/pySDC/projects/PinTSimE/switch_estimator.py index 3c438c134a..d9e9574544 100644 --- a/pySDC/projects/PinTSimE/switch_estimator.py +++ b/pySDC/projects/PinTSimE/switch_estimator.py @@ -1,9 +1,11 @@ import numpy as np import scipy as sp +from pySDC.core.Errors import ParameterError from pySDC.core.Collocation import CollBase from pySDC.core.ConvergenceController import ConvergenceController, Status from pySDC.implementations.convergence_controller_classes.check_convergence import CheckConvergence +from pySDC.core.Lagrange import LagrangeApproximation class SwitchEstimator(ConvergenceController): @@ -16,9 +18,11 @@ def setup(self, controller, params, description): r""" Function sets default variables to handle with the event at the beginning. 
The default params are: - - control_order : controls the order of the SE's call of convergence controllers - - coll.nodes : defines the collocation nodes for interpolation - - tol_zero : inner tolerance for SE; state function has to satisfy it to terminate + - control_order : controls the order of the SE's call of convergence controllers. + - coll.nodes : defines the collocation nodes for interpolation. + - tol_zero : inner tolerance for SE; state function has to satisfy it to terminate. + - t_interp : interpolation axis with time points. + - state_function : List of values from state function. Parameters ---------- @@ -45,7 +49,9 @@ def setup(self, controller, params, description): defaults = { 'control_order': 0, 'nodes': coll.nodes, - 'tol_zero': 1e-13, + 'tol_zero': 2.5e-12, + 't_interp': [], + 'state_function': [], } return {**defaults, **params} @@ -88,22 +94,25 @@ def get_new_step_size(self, controller, S, **kwargs): L = S.levels[0] if CheckConvergence.check_convergence(S): - self.status.switch_detected, m_guess, state_function = L.prob.get_switching_info(L.u, L.time) + self.status.switch_detected, m_guess, self.params.state_function = L.prob.get_switching_info(L.u, L.time) if self.status.switch_detected: - t_interp = [L.time + L.dt * self.params.nodes[m] for m in range(len(self.params.nodes))] - t_interp, state_function = self.adapt_interpolation_info( - L.time, L.sweep.coll.left_is_node, t_interp, state_function + self.params.t_interp = [L.time + L.dt * self.params.nodes[m] for m in range(len(self.params.nodes))] + self.params.t_interp, self.params.state_function = self.adapt_interpolation_info( + L.time, L.sweep.coll.left_is_node, self.params.t_interp, self.params.state_function ) # when the state function is already close to zero the event is already resolved well - if abs(state_function[-1]) <= self.params.tol_zero or abs(state_function[0]) <= self.params.tol_zero: - if abs(state_function[0]) <= self.params.tol_zero: - t_switch = t_interp[0] + if ( + 
abs(self.params.state_function[-1]) <= self.params.tol_zero + or abs(self.params.state_function[0]) <= self.params.tol_zero + ): + if abs(self.params.state_function[0]) <= self.params.tol_zero: + t_switch = self.params.t_interp[0] boundary = 'left' - elif abs(state_function[-1]) <= self.params.tol_zero: + elif abs(self.params.state_function[-1]) <= self.params.tol_zero: boundary = 'right' - t_switch = t_interp[-1] + t_switch = self.params.t_interp[-1] msg = f"The value of state function is close to zero, thus event time is already close enough to the {boundary} end point!" self.log(msg, S) @@ -115,27 +124,19 @@ def get_new_step_size(self, controller, S, **kwargs): self.status.is_zero = True # intermediate value theorem states that a root is contained in current step - if state_function[0] * state_function[-1] < 0 and self.status.is_zero is None: - self.status.t_switch = self.get_switch(t_interp, state_function, m_guess) - - controller.hooks[0].add_to_stats( - process=S.status.slot, - time=L.time, - level=L.level_index, - iter=0, - sweep=L.status.sweep, - type='switch_all', - value=self.status.t_switch, - ) - controller.hooks[0].add_to_stats( - process=S.status.slot, - time=L.time, - level=L.level_index, - iter=0, - sweep=L.status.sweep, - type='h_all', - value=max([abs(item) for item in state_function]), + if self.params.state_function[0] * self.params.state_function[-1] < 0 and self.status.is_zero is None: + self.status.t_switch = self.get_switch(self.params.t_interp, self.params.state_function, m_guess) + + self.logging_during_estimation( + controller.hooks[0], + S.status.slot, + L.time, + L.level_index, + L.status.sweep, + self.status.t_switch, + self.params.state_function, ) + if L.time < self.status.t_switch < L.time + L.dt: dt_switch = (self.status.t_switch - L.time) * self.params.alpha @@ -254,9 +255,30 @@ def log_event_time(controller_hooks, process, time, level, sweep, t_switch): value=t_switch, ) + @staticmethod + def 
logging_during_estimation(controller_hooks, process, time, level, sweep, t_switch, state_function): + controller_hooks.add_to_stats( + process=process, + time=time, + level=level, + iter=0, + sweep=sweep, + type='switch_all', + value=t_switch, + ) + controller_hooks.add_to_stats( + process=process, + time=time, + level=level, + iter=0, + sweep=sweep, + type='h_all', + value=max([abs(item) for item in state_function]), + ) + @staticmethod def get_switch(t_interp, state_function, m_guess): - """ + r""" Routine to do the interpolation and root finding stuff. Parameters @@ -274,27 +296,17 @@ def get_switch(t_interp, state_function, m_guess): Time point of found event. """ - LagrangeInterpolator = LagrangeInterpolation(t_interp, state_function) + LagrangeInterpolation = LagrangeApproximation(points=t_interp, fValues=state_function) + p = lambda t: LagrangeInterpolation.__call__(t) - def p(t): - """ - Simplifies the call of the interpolant. + def fprime(t): + r""" + Computes the derivative of the scalar interpolant using finite difference. Here, + the derivative is approximated by the backward difference: - Parameters - ---------- - t : float - Time t at which the interpolant is called. + .. math:: + \frac{dp}{dt} \approx \frac{25 p(t) - 48 p(t - h) + 36 p(t - 2 h) - 16 p(t - 3h) + 3 p(t - 4 h)}{12 h} - Returns - ------- - p(t) : float - The value of the interpolated function at time t. - """ - return LagrangeInterpolator.eval(t) - - def fprime(t): - """ - Computes the derivative of the scalar interpolant using finite differences. Parameters ---------- @@ -306,11 +318,14 @@ def fprime(t): dp : float Derivative of interpolation p at time t. 
""" + dt_FD = 1e-10 - dp = (p(t + dt_FD) - p(t)) / dt_FD # forward difference + dp = ( + 25 * p(t) - 48 * p(t - dt_FD) + 36 * p(t - 2 * dt_FD) - 16 * p(t - 3 * dt_FD) + 3 * p(t - 4 * dt_FD) + ) / (12 * dt_FD) return dp - newton_tol, newton_maxiter = 1e-15, 100 + newton_tol, newton_maxiter = 1e-14, 100 t_switch = newton(t_interp[m_guess], p, fprime, newton_tol, newton_maxiter) return t_switch @@ -361,7 +376,7 @@ def newton(x0, p, fprime, newton_tol, newton_maxiter): p : callable Interpolated function where Newton's method is applied at. fprime : callable - Approximated erivative of p using finite differences. + Approximated derivative of p using finite differences. newton_tol : float Tolerance for termination. newton_maxiter : int @@ -375,57 +390,19 @@ def newton(x0, p, fprime, newton_tol, newton_maxiter): n = 0 while n < newton_maxiter: - if abs(p(x0)) < newton_tol or np.isnan(p(x0)) and np.isnan(fprime(x0)): + res = abs(p(x0)) + if res < newton_tol or np.isnan(p(x0)) and np.isnan(fprime(x0)) or np.isclose(fprime(x0), 0.0): break x0 -= 1.0 / fprime(x0) * p(x0) n += 1 - root = x0 + if n == newton_maxiter: + msg = f'Newton did not converge after {n} iterations, error is {res}' + else: + msg = f'Newton did converge after {n} iterations, error for root {x0} is {res}' + print(msg) + root = x0 return root - - -class LagrangeInterpolation(object): - def __init__(self, ti, yi): - """Initialization routine""" - self.ti = np.asarray(ti) - self.yi = np.asarray(yi) - self.n = len(ti) - - def get_Lagrange_polynomial(self, t, i): - """ - Computes the basis of the i-th Lagrange polynomial. - - Parameters - ---------- - t : float - Time where the polynomial is computed at. - i : int - Index of the Lagrange polynomial - - Returns - ------- - product : float - The product of the bases. 
- """ - product = np.prod([(t - self.ti[k]) / (self.ti[i] - self.ti[k]) for k in range(self.n) if k != i]) - return product - - def eval(self, t): - """ - Evaluates the Lagrange interpolation at time t. - - Parameters - ---------- - t : float - Time where interpolation is computed. - - Returns - ------- - p : float - Value of interpolant at time t. - """ - p = np.sum([self.yi[i] * self.get_Lagrange_polynomial(t, i) for i in range(self.n)]) - return p diff --git a/pySDC/projects/Resilience/README.rst b/pySDC/projects/Resilience/README.rst index 9e292c0db6..f89e8db843 100644 --- a/pySDC/projects/Resilience/README.rst +++ b/pySDC/projects/Resilience/README.rst @@ -47,15 +47,13 @@ To reproduce the plots you need to install pySDC with all packages in the mpi4py Then, navigate to this directory, `pySDC/projects/Resilience/` and run the following commands: -```bash +.. code-block:: bash -mpirun -np 4 python work_precision.py -mpirun -np 4 python fault_stats.py prob run_vdp -mpirun -np 4 python fault_stats.py prob run_quench -mpirun -np 4 python fault_stats.py prob run_AC -mpirun -np 4 python fault_stats.py prob run_Schroedinger -python paper_plots.py - -``` + mpirun -np 4 python work_precision.py + mpirun -np 4 python fault_stats.py prob run_vdp + mpirun -np 4 python fault_stats.py prob run_quench + mpirun -np 4 python fault_stats.py prob run_AC + mpirun -np 4 python fault_stats.py prob run_Schroedinger + python paper_plots.py Possibly, you need to create some directories in this one to store and load things, if path errors occur. 
diff --git a/pySDC/projects/Resilience/paper_plots.py b/pySDC/projects/Resilience/paper_plots.py index 9804ebc2b9..b48056acbb 100644 --- a/pySDC/projects/Resilience/paper_plots.py +++ b/pySDC/projects/Resilience/paper_plots.py @@ -214,9 +214,9 @@ def compare_recovery_rate_problems(**kwargs): # pragma: no cover ax.get_legend().remove() if kwargs.get('strategy_type', 'SDC') == 'SDC': - axs[1, 1].legend(frameon=False) + axs[1, 1].legend(frameon=False, loc="lower right") else: - axs[0, 1].legend(frameon=False) + axs[0, 1].legend(frameon=False, loc="lower right") axs[0, 0].set_ylim((-0.05, 1.05)) axs[1, 0].set_ylabel('recovery rate') axs[0, 0].set_ylabel('recovery rate') @@ -236,7 +236,6 @@ def plot_adaptivity_stuff(): # pragma: no cover Returns: None """ - from pySDC.implementations.convergence_controller_classes.estimate_embedded_error import EstimateEmbeddedError from pySDC.implementations.hooks.log_errors import LogLocalErrorPostStep from pySDC.implementations.hooks.log_work import LogWork from pySDC.projects.Resilience.hook import LogData @@ -402,8 +401,8 @@ def plot_quench_solution(): # pragma: no cover u = get_sorted(stats, type='u', recomputed=False) ax.plot([me[0] for me in u], [max(me[1]) for me in u], color='black', label='$T$') - ax.axhline(prob.u_thresh, label='$T_\mathrm{thresh}$', ls='--', color='grey', zorder=-1) - ax.axhline(prob.u_max, label='$T_\mathrm{max}$', ls=':', color='grey', zorder=-1) + ax.axhline(prob.u_thresh, label=r'$T_\mathrm{thresh}$', ls='--', color='grey', zorder=-1) + ax.axhline(prob.u_max, label=r'$T_\mathrm{max}$', ls=':', color='grey', zorder=-1) ax.set_xlabel(r'$t$') ax.legend(frameon=False) @@ -470,14 +469,6 @@ def plot_vdp_solution(): # pragma: no cover def work_precision(): # pragma: no cover from pySDC.projects.Resilience.work_precision import ( all_problems, - single_problem, - ODEs, - get_fig, - execute_configurations, - save_fig, - get_configs, - MPI, - vdp_stiffness_plot, ) all_params = { @@ -491,47 +482,6 @@ def 
work_precision(): # pragma: no cover for mode in ['compare_strategies', 'parallel_efficiency', 'RK_comp']: all_problems(**all_params, mode=mode) - # # Quench stuff - # fig, axs = get_fig(x=3, y=1, figsize=figsize_by_journal('Springer_Numerical_Algorithms', 1, 0.47)) - # quench_params = { - # **all_params, - # 'problem': run_quench, - # 'decorate': True, - # 'configurations': get_configs('step_size_limiting', run_quench), - # 'num_procs': 1, - # 'runs': 1, - # 'comm_world': MPI.COMM_WORLD, - # 'mode': 'step_size_limiting', - # } - # quench_params.pop('base_path', None) - # execute_configurations(**{**quench_params, 'work_key': 'k_SDC', 'precision_key': 'k_Newton'}, ax=axs[2]) - # execute_configurations(**{**quench_params, 'work_key': 'param', 'precision_key': 'restart'}, ax=axs[1]) - # execute_configurations(**{**quench_params, 'work_key': 't', 'precision_key': 'e_global_rel'}, ax=axs[0]) - # axs[1].set_yscale('linear') - # # axs[2].set_yscale('linear') - # axs[2].set_xscale('linear') - # axs[1].set_xlabel(r'$e_\mathrm{tol}$') - # # axs[0].set_xticks([1e0, 3e0], [r'$10^{0}$', r'$3\times 10^{0}$'], minor=False) - - # for ax in axs: - # ax.set_title(ax.get_ylabel()) - # ax.set_ylabel('') - # fig.suptitle('Quench') - - # axs[1].set_yticks([4.0, 6.0, 8.0, 10.0, 12.0], minor=False) - - # save_fig( - # fig=fig, - # name=f'{run_quench.__name__}', - # work_key='step-size', - # precision_key='limiting', - # legend=True, - # base_path=all_params["base_path"], - # ) - # End Quench stuff - - # vdp_stiffness_plot(base_path='data/paper') - def make_plots_for_TIME_X_website(): # pragma: no cover global JOURNAL, BASE_PATH diff --git a/pySDC/projects/Resilience/strategies.py b/pySDC/projects/Resilience/strategies.py index ecc98c5203..4dff7d6ce7 100644 --- a/pySDC/projects/Resilience/strategies.py +++ b/pySDC/projects/Resilience/strategies.py @@ -473,7 +473,7 @@ def __init__(self, **kwargs): @property def label(self): - return r'$\Delta t$ adaptivity' + return r'$\Delta 
t$-adaptivity' def get_fixable_params(self, maxiter, **kwargs): """ @@ -716,7 +716,7 @@ def __init__(self, **kwargs): @property def label(self): - return r'$k$ adaptivity' + return r'$k$-adaptivity' def get_custom_description(self, problem, num_procs): ''' @@ -1949,4 +1949,4 @@ def get_reference_value(self, problem, key, op, num_procs=1): @property def label(self): - return r'$\Delta t$-$k~\mathrm{adaptivity}$' + return r'$\Delta t$-$k$-adaptivity' diff --git a/pySDC/projects/Resilience/work_precision.py b/pySDC/projects/Resilience/work_precision.py index 2c21e1e6a6..8ff1f92b35 100644 --- a/pySDC/projects/Resilience/work_precision.py +++ b/pySDC/projects/Resilience/work_precision.py @@ -836,7 +836,7 @@ def get_configs(mode, problem): for num_procs in [4, 1]: plotting_params = ( - {'ls': ls[num_procs], 'label': fr'$\Delta t$ adaptivity $N$={num_procs}x1'} if num_procs > 1 else {} + {'ls': ls[num_procs], 'label': fr'$\Delta t$-adaptivity $N$={num_procs}x1'} if num_procs > 1 else {} ) configurations[num_procs] = { 'strategies': [AdaptivityStrategy(useMPI=True)], @@ -853,7 +853,7 @@ def get_configs(mode, problem): 'num_procs': num_procs, 'plotting_params': { 'ls': ls.get(num_procs * 3, '-'), - 'label': rf'$\Delta t$-$k$ adaptivity $N$={num_procs}x3', + 'label': rf'$\Delta t$-$k$-adaptivity $N$={num_procs}x3', }, } @@ -1104,7 +1104,7 @@ def get_configs(mode, problem): 'strategies': [AdaptivityPolynomialError(useMPI=True)], 'num_procs': 1, 'num_procs_sweeper': 3, - 'plotting_params': {'label': r'$\Delta t$-$k$ adaptivity $N$=1x3'}, + 'plotting_params': {'label': r'$\Delta t$-$k$-adaptivity $N$=1x3'}, } configurations[-1] = { 'strategies': [ @@ -1121,7 +1121,7 @@ def get_configs(mode, problem): 'strategies': [AdaptivityStrategy(useMPI=True)], 'custom_description': desc, 'num_procs': 4, - 'plotting_params': {'label': r'$\Delta t$ adaptivity $N$=4x1'}, + 'plotting_params': {'label': r'$\Delta t$-adaptivity $N$=4x1'}, } elif mode == 'RK_comp_high_order': @@ -1274,6 +1274,7 
@@ def save_fig( ncols=ncols if ncols else 3 if len(handles) % 3 == 0 else 4, frameon=False, fancybox=True, + handlelength=2.2, ) path = f'{base_path}/wp-{name}-{work_key}-{precision_key}.{format}' diff --git a/pySDC/projects/Second_orderSDC/README.md b/pySDC/projects/Second_orderSDC/README.rst similarity index 56% rename from pySDC/projects/Second_orderSDC/README.md rename to pySDC/projects/Second_orderSDC/README.rst index 510161576d..0081f6a135 100644 --- a/pySDC/projects/Second_orderSDC/README.md +++ b/pySDC/projects/Second_orderSDC/README.rst @@ -1,29 +1,42 @@ -# Spectral Deferred Correction Methods for Second-Order Problems +Spectral Deferred Correction Methods for Second-Order Problems +============================================================== Python code for implementing the paper's plots on Second-order SDC methods. -## Attribution You are welcome to use and adapt this code under the terms of the BSD license. If you utilize it, either in whole or in part, for a publication, please provide proper citation: -**Title:** Spectral Deferred Correction Methods for Second-order Problems - -**Authors:** Ikrom Akramov, Sebastian Götschel, Michael Minion, Daniel Ruprecht, and Robert Speck. +.. code-block:: tex -[![DOI](http://example.com)](http://example.com) + @misc{akramov2023spectral, + title={Spectral deferred correction methods for second-order problems}, + author={Ikrom Akramov and Sebastian Götschel and Michael Minion and Daniel Ruprecht and Robert Speck}, + year={2023}, + eprint={2310.08352}, + archivePrefix={arXiv}, + primaryClass={math.NA}} -## Reproducing Figures from the Publication -- **Fig. 1:** Execute `dampedharmonic_oscillator_run_stability.py` while setting `kappa_max=18` and `mu_max=18`. -- **Fig. 2:** Run `dampedharmonic_oscillator_run_stability.py` with the following configurations: +Reproducing Figures from the Publication +---------------------------------------- + +- **Fig. 
1:** Execute `harmonic_oscillator_run_stability.py` while setting `kappa_max=18` and `mu_max=18`. +- **Fig. 2:** Run `harmonic_oscillator_run_stability.py` with the following configurations: - Set `kappa_max=30` and `mu_max=30`. - Adjust `maxiter` to 1, 2, 3, or 4 and execute each individually. +- **Table 1:** Execute `harmonic_oscillator_run_stab_interval.py`: + - For the Picard iteration set: `Picard=True` + - To save the results set: `save_interval_file=True` + +- Use the script `harmonic_oscillator_run_points.py` to create a table based on given $(\kappa, \mu)$ points. This table assists in determining suitable values for `M`, `K`, and `quadrature nodes` to ensure stability in the SDC method. + - To save the results set: `save_points_file=True` + - **Fig. 3:** Run `penningtrap_run_error.py` (Run local convergence: `conv.run_local_error()`) with `dt=0.015625/4` and `axes=(0,)`. - **Fig. 4:** Run `penningtrap_run_error.py` (Run local convergence: `conv.run_local_error()`) using `dt=0.015625*4` and `axes=(2,)`. - **Fig. 5:** Run `penningtrap_run_error.py` (Run global convergence: `conv.run_global_error()`) with `dt=0.015625*2`: - Note: Perform each run individually: first with `axes=(0,)`, then with `axes=(2,)`. - Manually set y-axis limits in `penningtrap_run_error.py`, specifically in lines 147-148. -- **Table 1:** Execute `penningtrap_run_error.py` (Run global convergence: `conv.run_global_error()`) with the following settings: +- **Table 2:** Execute `penningtrap_run_error.py` (Run global convergence: `conv.run_global_error()`) with the following settings: - Expected order and approximate order are saved in the file: `data/global_order_vs_approx_order.csv` - Set: `K_iter=(2, 3, 4, 10)` - For `M=2`: @@ -40,6 +53,6 @@ If you utilize it, either in whole or in part, for a publication, please provide - Set `dt=0.015625*4`, and `K_iter=(2, 4, 6)`, and `dt_cont=2` for `axis=(0,)`. -## Who do I talk to? 
- -This code is written by [Ikrom Akramov](https://www.mat.tuhh.de/home/iakramov/?homepage_id=iakramov). +Contact +------- +This code is written by `Ikrom Akramov `_ diff --git a/pySDC/projects/Second_orderSDC/check_data_folder.py b/pySDC/projects/Second_orderSDC/check_data_folder.py index f9088d4063..cbd6098bbd 100644 --- a/pySDC/projects/Second_orderSDC/check_data_folder.py +++ b/pySDC/projects/Second_orderSDC/check_data_folder.py @@ -3,7 +3,7 @@ folder_name = "./data" # Check if the folder already exists -if not os.path.isdir(folder_name): # pragma: no cover +if not os.path.isdir(folder_name): # Create the folder os.makedirs(folder_name) else: diff --git a/pySDC/projects/Second_orderSDC/dampedharmonic_oscillator_run_stability.py b/pySDC/projects/Second_orderSDC/dampedharmonic_oscillator_run_stability.py deleted file mode 100644 index 65173d9d1e..0000000000 --- a/pySDC/projects/Second_orderSDC/dampedharmonic_oscillator_run_stability.py +++ /dev/null @@ -1,67 +0,0 @@ -import numpy as np - -from pySDC.implementations.problem_classes.HarmonicOscillator import harmonic_oscillator -from pySDC.implementations.sweeper_classes.boris_2nd_order import boris_2nd_order -from pySDC.projects.Second_orderSDC.penningtrap_Simulation import Stability_implementation - - -def dampedharmonic_oscillator_params(): - """ - Runtine to compute modulues of the stability function - - Returns: - description - """ - - # initialize level parameters - level_params = dict() - level_params['restol'] = 1e-16 - level_params["dt"] = 1.0 - - # initialize problem parameters for the Damped harmonic oscillator problem - problem_params = dict() - problem_params["k"] = 0 - problem_params["mu"] = 0 - problem_params["u0"] = np.array([1, 1]) - - # initialize sweeper parameters - sweeper_params = dict() - sweeper_params['quad_type'] = 'GAUSS' - sweeper_params["num_nodes"] = 3 - sweeper_params["do_coll_update"] = True - sweeper_params["picard_mats_sweep"] = True - - # initialize step parameters - step_params = 
dict() - step_params['maxiter'] = 50 - - # fill description dictionary for easy step instantiation - description = dict() - description["problem_class"] = harmonic_oscillator - description["problem_params"] = problem_params - description["sweeper_class"] = boris_2nd_order - description["sweeper_params"] = sweeper_params - description["level_params"] = level_params - description["step_params"] = step_params - - return description - - -if __name__ == '__main__': - """ - Damped harmonic oscillatro as test problem for the stability plot: - x'=v - v'=-kappa*x-mu*v - kappa: spring constant - mu: friction - - https://beltoforion.de/en/harmonic_oscillator/ - """ - # exec(open("check_data_folder.py").read()) - description = dampedharmonic_oscillator_params() - Stability = Stability_implementation(description, kappa_max=18, mu_max=18, Num_iter=(200, 200)) - Stability.run_SDC_stability() - Stability.run_Picard_stability() - Stability.run_RKN_stability() - Stability.run_Ksdc() - # Stability.run_Kpicard diff --git a/pySDC/projects/Second_orderSDC/harmonic_oscillator_params.py b/pySDC/projects/Second_orderSDC/harmonic_oscillator_params.py new file mode 100644 index 0000000000..2a5c41f112 --- /dev/null +++ b/pySDC/projects/Second_orderSDC/harmonic_oscillator_params.py @@ -0,0 +1,36 @@ +import numpy as np +from pySDC.implementations.problem_classes.HarmonicOscillator import harmonic_oscillator +from pySDC.implementations.sweeper_classes.boris_2nd_order import boris_2nd_order + + +def get_default_harmonic_oscillator_description(): + """ + Routine to compute modules of the stability function + + Returns: + description (dict): A dictionary containing parameters for the damped harmonic oscillator problem + """ + + # Initialize level parameters + level_params = {'restol': 1e-16, 'dt': 1.0} + + # Initialize problem parameters for the Damped harmonic oscillator problem + problem_params = {'k': 0, 'mu': 0, 'u0': np.array([1, 1])} + + # Initialize sweeper parameters + sweeper_params = 
{'quad_type': 'GAUSS', 'num_nodes': 3, 'do_coll_update': True, 'picard_mats_sweep': True} + + # Initialize step parameters + step_params = {'maxiter': 50} + + # Fill description dictionary for easy step instantiation + description = { + 'problem_class': harmonic_oscillator, + 'problem_params': problem_params, + 'sweeper_class': boris_2nd_order, + 'sweeper_params': sweeper_params, + 'level_params': level_params, + 'step_params': step_params, + } + + return description diff --git a/pySDC/projects/Second_orderSDC/harmonic_oscillator_run_points.py b/pySDC/projects/Second_orderSDC/harmonic_oscillator_run_points.py new file mode 100644 index 0000000000..eb687786e0 --- /dev/null +++ b/pySDC/projects/Second_orderSDC/harmonic_oscillator_run_points.py @@ -0,0 +1,31 @@ +import numpy as np +from pySDC.projects.Second_orderSDC.harmonic_oscillator_params import get_default_harmonic_oscillator_description +from pySDC.projects.Second_orderSDC.stability_simulation import compute_and_generate_table + +if __name__ == '__main__': + ''' + This script generates a table to compare stability of the SDC iteration for given points, + exploring different quadrature types, number of nodes, and number of iterations. + + Additional parameters in the function `compute_and_generate_table`: + - To save the table, set: `save_points_table=True`. + - To change the filename, set `points_table_filename='FILENAME'`. Default is './data/point_table.txt'. + ''' + # This code checks if the "data" folder exists or not. 
+ exec(open("check_data_folder.py").read()) + # Get default parameters for the harmonic oscillator problem + description = get_default_harmonic_oscillator_description() + + # Additional parameters to compute stability points + helper_params = { + 'quad_type_list': ('GAUSS', 'LOBATTO'), # List of quadrature types + 'Num_iter': (2, 2), # Number of iterations + 'num_nodes_list': np.arange(3, 6, 1), # List of number of nodes + 'max_iter_list': np.arange(2, 10, 1), # List of maximum iterations + } + + points = ((1, 100), (3, 100), (10, 100)) # Stability parameters: (kappa, mu) + + # Iterate through points and perform stability check + for ii in points: + compute_and_generate_table(description, helper_params, ii, check_stability_point=True, save_points_table=False) diff --git a/pySDC/projects/Second_orderSDC/harmonic_oscillator_run_stab_interval.py b/pySDC/projects/Second_orderSDC/harmonic_oscillator_run_stab_interval.py new file mode 100644 index 0000000000..2ecb5e531a --- /dev/null +++ b/pySDC/projects/Second_orderSDC/harmonic_oscillator_run_stab_interval.py @@ -0,0 +1,41 @@ +import numpy as np +from pySDC.projects.Second_orderSDC.harmonic_oscillator_params import get_default_harmonic_oscillator_description +from pySDC.projects.Second_orderSDC.stability_simulation import compute_and_generate_table + +if __name__ == '__main__': + ''' + Main script to compute maximum stable values for SDC and Picard iterations for the purely oscillatory case with no damping (mu=0). + + Additional parameters in the `compute_and_generate_table` function: + - To save the stability table, set `save_interval_file=True`. + - To change the filename, set `interval_filename='FILENAME'`. Default is './data/stab_interval.txt'. + + Output: + The script generates data to compare different values of M (number of nodes) and K (maximal number of iterations). + ''' + # This code checks if the "data" folder exists or not. 
+ exec(open("check_data_folder.py").read()) + # Get default parameters for the harmonic oscillator + description = get_default_harmonic_oscillator_description() + + # Additional parameters to compute stability interval on the kappa + # ============================================================================= + # To get exactly the same as table in the paper set: + # 'Num_iter': (500, 1) for the SDC iteration + # 'Num_iter': (2000, 1) for the Picard iteration + # ============================================================================= + helper_params = { + 'quad_type_list': ('GAUSS',), # Type of quadrature + 'Num_iter': (500, 1), # Number of iterations + 'num_nodes_list': np.arange(2, 7, 1), # List of number of nodes + 'max_iter_list': np.arange(1, 11, 1), # List of maximum iterations + } + + points = ((100, 1e-10),) # Stability parameters: (Num_iter, mu) + + # Iterate through points and perform stability check + for ii in points: + # If you want to get the table for the Picard iteration set Picard=True + compute_and_generate_table( + description, helper_params, ii, compute_interval=True, Picard=False, save_interval_file=True + ) diff --git a/pySDC/projects/Second_orderSDC/harmonic_oscillator_run_stability.py b/pySDC/projects/Second_orderSDC/harmonic_oscillator_run_stability.py new file mode 100644 index 0000000000..3f0e7325cd --- /dev/null +++ b/pySDC/projects/Second_orderSDC/harmonic_oscillator_run_stability.py @@ -0,0 +1,33 @@ +from pySDC.projects.Second_orderSDC.harmonic_oscillator_params import get_default_harmonic_oscillator_description +from pySDC.projects.Second_orderSDC.stability_simulation import StabilityImplementation + + +if __name__ == '__main__': + """ + To implement Stability region for the Harmonic Oscillator problem + Run for + SDC stability region: model_stab.run_SDC_stability() + Picard stability region: model_stab.run_Picard_stability() + Runge-Kutta-Nzström stability region: model_run_RKN_stability() + To implement spectral radius of 
iteration matrix + Run: + Iteration matrix of SDC method: model_stab.run_Ksdc() + Iteration matrix of Picard method: model_stab.run_Kpicard() + + """ + # This code checks if the "data" folder exists or not. + exec(open("check_data_folder.py").read()) + # Execute the stability analysis for the damped harmonic oscillator + description = get_default_harmonic_oscillator_description() + # ============================================================================= + # maxiter can be changed here manually. By default, maxiter is 50 + description['step_params']['maxiter'] = 50 + # ============================================================================= + + model_stab = StabilityImplementation(description, kappa_max=18, mu_max=18, Num_iter=(200, 200)) + + model_stab.run_SDC_stability() + model_stab.run_Picard_stability() + model_stab.run_RKN_stability() + model_stab.run_Ksdc() + # model_stab.run_Kpicard() diff --git a/pySDC/projects/Second_orderSDC/penningtrap_Simulation.py b/pySDC/projects/Second_orderSDC/penningtrap_Simulation.py index 3c2e08d426..480cfe064b 100644 --- a/pySDC/projects/Second_orderSDC/penningtrap_Simulation.py +++ b/pySDC/projects/Second_orderSDC/penningtrap_Simulation.py @@ -1,419 +1,15 @@ -# import matplotlib - -# matplotlib.use('Agg') -# import os - -import matplotlib.pyplot as plt import numpy as np from pySDC.helpers.stats_helper import get_sorted from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI from pySDC.projects.Second_orderSDC.penningtrap_HookClass import particles_output from pySDC.implementations.sweeper_classes.Runge_Kutta_Nystrom import RKN, Velocity_Verlet -from pySDC.core.Errors import ProblemError -from pySDC.core.Step import step - - -def fixed_plot_params(): # pragma: no cover - """ - Setting fixed parameters for the all of the plots - """ - fs = 16 - plt.rcParams['figure.figsize'] = 7.44, 6.74 - plt.rcParams['pgf.rcfonts'] = False - - plt.rcParams['lines.linewidth'] = 2.5 - 
plt.rcParams['axes.titlesize'] = fs + 5 - plt.rcParams['axes.labelsize'] = fs + 5 - plt.rcParams['xtick.labelsize'] = fs - plt.rcParams['ytick.labelsize'] = fs - plt.rcParams['xtick.major.pad'] = 5 - plt.rcParams['ytick.major.pad'] = 5 - plt.rcParams['axes.labelpad'] = 6 - plt.rcParams['lines.markersize'] = fs - 2 - plt.rcParams['lines.markeredgewidth'] = 1 - plt.rcParams['mathtext.fontset'] = 'cm' - plt.rcParams['mathtext.rm'] = 'serif' - plt.rc('font', size=fs) - - -class plotmanager(object): # pragma: no cover - """ - This class generates all of the plots of the Second-order SDC plots. - """ - - def __init__(self, controller_params, description, time_iter=3, K_iter=(1, 2, 3), Tend=2, axes=(1,), cwd=''): - self.controller_params = controller_params - self.description = description - self.time_iter = time_iter - self.K_iter = K_iter - self.Tend = Tend - self.axes = axes - self.cwd = cwd - self.quad_type = self.description['sweeper_params']['quad_type'] - self.num_nodes = self.description['sweeper_params']['num_nodes'] - self.error_type = 'local' - - def plot_convergence(self): # pragma: no cover - """ - Plot convergence order plots for the position and velocity - If you change parameters of the values you need set y_lim values need to set manually - """ - fixed_plot_params() - [N, time_data, error_data, order_data, convline] = self.organize_data( - filename='data/dt_vs_{}_errorSDC.csv'.format(self.error_type) - ) - - color = ['r', 'brown', 'g', 'blue'] - shape = ['o', 'd', 's', 'x'] - - fig1, ax1 = plt.subplots() - fig2, ax2 = plt.subplots() - value = self.axes[0] - for ii in range(0, N): - ax1.loglog(time_data[ii, :], convline['pos'][value, ii, :], color='black') - ax1.loglog( - time_data[ii, :], - error_data['pos'][value, ii, :], - ' ', - color=color[ii], - marker=shape[ii], - label='k={}'.format(int(self.K_iter[ii])), - ) - if value == 2: - ax1.text( - time_data[ii, 1], - 0.25 * convline['pos'][value, ii, 1], - r"$\mathcal{O}(\Delta t^{%d})$" % 
(order_data['pos'][ii, 0, 1]), - size=18, - ) - else: - ax1.text( - time_data[ii, 1], - 0.25 * convline['pos'][value, ii, 1], - r"$\mathcal{O}(\Delta t^{%d})$" % (order_data['pos'][ii, 0, 0]), - size=18, - ) - - if self.error_type == 'Local': - ax1.set_ylabel(r'$\Delta x^{\mathrm{(abs)}}_{%d}$' % (value + 1)) - else: - ax1.set_ylabel(r'$\Delta x^{\mathrm{(rel)}}_{%d}$' % (value + 1)) - ax1.set_title('{} order of convergence, $M={}$'.format(self.error_type, self.num_nodes)) - ax1.set_xlabel(r'$\omega_{B} \cdot \Delta t$') - - ax1.legend(loc='best') - fig1.tight_layout() - fig1.savefig(self.cwd + 'data/{}_conv_plot_pos{}.pdf'.format(self.error_type, value + 1)) - - for ii in range(0, N): - ax2.loglog(time_data[ii, :], convline['vel'][value, ii, :], color='black') - ax2.loglog( - time_data[ii, :], - error_data['vel'][value, ii, :], - ' ', - color=color[ii], - marker=shape[ii], - label='k={}'.format(int(self.K_iter[ii])), - ) - - if value == 2: - ax2.text( - time_data[ii, 1], - 0.25 * convline['vel'][value, ii, 1], - r"$\mathcal{O}(\Delta t^{%d})$" % (order_data['vel'][ii, 0, 1]), - size=18, - ) - else: - ax2.text( - time_data[ii, 1], - 0.25 * convline['vel'][value, ii, 1], - r"$\mathcal{O}(\Delta t^{%d})$" % (order_data['vel'][ii, 0, 0]), - size=18, - ) - - if self.error_type == 'Local': - ax2.set_ylabel(r'$\Delta v^{\mathrm{(abs)}}_{%d}$' % (value + 1)) - else: - ax2.set_ylabel(r'$\Delta v^{\mathrm{(rel)}}_{%d}$' % (value + 1)) - ax2.set_title(r'{} order of convergence, $M={}$'.format(self.error_type, self.num_nodes)) - ax2.set_xlabel(r'$\omega_{B} \cdot \Delta t$') - # ============================================================================= - # Setting y axis min and max values - # ============================================================================= - if self.error_type == 'global': - ax2.set_ylim(1e-14, 1e1) - ax1.set_ylim(1e-14, 1e1) - else: - ax2.set_ylim(np.min(ax1.get_ylim()), np.max(ax2.get_ylim())) - ax1.set_ylim(np.min(ax1.get_ylim()), 
np.max(ax2.get_ylim())) - ax2.legend(loc='best') - fig2.tight_layout() - plt.show() - fig2.savefig(self.cwd + 'data/{}_conv_plot_vel{}.pdf'.format(self.error_type, value + 1)) - - def format_number(self, data_value, indx): # pragma: no cover - """ - Change format of the x axis for the work precision plots - """ - if data_value >= 1_000_000: - formatter = "{:1.1f}M".format(data_value * 0.000_001) - else: - formatter = "{:1.0f}K".format(data_value * 0.001) - return formatter - - def plot_work_precision(self): # pragma: no cover - """ - Generate work precision plots - """ - fixed_plot_params() - [N, func_eval_SDC, error_SDC, *_] = self.organize_data( - filename=self.cwd + 'data/rhs_eval_vs_global_errorSDC.csv', - time_iter=self.time_iter, - ) - - [N, func_eval_picard, error_picard, *_] = self.organize_data( - filename=self.cwd + 'data/rhs_eval_vs_global_errorPicard.csv', - time_iter=self.time_iter, - ) +from pySDC.projects.Second_orderSDC.plot_helper import PlotManager - color = ['r', 'brown', 'g', 'blue'] - shape = ['o', 'd', 's', 'x'] - fig1, ax1 = plt.subplots() - fig2, ax2 = plt.subplots() - value = self.axes[0] - if self.RK: - [N, func_eval_RKN, error_RKN, *_] = self.organize_data( - filename=self.cwd + 'data/rhs_eval_vs_global_errorRKN.csv', - time_iter=self.time_iter, - ) - - ax1.loglog( - func_eval_RKN[0], - error_RKN['pos'][value,][0][:], - ls='dashdot', - color='purple', - marker='p', - label='RKN-4', - ) - ax2.loglog( - func_eval_RKN[0], - error_RKN['vel'][value,][0][:], - ls='dashdot', - color='purple', - marker='p', - label='RKN-4', - ) - if self.VV: - [N, func_eval_VV, error_VV, *_] = self.organize_data( - filename=self.cwd + 'data/rhs_eval_vs_global_errorVV.csv', - time_iter=self.time_iter, - ) - - ax1.loglog( - func_eval_VV[0], - error_VV['pos'][value,][0][:], - ls='dashdot', - color='blue', - marker='H', - label='Velocity-Verlet', - ) - ax2.loglog( - func_eval_VV[0], - error_VV['vel'][value,][0][:], - ls='dashdot', - color='blue', - marker='H', - 
label='Velocity-Verlet', - ) - - for ii, jj in enumerate(self.K_iter): - # ============================================================================= - # # If you want to get exactly the same picture like in paper uncomment this only for vertical axis - # if ii==0 or ii==1: - # ax1.loglog(func_eval_SDC[ii, :][1:], error_SDC['pos'][value, ii, :][1:], ls='solid', color=color[ii], marker=shape[ii], label='k={}'.format(jj)) - # ax1.loglog(func_eval_picard[ii,:][1:], error_picard['pos'][value, ii, :][1:], ls='--', color=color[ii], marker=shape[ii]) - - # ax2.loglog(func_eval_SDC[ii, :][1:], error_SDC['vel'][value, ii, :][1:], ls='solid', color=color[ii], marker=shape[ii], label='k={}'.format(jj)) - # ax2.loglog(func_eval_picard[ii,:][1:], error_picard['vel'][value, ii, :][1:], ls='--', color=color[ii], marker=shape[ii]) - # else: - - # ax1.loglog(func_eval_SDC[ii, :][:-1], error_SDC['pos'][value, ii, :][:-1], ls='solid', color=color[ii], marker=shape[ii], label='k={}'.format(jj)) - # ax1.loglog(func_eval_picard[ii,:][:-1], error_picard['pos'][value, ii, :][:-1], ls='--', color=color[ii], marker=shape[ii]) - - # ax2.loglog(func_eval_SDC[ii, :][:-1], error_SDC['vel'][value, ii, :][:-1], ls='solid', color=color[ii], marker=shape[ii], label='k={}'.format(jj)) - # ax2.loglog(func_eval_picard[ii,:][:-1], error_picard['vel'][value, ii, :][:-1], ls='--', color=color[ii], marker=shape[ii]) - # - # ============================================================================= - ax1.loglog( - func_eval_SDC[ii, :], - error_SDC['pos'][value, ii, :], - ls='solid', - color=color[ii], - marker=shape[ii], - label='k={}'.format(jj), - ) - ax1.loglog( - func_eval_picard[ii, :], error_picard['pos'][value, ii, :], ls='--', color=color[ii], marker=shape[ii] - ) - - ax2.loglog( - func_eval_SDC[ii, :], - error_SDC['vel'][value, ii, :], - ls='solid', - color=color[ii], - marker=shape[ii], - label='k={}'.format(jj), - ) - ax2.loglog( - func_eval_picard[ii, :], error_picard['vel'][value, ii, 
:], ls='--', color=color[ii], marker=shape[ii] - ) - - xmin = np.min(ax1.get_xlim()) - xmax = np.max(ax2.get_xlim()) - xmin = round(xmin, -3) - xmax = round(xmax, -3) - - xx = np.linspace(np.log(xmin), np.log(xmax), 5) - xx = 3**xx - xx = xx[np.where(xx < xmax)] - # xx=[2*1e+3,4*1e+3, 8*1e+3] - ax1.grid(True) - - ax1.set_title("$M={}$".format(self.num_nodes)) - ax1.set_xlabel("Number of RHS evaluations") - ax1.set_ylabel(r'$\Delta x^{\mathrm{(rel)}}_{%d}$' % (value + 1)) - ax1.loglog([], [], color="black", ls="--", label="Picard iteration") - ax1.loglog([], [], color="black", ls="solid", label="Boris-SDC iteration") - - ax1.set_xticks(xx) - ax1.xaxis.set_major_formatter(self.format_number) - ax1.set_ylim(np.min(ax1.get_ylim()), np.max(ax2.get_ylim())) - # ax1.set_ylim(1e-14, 1e+0) - - ax1.legend(loc="best", fontsize=12) - fig1.tight_layout() - fig1.savefig(self.cwd + "data/f_eval_pos_{}_M={}.pdf".format(value, self.num_nodes)) - - ax2.grid(True) - ax2.xaxis.set_major_formatter(self.format_number) - ax2.set_title("$M={}$".format(self.num_nodes)) - ax2.set_xlabel("Number of RHS evaluations") - ax2.set_ylabel(r'$\Delta v^{\mathrm{(rel)}}_{%d}$' % (value + 1)) - ax2.loglog([], [], color="black", ls="--", label="Picard iteration") - ax2.loglog([], [], color="black", ls="solid", label="Boris-SDC iteration") - ax2.set_xticks(xx) - ax2.xaxis.set_major_formatter(self.format_number) - ax2.set_ylim(np.min(ax1.get_ylim()), np.max(ax2.get_ylim())) - # ax2.set_ylim(1e-14, 1e+0) - ax2.legend(loc="best", fontsize=12) - fig2.tight_layout() - fig2.savefig(self.cwd + "data/f_eval_vel_{}_M={}.pdf".format(value, self.num_nodes)) - plt.show() - - def organize_data(self, filename='data/dt_vs_local_errorSDC.csv', time_iter=None): # pragma: no cover - """ - Organize data according to plot - Args: - filename (string): data to find approximate order - time_iter : in case it you used different time iterations - """ - if time_iter == None: - time_iter = self.time_iter - - items = 
np.genfromtxt(filename, delimiter=',', skip_header=1) - time = items[:, 0] - N = int(np.size(time) / time_iter) - - error_data = {'pos': np.zeros([3, N, time_iter]), 'vel': np.zeros([3, N, time_iter])} - order_data = {'pos': np.zeros([N, time_iter, 2]), 'vel': np.zeros([N, time_iter, 2])} - time_data = np.zeros([N, time_iter]) - convline = {'pos': np.zeros([3, N, time_iter]), 'vel': np.zeros([3, N, time_iter])} - - time_data = time.reshape([N, time_iter]) - - order_data['pos'][:, :, 0] = items[:, 1].reshape([N, time_iter]) - order_data['pos'][:, :, 1] = items[:, 2].reshape([N, time_iter]) - order_data['vel'][:, :, 0] = items[:, 6].reshape([N, time_iter]) - order_data['vel'][:, :, 1] = items[:, 7].reshape([N, time_iter]) - - for ii in range(0, 3): - error_data['pos'][ii, :, :] = items[:, ii + 3].reshape([N, time_iter]) - error_data['vel'][ii, :, :] = items[:, ii + 8].reshape([N, time_iter]) - - for jj in range(0, 3): - if jj == 2: - convline['pos'][jj, :, :] = ( - (time_data / time_data[0, 0]).T ** order_data['pos'][:, jj, 1] - ).T * error_data['pos'][jj, :, 0][:, None] - convline['vel'][jj, :, :] = ( - (time_data / time_data[0, 0]).T ** order_data['vel'][:, jj, 1] - ).T * error_data['vel'][jj, :, 0][:, None] - else: - convline['pos'][jj, :, :] = ( - (time_data / time_data[0, 0]).T ** order_data['pos'][:, jj, 0] - ).T * error_data['pos'][jj, :, 0][:, None] - convline['vel'][jj, :, :] = ( - (time_data / time_data[0, 0]).T ** order_data['vel'][:, jj, 0] - ).T * error_data['vel'][jj, :, 0][:, None] - - return [N, time_data, error_data, order_data, convline] - - # find approximate order - def find_approximate_order(self, filename='data/dt_vs_local_errorSDC.csv'): - """ - This function finds approximate convergence rate and saves in the data folder - Args: - filename: given data - return: - None - """ - [N, time_data, error_data, order_data, convline] = self.organize_data(self.cwd + filename) - approx_order = {'pos': np.zeros([1, N]), 'vel': np.zeros([1, N])} - - for jj 
in range(0, 3): - if jj == 0: - file = open(self.cwd + 'data/{}_order_vs_approx_order.csv'.format(self.error_type), 'w') - - else: - file = open(self.cwd + 'data/{}_order_vs_approx_order.csv'.format(self.error_type), 'a') - - for ii in range(0, N): - approx_order['pos'][0, ii] = np.polyfit( - np.log(time_data[ii, :]), np.log(error_data['pos'][jj, ii, :]), 1 - )[0].real - approx_order['vel'][0, ii] = np.polyfit( - np.log(time_data[ii, :]), np.log(error_data['vel'][jj, ii, :]), 1 - )[0].real - if jj == 2: - file.write( - str(order_data['pos'][:, jj, 1]) - + ' | ' - + str(approx_order['pos'][0]) - + ' | ' - + str(order_data['vel'][:, jj, 1]) - + ' | ' - + str(approx_order['vel'][0]) - + '\n' - ) - else: - file.write( - str(order_data['pos'][:, jj, 0]) - + ' | ' - + str(approx_order['pos'][0]) - + ' | ' - + str(order_data['vel'][:, jj, 0]) - + ' | ' - + str(approx_order['vel'][0]) - + '\n' - ) - file.close() - - -class compute_error(plotmanager): +class ComputeError(PlotManager): """ - This class generates the data for the plots and computations for Second-order SDC + This class generates data for plots and computations for Second-order SDC """ def __init__(self, controller_params, description, time_iter=3, K_iter=(1, 2, 3), Tend=2, axes=(1,), cwd=''): @@ -421,30 +17,29 @@ def __init__(self, controller_params, description, time_iter=3, K_iter=(1, 2, 3) controller_params, description, time_iter=time_iter, K_iter=K_iter, Tend=Tend, axes=axes, cwd='' ) - def run_local_error(self): # pragma: no cover + def run_local_error(self): """ - This function controlls everything to generate local convergence rate + Controls everything to generate local convergence rate """ self.compute_local_error_data() - # self.find_approximate_order() self.plot_convergence() - def run_global_error(self): # pragma: no cover + def run_global_error(self): """ - This function for the global convergence order together it finds approximate order + Computes global convergence order and finds approximate 
order """ self.error_type = 'global' self.compute_global_error_data() self.find_approximate_order(filename='data/dt_vs_global_errorSDC.csv') self.plot_convergence() - def run_work_precision(self, RK=True, VV=False, dt_cont=1): # pragma: no cover + def run_work_precision(self, RK=True, VV=False, dt_cont=1): """ - To implement work-precision of Second-order SDC + Implements work-precision of Second-order SDC Args: - RK: True or False to include in the picture RKN method - VV: True or False to include in the picture Velocity-Verlet Scheme - dt_cont: moves RK and VV left to right (I could't find the best way instead of this) + RK: True or False to include RKN method + VV: True or False to include Velocity-Verlet Scheme + dt_cont: moves RK and VV left to right """ self.RK = RK self.VV = VV @@ -458,9 +53,8 @@ def run_work_precision(self, RK=True, VV=False, dt_cont=1): # pragma: no cover def compute_local_error_data(self): """ - Compute local convergece rate and save this data + Computes local convergence rate and saves the data """ - step_params = dict() dt_val = self.description['level_params']['dt'] @@ -468,208 +62,126 @@ def compute_local_error_data(self): step_params['maxiter'] = order self.description['step_params'] = step_params - if order == self.K_iter[0]: - file = open(self.cwd + 'data/dt_vs_local_errorSDC.csv', 'w') - file.write( - str('Time_steps') - + " | " - + str('Order_pos') - + " | " - + str('Abs_error_position') - + " | " - + str('Order_vel') - + " | " - + str('Abs_error_velocity') - + '\n' - ) - else: - file = open(self.cwd + 'data/dt_vs_local_errorSDC.csv', 'a') - - for ii in range(0, self.time_iter): - dt = dt_val / 2**ii - - self.description['level_params']['dt'] = dt - self.description['level_params'] = self.description['level_params'] - - # instantiate the controller (no controller parameters used here) - controller = controller_nonMPI( - num_procs=1, controller_params=self.controller_params, description=self.description - ) - - # set time 
parameters - t0 = 0.0 - Tend = dt - - # get initial values on finest level - P = controller.MS[0].levels[0].prob - uinit = P.u_init() - - # call main function to get things done... - uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend) - - # compute exact solution and compare - uex = P.u_exact(Tend) - - # find order of quadrature rule - coll_order = controller.MS[0].levels[0].sweep.coll.order - - # find order of convergence for the postion and velocity - order_pos = list(self.local_order_pos(order, coll_order)) - order_vel = list(self.local_order_vel(order, coll_order)) - # evaluate error - error_pos = list(np.abs((uex - uend).pos).T[0]) - error_vel = list(np.abs((uex - uend).vel).T[0]) - - dt_omega = dt * self.description['problem_params']['omega_B'] - file.write( - str(dt_omega) - + ", " - + str(', '.join(map(str, order_pos))) - + ", " - + str(', '.join(map(str, error_pos))) - + ", " - + str(', '.join(map(str, order_vel))) - + ", " - + str(', '.join(map(str, error_vel))) - + '\n' - ) - - file.close() + file_path = self.cwd + 'data/dt_vs_local_errorSDC.csv' + mode = 'w' if order == self.K_iter[0] else 'a' + + with open(file_path, mode) as file: + if order == self.K_iter[0]: + file.write("Time_steps | Order_pos | Abs_error_position | Order_vel | Abs_error_velocity\n") + + for ii in range(0, self.time_iter): + dt = dt_val / 2**ii + + self.description['level_params']['dt'] = dt + self.description['level_params'] = self.description['level_params'] + + controller = controller_nonMPI( + num_procs=1, controller_params=self.controller_params, description=self.description + ) + + t0, Tend = 0.0, dt + P = controller.MS[0].levels[0].prob + uinit = P.u_init() + + uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend) + + uex = P.u_exact(Tend) + coll_order = controller.MS[0].levels[0].sweep.coll.order + order_pos = list(self.local_order_pos(order, coll_order)) + order_vel = list(self.local_order_vel(order, coll_order)) + error_pos = list(np.abs((uex - uend).pos).T[0]) + 
error_vel = list(np.abs((uex - uend).vel).T[0]) + + dt_omega = dt * self.description['problem_params']['omega_B'] + file.write( + f"{dt_omega}, {', '.join(map(str, order_pos))}, {', '.join(map(str, error_pos))}," + f" {', '.join(map(str, order_vel))}, {', '.join(map(str, error_vel))}\n" + ) def compute_global_error_data(self, Picard=False, RK=False, VV=False, work_counter=False, dt_cont=1): """ - Compute global convergence data and save it into the data folder + Computes global convergence data and saves it into the data folder Args: Picard: bool, Picard iteration computation RK: bool, RKN method VV: bool, Velocity-Verlet scheme - work_counter: bool, compute rhs for the work precision + work_counter: bool, compute rhs for work precision dt_cont: moves the data left to right for RK and VV method """ - K_iter = self.K_iter + name, description = '', self.description + if Picard: name = 'Picard' - description = self.description description['sweeper_params']['QI'] = 'PIC' description['sweeper_params']['QE'] = 'PIC' - elif RK: - K_iter = (1,) - name = 'RKN' - description = self.description - description['sweeper_class'] = RKN + K_iter, name, description['sweeper_class'] = (1,), 'RKN', RKN elif VV: - K_iter = (1,) - name = 'VV' - description = self.description - description['sweeper_class'] = Velocity_Verlet + K_iter, name, description['sweeper_class'] = (1,), 'VV', Velocity_Verlet else: name = 'SDC' - description = self.description + self.controller_params['hook_class'] = particles_output - step_params = dict() - dt_val = self.description['level_params']['dt'] + step_params, dt_val = dict(), self.description['level_params']['dt'] + values, error = ['position', 'velocity'], dict() - values = ['position', 'velocity'] + filename = f"data/{'rhs_eval_vs_global_error' if work_counter else 'dt_vs_global_error'}{name}.csv" - error = dict() + for order in K_iter: + u_val, uex_val = dict(), dict() + step_params['maxiter'], description['step_params'] = order, step_params - if 
work_counter: - filename = 'data/rhs_eval_vs_global_error{}.csv'.format(name) - else: - filename = 'data/dt_vs_global_error{}.csv'.format(name) + file_path = self.cwd + filename + mode = 'w' if order == K_iter[0] else 'a' - for order in K_iter: - u_val = dict() - uex_val = dict() + with open(file_path, mode) as file: + if order == K_iter[0]: + file.write( + "Time_steps/Work_counter | Order_pos | Abs_error_position | Order_vel | Abs_error_velocity\n" + ) + + cont = 2 if self.time_iter == 3 else 2 ** abs(3 - self.time_iter) + cont = cont if not Picard else dt_cont + + for ii in range(0, self.time_iter): + dt = (dt_val * cont) / 2**ii + + description['level_params']['dt'] = dt + description['level_params'] = self.description['level_params'] + + controller = controller_nonMPI( + num_procs=1, controller_params=self.controller_params, description=description + ) + + t0, Tend = 0.0, self.Tend + P = controller.MS[0].levels[0].prob + uinit = P.u_init() + uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend) + + func_eval = P.work_counters['Boris_solver'].niter + P.work_counters['rhs'].niter + + for nn in values: + u_val[nn] = get_sorted(stats, type=nn, sortby='time') + uex_val[nn] = get_sorted(stats, type=nn + '_exact', sortby='time') + error[nn] = self.relative_error(uex_val[nn], u_val[nn]) + error[nn] = list(error[nn].T[0]) + + if RK or VV: + global_order = np.array([4, 4]) + else: + coll_order = controller.MS[0].levels[0].sweep.coll.order + global_order = list(self.global_order(order, coll_order)) + global_order = np.array([4, 4]) if RK or VV else list(self.global_order(order, coll_order)) + + dt_omega = dt * self.description['problem_params']['omega_B'] + save = func_eval if work_counter else dt_omega + + file.write( + f"{save}, {', '.join(map(str, global_order))}, {', '.join(map(str, error['position']))}," + f" {', '.join(map(str, global_order))}, {', '.join(map(str, error['velocity']))}\n" + ) - step_params['maxiter'] = order - description['step_params'] = 
step_params - - if order == K_iter[0]: - file = open(self.cwd + filename, 'w') - file.write( - str('Time_steps/Work_counter') - + " | " - + str('Order_pos') - + " | " - + str('Abs_error_position') - + " | " - + str('Order_vel') - + " | " - + str('Abs_error_velocity') - + '\n' - ) - else: - file = open(self.cwd + filename, 'a') - - # Controller for plot - if Picard: - if self.time_iter == 3: - cont = 2 - else: - tt = np.abs(3 - self.time_iter) - cont = 2**tt + 2 - else: - cont = dt_cont - - for ii in range(0, self.time_iter): - dt = (dt_val * cont) / 2**ii - - description['level_params']['dt'] = dt - description['level_params'] = self.description['level_params'] - - # instantiate the controller (no controller parameters used here) - controller = controller_nonMPI( - num_procs=1, controller_params=self.controller_params, description=description - ) - - # set time parameters - t0 = 0.0 - # Tend = dt - Tend = self.Tend - - # get initial values on finest level - P = controller.MS[0].levels[0].prob - uinit = P.u_init() - - # call main function to get things done... 
- uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend) - # rhs function evaluation - func_eval = P.work_counters['Boris_solver'].niter + P.work_counters['rhs'].niter - # extract values from stats - for _, nn in enumerate(values): - u_val[nn] = get_sorted(stats, type=nn, sortby='time') - uex_val[nn] = get_sorted(stats, type=nn + '_exact', sortby='time') - error[nn] = self.relative_error(uex_val[nn], u_val[nn]) - error[nn] = list(error[nn].T[0]) - - if RK or VV: - global_order = np.array([4, 4]) - else: - coll_order = controller.MS[0].levels[0].sweep.coll.order - global_order = list(self.global_order(order, coll_order)) - dt_omega = dt * self.description['problem_params']['omega_B'] - if work_counter: - save = func_eval - else: - save = dt_omega - file.write( - str(save) - + ", " - + str(', '.join(map(str, global_order))) - + ", " - + str(', '.join(map(str, error['position']))) - + ", " - + str(', '.join(map(str, global_order))) - + ", " - + str(', '.join(map(str, error['velocity']))) - + '\n' - ) - file.close() - - # find expected local convergence order for position def local_order_pos(self, order_K, order_quad): if self.description['sweeper_params']['initial_guess'] == 'spread': if self.quad_type == 'GAUSS' or self.quad_type == 'RADAU-RIGHT': @@ -677,16 +189,15 @@ def local_order_pos(self, order_K, order_quad): elif self.quad_type == 'LOBATTO' or self.quad_type == 'RADAU-LEFT': return np.array([np.min([order_K + 2 + 2, order_quad]), np.min([2 * order_K + 3, order_quad])]) else: - raise NotImplementedError('order of convergence explicitly not implemented ') + raise NotImplementedError('Order of convergence explicitly not implemented') else: if self.quad_type == 'GAUSS' or self.quad_type == 'RADAU-RIGHT': return np.array([np.min([order_K + 2, order_quad]), np.min([2 * order_K + 3, order_quad])]) elif self.quad_type == 'LOBATTO' or self.quad_type == 'RADAU-LEFT': return np.array([np.min([order_K + 2, order_quad]), np.min([2 * order_K + 3, order_quad])]) else: - 
raise NotImplementedError('order of convergence explicitly not implemented ') + raise NotImplementedError('Order of convergence explicitly not implemented') - # find expected local convergence order for velocity def local_order_vel(self, order_K, order_quad): if self.description['sweeper_params']['initial_guess'] == 'spread': if self.quad_type == 'GAUSS' or self.quad_type == 'RADAU-RIGHT': @@ -694,229 +205,24 @@ def local_order_vel(self, order_K, order_quad): elif self.quad_type == 'LOBATTO' or self.quad_type == 'RADAU-LEFT': return np.array([np.min([order_K + 1 + 2, order_quad]), np.min([2 * order_K + 2, order_quad])]) else: - raise NotImplementedError('order of convergence explicitly not implemented ') + raise NotImplementedError('Order of convergence explicitly not implemented') else: if self.quad_type == 'GAUSS' or self.quad_type == 'RADAU-RIGHT': return np.array([np.min([order_K + 1, order_quad]), np.min([2 * order_K + 2, order_quad])]) elif self.quad_type == 'LOBATTO' or self.quad_type == 'RADAU-LEFT': return np.array([np.min([order_K + 1, order_quad]), np.min([2 * order_K + 2, order_quad])]) else: - raise NotImplementedError('order of convergence explicitly not implemented ') + raise NotImplementedError('Order of convergence explicitly not implemented') - # find expected global convergence order def global_order(self, order_K, order_quad): if self.quad_type == 'GAUSS' or self.quad_type == 'RADAU-RIGHT': return np.array([np.min([order_K, order_quad]), np.min([2 * order_K, order_quad])]) elif self.quad_type == 'LOBATTO' or self.quad_type == 'RADAU-LEFT': return np.array([np.min([order_K, order_quad]), np.min([2 * order_K, order_quad])]) + 2 else: - raise NotImplementedError('order of convergence explicitly not implemented ') + raise NotImplementedError('Order of convergence explicitly not implemented') - # compute relative error def relative_error(self, uex_data, u_data): u_ex = np.array([entry[1] for entry in uex_data]) u = np.array([entry[1] for entry in 
u_data]) return np.linalg.norm(np.abs((u_ex - u)), np.inf, 0) / np.linalg.norm(u_ex, np.inf, 0) - - -class Stability_implementation(object): - """ - Routine to compute the stability domains of different configurations of SDC - """ - - def __init__(self, description, kappa_max=20, mu_max=20, Num_iter=(400, 400), cwd=''): - self.description = description - self.kappa_max = kappa_max - self.mu_max = mu_max - self.kappa_iter = Num_iter[0] - self.mu_iter = Num_iter[1] - self.lambda_kappa = np.linspace(0.0, self.kappa_max, self.kappa_iter) - self.lambda_mu = np.linspace(0.0, self.mu_max, self.mu_iter) - self.K_iter = description['step_params']['maxiter'] - self.num_nodes = description['sweeper_params']['num_nodes'] - self.dt = description['level_params']['dt'] - self.SDC, self.Ksdc, self.picard, self.Kpicard = self.stability_data() - self.cwd = cwd - - def stability_data(self): - """ - Computes stability domain matrix for the Harmonic oscillator problem - Returns: - numpy.ndarray: domain_SDC - numpy.ndarray: domain_Ksdc - numpy.ndarray: domain_picard - numpy.ndarray: domain_Kpicard - """ - S = step(description=self.description) - - L = S.levels[0] - - Q = L.sweep.coll.Qmat[1:, 1:] - QQ = np.dot(Q, Q) - num_nodes = L.sweep.coll.num_nodes - dt = L.params.dt - Q_coll = np.block([[QQ, np.zeros([num_nodes, num_nodes])], [np.zeros([num_nodes, num_nodes]), Q]]) - qQ = np.dot(L.sweep.coll.weights, Q) - - ones = np.block([[np.ones(num_nodes), np.zeros(num_nodes)], [np.zeros(num_nodes), np.ones(num_nodes)]]) - - q_mat = np.block( - [ - [dt**2 * qQ, np.zeros(num_nodes)], - [np.zeros(num_nodes), dt * L.sweep.coll.weights], - ] - ) - - domain_SDC = np.zeros((self.kappa_iter, self.mu_iter), dtype="complex") - domain_picard = np.zeros((self.kappa_iter, self.mu_iter)) - domain_Ksdc = np.zeros((self.kappa_iter, self.mu_iter)) - domain_Kpicard = np.zeros((self.kappa_iter, self.mu_iter)) - for i in range(0, self.kappa_iter): - for j in range(0, self.mu_iter): - k = self.lambda_kappa[i] - 
mu = self.lambda_mu[j] - F = np.block( - [ - [-k * np.eye(num_nodes), -mu * np.eye(num_nodes)], - [-k * np.eye(num_nodes), -mu * np.eye(num_nodes)], - ] - ) - if self.K_iter != 0: - lambdas = [k, mu] - SDC_mat_sweep, Ksdc_eigval = L.sweep.get_scalar_problems_manysweep_mats( - nsweeps=self.K_iter, lambdas=lambdas - ) - if L.sweep.params.picard_mats_sweep: - ( - Picard_mat_sweep, - Kpicard_eigval, - ) = L.sweep.get_scalar_problems_picardsweep_mats(nsweeps=self.K_iter, lambdas=lambdas) - else: - ProblemError("Picard interation is False") - domain_Ksdc[i, j] = Ksdc_eigval - if L.sweep.params.picard_mats_sweep: - domain_Kpicard[i, j] = Kpicard_eigval - - else: - SDC_mat_sweep = np.linalg.inv(np.eye(2 * num_nodes) - dt * np.dot(Q_coll, F)) - - if L.sweep.params.do_coll_update: - FSDC = np.dot(F, SDC_mat_sweep) - Rsdc_mat = np.array([[1.0, dt], [0, 1.0]]) + np.dot(q_mat, FSDC) @ ones.T - stab_func, v = np.linalg.eig(Rsdc_mat) - - if L.sweep.params.picard_mats_sweep: - FPicard = np.dot(F, Picard_mat_sweep) - Rpicard_mat = np.array([[1.0, dt], [0, 1.0]]) + np.dot(q_mat, FPicard) @ ones.T - stab_func_picard, v = np.linalg.eig(Rpicard_mat) - else: - pass - raise ProblemError("Collocation update step is only works for True") - - domain_SDC[i, j] = np.max(np.abs(stab_func)) - if L.sweep.params.picard_mats_sweep: - domain_picard[i, j] = np.max(np.abs(stab_func_picard)) - - return ( - dt * domain_SDC.real, - dt * domain_Ksdc.real, - dt * domain_picard.real, - dt * domain_Kpicard.real, - ) - - def stability_function_RKN(self, k, mu, dt): - """ - Stability function of RKN method - - Returns: - float: maximum absolute values of eigvales - """ - A = np.array([[0, 0, 0, 0], [0.5, 0, 0, 0], [0, 0.5, 0, 0], [0, 0, 1, 0]]) - B = np.array([[0, 0, 0, 0], [0.125, 0, 0, 0], [0.125, 0, 0, 0], [0, 0, 0.5, 0]]) - c = np.array([0, 0.5, 0.5, 1]) - b = np.array([1 / 6, 2 / 6, 2 / 6, 1 / 6]) - bA = np.array([1 / 6, 1 / 6, 1 / 6, 0]) - L = np.eye(4) + k * (dt**2) * B + mu * dt * A - R = 
np.block([[-k * np.ones(4)], [-(k * c + mu * np.ones(4))]]) - - K = np.linalg.inv(L) @ R.T - C = np.block([[dt**2 * bA], [dt * b]]) - Y = np.array([[1, dt], [0, 1]]) + C @ K - eigval = np.linalg.eigvals(Y) - - return np.max(np.abs(eigval)) - - def stability_data_RKN(self): - """ - Compute and store values into a matrix - - Returns: - numpy.ndarray: stab_RKN - """ - stab_RKN = np.zeros([self.kappa_iter, self.mu_iter]) - for ii, kk in enumerate(self.lambda_kappa): - for jj, mm in enumerate(self.lambda_mu): - stab_RKN[jj, ii] = self.stability_function_RKN(kk, mm, self.dt) - - return stab_RKN - - def plot_stability(self, region, title=""): # pragma: no cover - """ - Plotting runtine for moduli - - Args: - stabval (numpy.ndarray): moduli - title: title for the plot - """ - fixed_plot_params() - lam_k_max = np.amax(self.lambda_kappa) - lam_mu_max = np.amax(self.lambda_mu) - - plt.figure() - levels = np.array([0.25, 0.5, 0.75, 0.9, 1.0, 1.1]) - - CS1 = plt.contour(self.lambda_kappa, self.lambda_mu, region.T, levels, colors='k', linestyles="dashed") - # CS2 = plt.contour(self.lambda_k, self.lambda_mu, np.absolute(region.T), [1.0], colors='r') - - plt.clabel(CS1, inline=True, fmt="%3.2f") - - plt.gca().set_xticks(np.arange(0, int(lam_k_max) + 3, 3)) - plt.gca().set_yticks(np.arange(0, int(lam_mu_max) + 3, 3)) - plt.gca().tick_params(axis="both", which="both") - plt.xlim([0.0, lam_k_max]) - plt.ylim([0.0, lam_mu_max]) - - plt.xlabel(r"$\Delta t\cdot \kappa }$", labelpad=0.0) - plt.ylabel(r"$\Delta t\cdot \mu }$", labelpad=0.0) - if self.RKN: - plt.title(f"{title}") - if self.radius: - plt.title("{} $M={}$".format(title, self.num_nodes)) - else: - plt.title(r"{} $M={},\ K={}$".format(title, self.num_nodes, self.K_iter)) - plt.tight_layout() - plt.savefig(self.cwd + "data/M={}_K={}_redion_{}.pdf".format(self.num_nodes, self.K_iter, title)) - - def run_SDC_stability(self): # pragma: no cover - self.RKN = False - self.radius = False - self.plot_stability(self.SDC, title="SDC 
stability region") - - def run_Picard_stability(self): # pragma: no cover - self.RKN = False - self.radius = False - self.plot_stability(self.picard, title="Picard stability region") - - def run_Ksdc(self): # pragma: no cover - self.radius = True - self.plot_stability(self.Ksdc, title="$K_{sdc}$ spectral radius") - - def run_Kpicard(self): # pragma: no cover - self.radius = True - self.plot_stability(self.Kpicard, title="$K_{picard}$ spectral radius") - - def run_RKN_stability(self): # pragma: no cover - self.RKN = True - self.radius = False - region_RKN = self.stability_data_RKN() - self.plot_stability(region_RKN.T, title='RKN-4 stability region') diff --git a/pySDC/projects/Second_orderSDC/penningtrap_run_Hamiltonian_error.py b/pySDC/projects/Second_orderSDC/penningtrap_run_Hamiltonian_error.py index 7fae6b6f30..28f9ea01bb 100644 --- a/pySDC/projects/Second_orderSDC/penningtrap_run_Hamiltonian_error.py +++ b/pySDC/projects/Second_orderSDC/penningtrap_run_Hamiltonian_error.py @@ -1,4 +1,4 @@ -# It checks whether data folder exicits or not +# This code checks if the "data" folder exists or not. 
exec(open("check_data_folder.py").read()) import matplotlib.pyplot as plt @@ -11,7 +11,7 @@ from pySDC.implementations.sweeper_classes.boris_2nd_order import boris_2nd_order from pySDC.projects.Second_orderSDC.penningtrap_HookClass import particles_output from pySDC.implementations.sweeper_classes.Runge_Kutta_Nystrom import RKN -from pySDC.projects.Second_orderSDC.penningtrap_Simulation import fixed_plot_params +from pySDC.projects.Second_orderSDC.plot_helper import set_fixed_plot_params def main(dt, tend, maxiter, M, sweeper): # pragma: no cover @@ -97,7 +97,7 @@ def plot_Hamiltonian_error(K, M, dt): # pragma: no cover M: number of quadrature nodes dt: time step """ - fixed_plot_params() + set_fixed_plot_params() # Define final time time = 1e6 tn = dt diff --git a/pySDC/projects/Second_orderSDC/penningtrap_run_error.py b/pySDC/projects/Second_orderSDC/penningtrap_run_error.py index 1f74d42193..be1d8e4d5a 100644 --- a/pySDC/projects/Second_orderSDC/penningtrap_run_error.py +++ b/pySDC/projects/Second_orderSDC/penningtrap_run_error.py @@ -1,5 +1,5 @@ from pySDC.projects.Second_orderSDC.penningtrap_params import penningtrap_params -from pySDC.projects.Second_orderSDC.penningtrap_Simulation import compute_error +from pySDC.projects.Second_orderSDC.penningtrap_Simulation import ComputeError if __name__ == '__main__': """ @@ -15,17 +15,17 @@ Note: Tend: final time value can be given manually by default Tend=2 """ - # It checks whether data folder exicits or not + # This code checks if the "data" folder exists or not. 
exec(open("check_data_folder.py").read()) # Get params for the penning trap problem from the function controller_params, description = penningtrap_params() ## ============================================================================= ## dt-timestep and num_nodes can be changed here manually - description['level_params']['dt'] = 0.015625 * 4 + description['level_params']['dt'] = 0.015625 / 4 description['sweeper_params']['num_nodes'] = 4 ## ============================================================================= # Give the parameters to the class - conv = compute_error(controller_params, description, time_iter=3, K_iter=(1, 2, 3, 10), axes=(2,)) + conv = ComputeError(controller_params, description, time_iter=3, K_iter=(1, 2, 3), axes=(0,)) # Run local convergence order # conv.run_local_error() # Run global convergence order diff --git a/pySDC/projects/Second_orderSDC/penningtrap_run_work_precision.py b/pySDC/projects/Second_orderSDC/penningtrap_run_work_precision.py index eb47975285..af81f49f16 100644 --- a/pySDC/projects/Second_orderSDC/penningtrap_run_work_precision.py +++ b/pySDC/projects/Second_orderSDC/penningtrap_run_work_precision.py @@ -1,7 +1,7 @@ -# It checks whether data folder exicits or not +# This code checks if the "data" folder exists or not. 
exec(open("check_data_folder.py").read()) -from pySDC.projects.Second_orderSDC.penningtrap_Simulation import compute_error +from pySDC.projects.Second_orderSDC.penningtrap_Simulation import ComputeError from pySDC.projects.Second_orderSDC.penningtrap_params import penningtrap_params if __name__ == '__main__': @@ -10,7 +10,7 @@ All parameters are given in penningtrap_params Note: * time needs to be changed according to choosen axis - * Tend fixed but it can be changed by defining Tend and include in it in compute_error + * Tend fixed but it can be changed by defining Tend and include in it in ComputeError * To implement Velocity-Verlet scheme set VV=True like run_work_precision(VV=True) * RKN method can be removed by setting RKN=False like run_work_precision(RKN=False) * dt timestep can be changed here as well @@ -21,8 +21,8 @@ ## ============================================================================= ## dt-timestep and Tend can be changed here manually Tend = 128 * 0.015625 - description['level_params']['dt'] = 0.015625 * 4 - description['sweeper_params']['initial_guess'] = 'spread' # 'zero', 'spread' + description['level_params']['dt'] = 0.015625 * 2 + description['sweeper_params']['initial_guess'] = 'spread' #'random' 'zero', 'spread' ## ============================================================================= - work_pre = compute_error(controller_params, description, time_iter=3, Tend=Tend, K_iter=(1, 2, 3), axes=(2,)) + work_pre = ComputeError(controller_params, description, time_iter=3, Tend=Tend, K_iter=(1, 2, 3), axes=(2,)) work_pre.run_work_precision(RK=True) diff --git a/pySDC/projects/Second_orderSDC/plot_helper.py b/pySDC/projects/Second_orderSDC/plot_helper.py new file mode 100644 index 0000000000..61514d6bfe --- /dev/null +++ b/pySDC/projects/Second_orderSDC/plot_helper.py @@ -0,0 +1,406 @@ +# import matplotlib + +# matplotlib.use('Agg') +# import os + +import numpy as np +import matplotlib.pyplot as plt + +FONT_SIZE = 16 +FIG_SIZE = (7.44, 
6.74) + + +def set_fixed_plot_params(): # pragma: no cover + """ + Set fixed parameters for all plots + """ + plt.rcParams['figure.figsize'] = FIG_SIZE + plt.rcParams['pgf.rcfonts'] = False + + plt.rcParams['lines.linewidth'] = 2.5 + plt.rcParams['axes.titlesize'] = FONT_SIZE + 5 + plt.rcParams['axes.labelsize'] = FONT_SIZE + 5 + plt.rcParams['xtick.labelsize'] = FONT_SIZE + plt.rcParams['ytick.labelsize'] = FONT_SIZE + plt.rcParams['xtick.major.pad'] = 5 + plt.rcParams['ytick.major.pad'] = 5 + plt.rcParams['axes.labelpad'] = 6 + plt.rcParams['lines.markersize'] = FONT_SIZE - 2 + plt.rcParams['lines.markeredgewidth'] = 1 + plt.rcParams['mathtext.fontset'] = 'cm' + plt.rcParams['mathtext.rm'] = 'serif' + plt.rc('font', size=FONT_SIZE) + + +class PlotManager(object): # pragma: no cover + """ + This class generates all of the plots of the Second-order SDC plots. + """ + + def __init__(self, controller_params, description, time_iter=3, K_iter=(1, 2, 3), Tend=2, axes=(1,), cwd=''): + self.controller_params = controller_params + self.description = description + self.time_iter = time_iter + self.K_iter = K_iter + self.Tend = Tend + self.axes = axes + self.cwd = cwd + self.quad_type = self.description['sweeper_params']['quad_type'] + self.num_nodes = self.description['sweeper_params']['num_nodes'] + self.error_type = 'local' + + def plot_convergence(self): + """ + Plot convergence order plots for the position and velocity + If you change parameters of the values you need set y_lim values need to set manually + """ + set_fixed_plot_params() + [N, time_data, error_data, order_data, convline] = self.organize_data( + filename='data/dt_vs_{}_errorSDC.csv'.format(self.error_type) + ) + + color = ['r', 'brown', 'g', 'blue'] + shape = ['o', 'd', 's', 'x'] + + fig1, ax1 = plt.subplots() + fig2, ax2 = plt.subplots() + value = self.axes[0] + for ii in range(0, N): + ax1.loglog(time_data[ii, :], convline['pos'][value, ii, :], color='black') + ax1.loglog( + time_data[ii, :], + 
error_data['pos'][value, ii, :], + ' ', + color=color[ii], + marker=shape[ii], + label='k={}'.format(int(self.K_iter[ii])), + ) + if value == 2: + ax1.text( + time_data[ii, 1], + 0.25 * convline['pos'][value, ii, 1], + r"$\mathcal{O}(\Delta t^{%d})$" % (order_data['pos'][ii, 0, 1]), + size=18, + ) + else: + ax1.text( + time_data[ii, 1], + 0.25 * convline['pos'][value, ii, 1], + r"$\mathcal{O}(\Delta t^{%d})$" % (order_data['pos'][ii, 0, 0]), + size=18, + ) + + if self.error_type == 'Local': + ax1.set_ylabel(r'$\Delta x^{\mathrm{(abs)}}_{%d}$' % (value + 1)) + else: + ax1.set_ylabel(r'$\Delta x^{\mathrm{(rel)}}_{%d}$' % (value + 1)) + ax1.set_title('{} order of convergence, $M={}$'.format(self.error_type, self.num_nodes)) + ax1.set_xlabel(r'$\omega_{B} \cdot \Delta t$') + + ax1.legend(loc='best') + fig1.tight_layout() + fig1.savefig(self.cwd + 'data/{}_conv_plot_pos{}.pdf'.format(self.error_type, value + 1)) + + for ii in range(0, N): + ax2.loglog(time_data[ii, :], convline['vel'][value, ii, :], color='black') + ax2.loglog( + time_data[ii, :], + error_data['vel'][value, ii, :], + ' ', + color=color[ii], + marker=shape[ii], + label='k={}'.format(int(self.K_iter[ii])), + ) + + if value == 2: + ax2.text( + time_data[ii, 1], + 0.25 * convline['vel'][value, ii, 1], + r"$\mathcal{O}(\Delta t^{%d})$" % (order_data['vel'][ii, 0, 1]), + size=18, + ) + else: + ax2.text( + time_data[ii, 1], + 0.25 * convline['vel'][value, ii, 1], + r"$\mathcal{O}(\Delta t^{%d})$" % (order_data['vel'][ii, 0, 0]), + size=18, + ) + + if self.error_type == 'Local': + ax2.set_ylabel(r'$\Delta v^{\mathrm{(abs)}}_{%d}$' % (value + 1)) + else: + ax2.set_ylabel(r'$\Delta v^{\mathrm{(rel)}}_{%d}$' % (value + 1)) + ax2.set_title(r'{} order of convergence, $M={}$'.format(self.error_type, self.num_nodes)) + ax2.set_xlabel(r'$\omega_{B} \cdot \Delta t$') + # ============================================================================= + # Setting y axis min and max values + # 
============================================================================= + if self.error_type == 'global': + ax2.set_ylim(1e-14, 1e1) + ax1.set_ylim(1e-14, 1e1) + else: + ax2.set_ylim(np.min(ax1.get_ylim()), np.max(ax2.get_ylim())) + ax1.set_ylim(np.min(ax1.get_ylim()), np.max(ax2.get_ylim())) + ax2.legend(loc='best') + fig2.tight_layout() + plt.show() + fig2.savefig(self.cwd + 'data/{}_conv_plot_vel{}.pdf'.format(self.error_type, value + 1)) + + def format_number(self, data_value, indx): + """ + Change format of the x axis for the work precision plots + """ + if data_value >= 1_000_000: + formatter = "{:1.1f}M".format(data_value * 0.000_001) + else: + formatter = "{:1.0f}K".format(data_value * 0.001) + return formatter + + def plot_work_precision(self): + """ + Generate work precision plots + """ + set_fixed_plot_params() + [N, func_eval_SDC, error_SDC, *_] = self.organize_data( + filename=self.cwd + 'data/rhs_eval_vs_global_errorSDC.csv', + time_iter=self.time_iter, + ) + + [N, func_eval_picard, error_picard, *_] = self.organize_data( + filename=self.cwd + 'data/rhs_eval_vs_global_errorPicard.csv', + time_iter=self.time_iter, + ) + + color = ['r', 'brown', 'g', 'blue'] + shape = ['o', 'd', 's', 'x'] + fig1, ax1 = plt.subplots() + fig2, ax2 = plt.subplots() + value = self.axes[0] + + if self.RK: + [N, func_eval_RKN, error_RKN, *_] = self.organize_data( + filename=self.cwd + 'data/rhs_eval_vs_global_errorRKN.csv', + time_iter=self.time_iter, + ) + + ax1.loglog( + func_eval_RKN[0], + error_RKN['pos'][value,][0][:], + ls='dashdot', + color='purple', + marker='p', + label='RKN-4', + ) + ax2.loglog( + func_eval_RKN[0], + error_RKN['vel'][value,][0][:], + ls='dashdot', + color='purple', + marker='p', + label='RKN-4', + ) + if self.VV: + [N, func_eval_VV, error_VV, *_] = self.organize_data( + filename=self.cwd + 'data/rhs_eval_vs_global_errorVV.csv', + time_iter=self.time_iter, + ) + + ax1.loglog( + func_eval_VV[0], + error_VV['pos'][value,][0][:], + ls='dashdot', + 
color='blue', + marker='H', + label='Velocity-Verlet', + ) + ax2.loglog( + func_eval_VV[0], + error_VV['vel'][value,][0][:], + ls='dashdot', + color='blue', + marker='H', + label='Velocity-Verlet', + ) + + for ii, jj in enumerate(self.K_iter): + # ============================================================================= + # # If you want to get exactly the same picture like in paper uncomment this only for vertical axis + # if ii==0 or ii==1: + # ax1.loglog(func_eval_SDC[ii, :][1:], error_SDC['pos'][value, ii, :][1:], ls='solid', color=color[ii], marker=shape[ii], label='k={}'.format(jj)) + # ax1.loglog(func_eval_picard[ii,:][1:], error_picard['pos'][value, ii, :][1:], ls='--', color=color[ii], marker=shape[ii]) + + # ax2.loglog(func_eval_SDC[ii, :][1:], error_SDC['vel'][value, ii, :][1:], ls='solid', color=color[ii], marker=shape[ii], label='k={}'.format(jj)) + # ax2.loglog(func_eval_picard[ii,:][1:], error_picard['vel'][value, ii, :][1:], ls='--', color=color[ii], marker=shape[ii]) + # else: + + # ax1.loglog(func_eval_SDC[ii, :][:-1], error_SDC['pos'][value, ii, :][:-1], ls='solid', color=color[ii], marker=shape[ii], label='k={}'.format(jj)) + # ax1.loglog(func_eval_picard[ii,:][:-1], error_picard['pos'][value, ii, :][:-1], ls='--', color=color[ii], marker=shape[ii]) + + # ax2.loglog(func_eval_SDC[ii, :][:-1], error_SDC['vel'][value, ii, :][:-1], ls='solid', color=color[ii], marker=shape[ii], label='k={}'.format(jj)) + # ax2.loglog(func_eval_picard[ii,:][:-1], error_picard['vel'][value, ii, :][:-1], ls='--', color=color[ii], marker=shape[ii]) + # + # ============================================================================= + ax1.loglog( + func_eval_SDC[ii, :], + error_SDC['pos'][value, ii, :], + ls='solid', + color=color[ii], + marker=shape[ii], + label='k={}'.format(jj), + ) + ax1.loglog( + func_eval_picard[ii, :], error_picard['pos'][value, ii, :], ls='--', color=color[ii], marker=shape[ii] + ) + + ax2.loglog( + func_eval_SDC[ii, :], + 
error_SDC['vel'][value, ii, :], + ls='solid', + color=color[ii], + marker=shape[ii], + label='k={}'.format(jj), + ) + ax2.loglog( + func_eval_picard[ii, :], error_picard['vel'][value, ii, :], ls='--', color=color[ii], marker=shape[ii] + ) + + xmin = np.min(ax1.get_xlim()) + xmax = np.max(ax2.get_xlim()) + xmin = round(xmin, -3) + xmax = round(xmax, -3) + + xx = np.linspace(np.log(xmin), np.log(xmax), 5) + xx = 3**xx + xx = xx[np.where(xx < xmax)] + # xx=[2*1e+3,4*1e+3, 8*1e+3] + ax1.grid(True) + + ax1.set_title("$M={}$".format(self.num_nodes)) + ax1.set_xlabel("Number of RHS evaluations") + ax1.set_ylabel(r'$\Delta x^{\mathrm{(rel)}}_{%d}$' % (value + 1)) + ax1.loglog([], [], color="black", ls="--", label="Picard iteration") + ax1.loglog([], [], color="black", ls="solid", label="Boris-SDC iteration") + + ax1.set_xticks(xx) + ax1.xaxis.set_major_formatter(self.format_number) + ax1.set_ylim(np.min(ax1.get_ylim()), np.max(ax2.get_ylim())) + # ax1.set_ylim(1e-14, 1e+0) + + ax1.legend(loc="best", fontsize=12) + fig1.tight_layout() + fig1.savefig(self.cwd + "data/f_eval_pos_{}_M={}.pdf".format(value, self.num_nodes)) + + ax2.grid(True) + ax2.xaxis.set_major_formatter(self.format_number) + ax2.set_title("$M={}$".format(self.num_nodes)) + ax2.set_xlabel("Number of RHS evaluations") + ax2.set_ylabel(r'$\Delta v^{\mathrm{(rel)}}_{%d}$' % (value + 1)) + ax2.loglog([], [], color="black", ls="--", label="Picard iteration") + ax2.loglog([], [], color="black", ls="solid", label="Boris-SDC iteration") + ax2.set_xticks(xx) + ax2.xaxis.set_major_formatter(self.format_number) + ax2.set_ylim(np.min(ax1.get_ylim()), np.max(ax2.get_ylim())) + # ax2.set_ylim(1e-14, 1e+0) + ax2.legend(loc="best", fontsize=12) + fig2.tight_layout() + fig2.savefig(self.cwd + "data/f_eval_vel_{}_M={}.pdf".format(value, self.num_nodes)) + plt.show() + + def organize_data(self, filename='data/dt_vs_local_errorSDC.csv', time_iter=None): + """ + Organize data according to plot + Args: + filename (string): data 
to find approximate order + time_iter : in case it you used different time iterations + """ + if time_iter == None: + time_iter = self.time_iter + + items = np.genfromtxt(filename, delimiter=',', skip_header=1) + time = items[:, 0] + N = int(np.size(time) / time_iter) + + error_data = {'pos': np.zeros([3, N, time_iter]), 'vel': np.zeros([3, N, time_iter])} + order_data = {'pos': np.zeros([N, time_iter, 2]), 'vel': np.zeros([N, time_iter, 2])} + time_data = np.zeros([N, time_iter]) + convline = {'pos': np.zeros([3, N, time_iter]), 'vel': np.zeros([3, N, time_iter])} + + time_data = time.reshape([N, time_iter]) + + order_data['pos'][:, :, 0] = items[:, 1].reshape([N, time_iter]) + order_data['pos'][:, :, 1] = items[:, 2].reshape([N, time_iter]) + order_data['vel'][:, :, 0] = items[:, 6].reshape([N, time_iter]) + order_data['vel'][:, :, 1] = items[:, 7].reshape([N, time_iter]) + + for ii in range(0, 3): + error_data['pos'][ii, :, :] = items[:, ii + 3].reshape([N, time_iter]) + error_data['vel'][ii, :, :] = items[:, ii + 8].reshape([N, time_iter]) + + for jj in range(0, 3): + if jj == 2: + convline['pos'][jj, :, :] = ( + (time_data / time_data[0, 0]).T ** order_data['pos'][:, jj, 1] + ).T * error_data['pos'][jj, :, 0][:, None] + convline['vel'][jj, :, :] = ( + (time_data / time_data[0, 0]).T ** order_data['vel'][:, jj, 1] + ).T * error_data['vel'][jj, :, 0][:, None] + else: + convline['pos'][jj, :, :] = ( + (time_data / time_data[0, 0]).T ** order_data['pos'][:, jj, 0] + ).T * error_data['pos'][jj, :, 0][:, None] + convline['vel'][jj, :, :] = ( + (time_data / time_data[0, 0]).T ** order_data['vel'][:, jj, 0] + ).T * error_data['vel'][jj, :, 0][:, None] + + return [N, time_data, error_data, order_data, convline] + + # find approximate order + def find_approximate_order(self, filename='data/dt_vs_local_errorSDC.csv'): + """ + This function finds approximate convergence rate and saves in the data folder + Args: + filename: given data + return: + None + """ + [N, 
time_data, error_data, order_data, convline] = self.organize_data(self.cwd + filename) + approx_order = {'pos': np.zeros([1, N]), 'vel': np.zeros([1, N])} + + for jj in range(0, 3): + if jj == 0: + file = open(self.cwd + 'data/{}_order_vs_approx_order.csv'.format(self.error_type), 'w') + + else: + file = open(self.cwd + 'data/{}_order_vs_approx_order.csv'.format(self.error_type), 'a') + + for ii in range(0, N): + approx_order['pos'][0, ii] = np.polyfit( + np.log(time_data[ii, :]), np.log(error_data['pos'][jj, ii, :]), 1 + )[0].real + approx_order['vel'][0, ii] = np.polyfit( + np.log(time_data[ii, :]), np.log(error_data['vel'][jj, ii, :]), 1 + )[0].real + if jj == 2: + file.write( + str(order_data['pos'][:, jj, 1]) + + ' | ' + + str(approx_order['pos'][0]) + + ' | ' + + str(order_data['vel'][:, jj, 1]) + + ' | ' + + str(approx_order['vel'][0]) + + '\n' + ) + else: + file.write( + str(order_data['pos'][:, jj, 0]) + + ' | ' + + str(approx_order['pos'][0]) + + ' | ' + + str(order_data['vel'][:, jj, 0]) + + ' | ' + + str(approx_order['vel'][0]) + + '\n' + ) + file.close() diff --git a/pySDC/projects/Second_orderSDC/stability_simulation.py b/pySDC/projects/Second_orderSDC/stability_simulation.py new file mode 100644 index 0000000000..bd99125359 --- /dev/null +++ b/pySDC/projects/Second_orderSDC/stability_simulation.py @@ -0,0 +1,335 @@ +import numpy as np +import matplotlib.pyplot as plt +from pySDC.core.Errors import ProblemError +from pySDC.core.Step import step + +from pySDC.projects.Second_orderSDC.plot_helper import set_fixed_plot_params + + +class StabilityImplementation: + """ + This class computes and implements stability region of the harmonic oscillator problem + by using different methods (SDC, Picard, RKN). 
+ + Parameters + ----------- + description: gets default paramets for the problem class + kappa_max: maximum value of kappa can reach + mu_max: maximum value of mu can reach + Num_iter: maximum iterations for the kappa and mu on the x and y axes + cwd: current working + + """ + + def __init__(self, description, kappa_max=20, mu_max=20, Num_iter=(400, 400), cwd=''): + self.description = description + self.kappa_max = kappa_max + self.mu_max = mu_max + self.kappa_iter = Num_iter[0] + self.mu_iter = Num_iter[1] + self.lambda_kappa = np.linspace(0.0, self.kappa_max, self.kappa_iter) + self.lambda_mu = np.linspace(1e-10, self.mu_max, self.mu_iter) + + self.K_iter = description['step_params']['maxiter'] + self.num_nodes = description['sweeper_params']['num_nodes'] + self.dt = description['level_params']['dt'] + self.SDC, self.Ksdc, self.picard, self.Kpicard = self.stability_data() + self.cwd = cwd + + def stability_data(self): + """ + Computes stability domain matrix for the Harmonic oscillator problem + Returns: + numpy.ndarray: domain_SDC + numpy.ndarray: domain_Ksdc + numpy.ndarray: domain_picard + numpy.ndarray: domain_Kpicard + """ + S = step(description=self.description) + # Define L to get access level params and functions + L = S.levels[0] + # Number of nodes + num_nodes = L.sweep.coll.num_nodes + # Time step + dt = L.params.dt + + # Define Collocation matrix to find for the stability function + Q = L.sweep.coll.Qmat[1:, 1:] + QQ = np.dot(Q, Q) + Q_coll = np.block([[QQ, np.zeros([num_nodes, num_nodes])], [np.zeros([num_nodes, num_nodes]), Q]]) + qQ = np.dot(L.sweep.coll.weights, Q) + # Matrix with all entries 1 + ones = np.block([[np.ones(num_nodes), np.zeros(num_nodes)], [np.zeros(num_nodes), np.ones(num_nodes)]]) + # Combine all of the weights into a single matrix + q_mat = np.block( + [ + [dt**2 * qQ, np.zeros(num_nodes)], + [np.zeros(num_nodes), dt * L.sweep.coll.weights], + ] + ) + # Zeros matrices to store the values for the stability region values + 
domain_SDC = np.zeros((self.kappa_iter, self.mu_iter), dtype="complex") + domain_picard = np.zeros((self.kappa_iter, self.mu_iter)) + domain_Ksdc = np.zeros((self.kappa_iter, self.mu_iter)) + domain_Kpicard = np.zeros((self.kappa_iter, self.mu_iter)) + # Loop over the different values of the kappa and mu values + for i in range(0, self.kappa_iter): + for j in range(0, self.mu_iter): + k = self.lambda_kappa[i] + mu = self.lambda_mu[j] + # Build right hand side matrix function for the harmonic oscillator problem + F = np.block( + [ + [-k * np.eye(num_nodes), -mu * np.eye(num_nodes)], + [-k * np.eye(num_nodes), -mu * np.eye(num_nodes)], + ] + ) + + if self.K_iter != 0: + # num iteration is not equal to zero then do SDC and Picard iteration + lambdas = [k, mu] + SDC_mat_sweep, Ksdc_eigval = L.sweep.get_scalar_problems_manysweep_mats( + nsweeps=self.K_iter, lambdas=lambdas + ) + # If picard_mats_sweep=True then do also Picard iteration + if L.sweep.params.picard_mats_sweep: + ( + Picard_mat_sweep, + Kpicard_eigval, + ) = L.sweep.get_scalar_problems_picardsweep_mats(nsweeps=self.K_iter, lambdas=lambdas) + else: + ProblemError("Picard iteration is not enabled. 
Set 'picard_mats_sweep' to True to enable.") + domain_Ksdc[i, j] = Ksdc_eigval + if L.sweep.params.picard_mats_sweep: + domain_Kpicard[i, j] = Kpicard_eigval + + else: + # Otherwise Collocation problem + SDC_mat_sweep = np.linalg.inv(np.eye(2 * num_nodes) - dt * np.dot(Q_coll, F)) + # Collation update for both Picard and SDC iterations + if L.sweep.params.do_coll_update: + FSDC = np.dot(F, SDC_mat_sweep) + Rsdc_mat = np.array([[1.0, dt], [0, 1.0]]) + np.dot(q_mat, FSDC) @ ones.T + stab_func, v = np.linalg.eig(Rsdc_mat) + + if L.sweep.params.picard_mats_sweep: + FPicard = np.dot(F, Picard_mat_sweep) + Rpicard_mat = np.array([[1.0, dt], [0, 1.0]]) + np.dot(q_mat, FPicard) @ ones.T + stab_func_picard, v = np.linalg.eig(Rpicard_mat) + else: + raise ProblemError("Collocation update step works only when 'do_coll_update' is set to True.") + # Find and store spectral radius + domain_SDC[i, j] = np.max(np.abs(stab_func)) + if L.sweep.params.picard_mats_sweep: + domain_picard[i, j] = np.max(np.abs(stab_func_picard)) + + return ( + dt * domain_SDC.real, + dt * domain_Ksdc.real, + dt * domain_picard.real, + dt * domain_Kpicard.real, + ) + + def stability_function_RKN(self, k, mu, dt): + """ + Stability function of RKN method + + Returns: + float: maximum absolute values of eigvales + """ + A = np.array([[0, 0, 0, 0], [0.5, 0, 0, 0], [0, 0.5, 0, 0], [0, 0, 1, 0]]) + B = np.array([[0, 0, 0, 0], [0.125, 0, 0, 0], [0.125, 0, 0, 0], [0, 0, 0.5, 0]]) + c = np.array([0, 0.5, 0.5, 1]) + b = np.array([1 / 6, 2 / 6, 2 / 6, 1 / 6]) + bA = np.array([1 / 6, 1 / 6, 1 / 6, 0]) + L = np.eye(4) + k * (dt**2) * B + mu * dt * A + R = np.block([[-k * np.ones(4)], [-(k * c + mu * np.ones(4))]]) + + K = np.linalg.inv(L) @ R.T + C = np.block([[dt**2 * bA], [dt * b]]) + Y = np.array([[1, dt], [0, 1]]) + C @ K + eigval = np.linalg.eigvals(Y) + + return np.max(np.abs(eigval)) + + def stability_data_RKN(self): + """ + Compute and store values into a matrix + + Returns: + numpy.ndarray: stab_RKN + """ + 
stab_RKN = np.zeros([self.kappa_iter, self.mu_iter]) + for ii, kk in enumerate(self.lambda_kappa): + for jj, mm in enumerate(self.lambda_mu): + stab_RKN[jj, ii] = self.stability_function_RKN(kk, mm, self.dt) + + return stab_RKN + + def plot_stability(self, region, title=""): # pragma: no cover + """ + Plotting runtine for moduli + + Args: + stabval (numpy.ndarray): moduli + title: title for the plot + """ + set_fixed_plot_params() + lam_k_max = np.amax(self.lambda_kappa) + lam_mu_max = np.amax(self.lambda_mu) + + plt.figure() + levels = np.array([0.25, 0.5, 0.75, 0.9, 1.0, 1.1]) + + CS1 = plt.contour(self.lambda_kappa, self.lambda_mu, region.T, levels, colors='k', linestyles="dashed") + # CS2 = plt.contour(self.lambda_k, self.lambda_mu, np.absolute(region.T), [1.0], colors='r') + + plt.clabel(CS1, inline=True, fmt="%3.2f") + + plt.gca().set_xticks(np.arange(0, int(lam_k_max) + 3, 3)) + plt.gca().set_yticks(np.arange(0, int(lam_mu_max) + 3, 3)) + plt.gca().tick_params(axis="both", which="both") + plt.xlim([0.0, lam_k_max]) + plt.ylim([0.0, lam_mu_max]) + + plt.xlabel(r"$\Delta t\cdot \kappa$", labelpad=0.0) + plt.ylabel(r"$\Delta t\cdot \mu$", labelpad=0.0) + if self.RKN: + plt.title(f"{title}") + if self.radius: + plt.title("{} $M={}$".format(title, self.num_nodes)) + else: + plt.title(r"{} $M={},\ K={}$".format(title, self.num_nodes, self.K_iter)) + plt.tight_layout() + plt.savefig(self.cwd + "data/M={}_K={}_redion_{}.pdf".format(self.num_nodes, self.K_iter, title)) + + def run_SDC_stability(self): # pragma: no cover + self.RKN = False + self.radius = False + self.plot_stability(self.SDC, title="SDC stability region") + + def run_Picard_stability(self): # pragma: no cover + self.RKN = False + self.radius = False + self.plot_stability(self.picard, title="Picard stability region") + + def run_Ksdc(self): # pragma: no cover + self.radius = True + self.plot_stability(self.Ksdc, title="$K_{sdc}$ spectral radius") + + def run_Kpicard(self): # pragma: no cover + 
self.radius = True + self.plot_stability(self.Kpicard, title="$K_{picard}$ spectral radius") + + def run_RKN_stability(self): # pragma: no cover + self.RKN = True + self.radius = False + region_RKN = self.stability_data_RKN() + self.plot_stability(region_RKN.T, title='RKN-4 stability region') + + +def check_points_and_interval( + description, helper_params, point, compute_interval=False, check_stability_point=False, Picard=False +): + # Storage for stability interval and stability check + interval_data = [] + points_data = [] + + # Loop through different numbers of nodes and maximum iterations + for quad_type in helper_params['quad_type_list']: + for num_nodes in helper_params['num_nodes_list']: + for max_iter in helper_params['max_iter_list']: + # Update simulation parameters + description['sweeper_params']['num_nodes'] = num_nodes + description['sweeper_params']['quad_type'] = quad_type + description['step_params']['maxiter'] = max_iter + + # Create Stability_implementation instance for stability check + + stab_model = StabilityImplementation( + description, kappa_max=point[0], mu_max=point[1], Num_iter=helper_params['Num_iter'] + ) + if compute_interval: + # Extract the values where SDC is less than or equal to 1 + if Picard: + mask = stab_model.picard <= 1 + 1e-14 + else: + mask = stab_model.SDC <= 1.0 + for ii in range(len(mask)): + if mask[ii]: + kappa_max_interval = stab_model.lambda_kappa[ii] + else: + break + + # Add row to the interval data + interval_data.append([quad_type, num_nodes, max_iter, kappa_max_interval]) + + if check_stability_point: + # Check stability and print results + if stab_model.SDC[-1, -1] <= 1: + stability_result = "Stable" + else: + stability_result = "Unstable. 
Increase M or K" + + # Add row to the results data + points_data.append( + [quad_type, num_nodes, max_iter, point, stability_result, stab_model.SDC[-1, -1]] + ) + if compute_interval: + return interval_data + else: + return points_data + + +def compute_and_generate_table( + description, + helper_params, + point, + compute_interval=False, + save_interval_file=False, + interval_filename='./data/stab_interval.txt', + check_stability_point=False, + save_points_table=False, + points_table_filename='./data/point_table.txt', + quadrature_list=('GAUSS', 'LOBATTO'), + Picard=False, +): # pragma: no cover + from tabulate import tabulate + + if compute_interval: + interval_data = check_points_and_interval( + description, helper_params, point, compute_interval=compute_interval, Picard=Picard + ) + else: + points_data = check_points_and_interval( + description, helper_params, point, check_stability_point=check_stability_point + ) + + # Define column names for interval data + interval_headers = ["Quad Type", "Num Nodes", "Max Iter", 'kappa_max'] + + # Define column names for results data + points_headers = ["Quad Type", "Num Nodes", "Max Iter", "(kappa, mu)", "Stability", "Spectral Radius"] + # Print or save the tables using tabulate + if save_interval_file and compute_interval: + interval_table_str = tabulate(interval_data, headers=interval_headers, tablefmt="grid") + with open(interval_filename, 'w') as file: + file.write(interval_table_str) + print(f"Stability Interval Table saved to {interval_filename}") + + if save_points_table and check_stability_point: + points_table_str = tabulate(points_data, headers=points_headers, tablefmt="grid") + with open(points_table_filename, 'w') as file: + file.write(points_table_str) + print(f"Stability Results Table saved to {points_table_filename}") + + if compute_interval: + if Picard: + print("Picard stability Interval Table:") + else: + print("SDC stability Interval Table:") + print(tabulate(interval_data, headers=interval_headers, 
tablefmt="grid")) + + if check_stability_point: + print("\nStability Results Table:") + print(tabulate(points_data, headers=points_headers, tablefmt="grid")) diff --git a/pySDC/projects/parallelSDC/preconditioner_playground_MPI.py b/pySDC/projects/parallelSDC/preconditioner_playground_MPI.py index 82d2766c57..c2a1e66bdf 100644 --- a/pySDC/projects/parallelSDC/preconditioner_playground_MPI.py +++ b/pySDC/projects/parallelSDC/preconditioner_playground_MPI.py @@ -189,9 +189,10 @@ def plot_iterations(): color_list = ['r', 'g', 'b', 'c', 'm'] plt_helper.setup_mpl() - + print('post setup') # loop over setups and Q-delta types: one figure per setup, all Qds in one plot for setup in setup_list: + print('setup') plt_helper.newfig(textwidth=238.96, scale=0.89) for qd_type, marker, color in zip(qd_type_list, marker_list, color_list): diff --git a/pySDC/projects/parallelSDC_reloaded/.gitignore b/pySDC/projects/parallelSDC_reloaded/.gitignore new file mode 100644 index 0000000000..61a98ac609 --- /dev/null +++ b/pySDC/projects/parallelSDC_reloaded/.gitignore @@ -0,0 +1,2 @@ +*.pdf +_sol*.json diff --git a/pySDC/projects/parallelSDC_reloaded/README.md b/pySDC/projects/parallelSDC_reloaded/README.md new file mode 100644 index 0000000000..c11552253f --- /dev/null +++ b/pySDC/projects/parallelSDC_reloaded/README.md @@ -0,0 +1,36 @@ +Numerical experiment scripts for the parallel SDC paper +============================================================== + +Python scripts of the numerical experiment for the following paper: + +.. 
code-block:: tex + + @misc{caklovicXXXXimproving, + title={Improving Parallelism Across the Method for Spectral Deferred Corrections}, + author={Gayatri \v{C}aklovi\'c and Lunet Thibaut and Götschel Sebastian and Ruprecht Daniel}, + year={2023}, + comment={to be submitted to SISC}, + } + +Figures for the manuscript +-------------------------- + +See the `scripts` folder, all Python scripts have the `fig0[...]` prefix, and each script generates several figures +from the manuscript. +One can run everything with the `run.sh` script, and crop the pdf figures to the format used in the manuscript with the `crop.sh` script. + +Experimental scripts +-------------------- + +For each problem `probName`, there are two scripts: + +- `{probName}_setup.py` : runs the problem with specific parameters and one given SDC configuration +- `{probName}_accuracy.py` : generates error vs dt and error vs cost figures for different SDC configurations + +In addition, there are these generic scripts for analysis: + +- `convergence.py` : generates a convergence graph for specific SDC configurations, using Dahlquist +- `nilpotency.py` : looks at the nilpotency of stiff and non-stiff limits of the SDC iteration matrix +- `stability.py` : generates stability contours for specific SDC configurations + +Finally, all those scripts use the utility module `utils.py`. 
diff --git a/pySDC/projects/parallelSDC_reloaded/__init__.py b/pySDC/projects/parallelSDC_reloaded/__init__.py new file mode 100644 index 0000000000..792d600548 --- /dev/null +++ b/pySDC/projects/parallelSDC_reloaded/__init__.py @@ -0,0 +1 @@ +# diff --git a/pySDC/projects/parallelSDC_reloaded/allenCahn_accuracy.py b/pySDC/projects/parallelSDC_reloaded/allenCahn_accuracy.py new file mode 100644 index 0000000000..d3fe676c31 --- /dev/null +++ b/pySDC/projects/parallelSDC_reloaded/allenCahn_accuracy.py @@ -0,0 +1,119 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Tue Dec 5 11:02:39 2023 + +Script to investigate diagonal SDC on the Allen-Cahn problem : + +- error VS time-step +- error VS computation cost + +Note : implementation in progress ... +""" +import numpy as np +import matplotlib.pyplot as plt + +from pySDC.projects.parallelSDC_reloaded.utils import getParamsSDC, getParamsRK, solutionSDC, solutionExact + +# Problem parameters +tEnd = 50 +pName = "ALLEN-CAHN" +periodic = False +pParams = { + "periodic": periodic, + "nvars": 2**11 - (not periodic), + "epsilon": 0.04, +} + + +def getError(uNum, uRef): + if uNum is None: + return np.inf + return np.linalg.norm(uRef[-1, :] - uNum[-1, :], ord=2) + + +def getCost(counters): + nNewton, nRHS, tComp = counters + return 2 * nNewton + nRHS + + +# Base variable parameters +nNodes = 4 +quadType = 'RADAU-RIGHT' +nodeType = 'LEGENDRE' +parEfficiency = 1 / nNodes + +qDeltaList = [ + 'RK4', + 'ESDIRK53', + 'VDHS', + 'MIN', + # 'IE', 'LU', 'IEpar', 'PIC', + 'MIN-SR-NS', + 'MIN-SR-S', + 'MIN-SR-FLEX', + "PIC", + # "MIN3", +] +nStepsList = np.array([1, 2, 5, 10, 20, 50, 100, 200]) +nSweepList = [1, 2, 3, 4, 5, 6] + +qDeltaList = ['ESDIRK43', 'MIN-SR-FLEX'] +nSweepList = [4] + + +symList = ['o', '^', 's', '>', '*', '<', 'p', '>'] * 10 +fig, axs = plt.subplots(1, 2) + +dtVals = tEnd / nStepsList + +i = 0 +for qDelta in qDeltaList: + for nSweeps in nSweepList: + sym = symList[i] + i += 1 + + name = 
f"{qDelta}({nSweeps})" + try: + params = getParamsRK(qDelta) + name = name[:-3] + except KeyError: + params = getParamsSDC( + quadType=quadType, numNodes=nNodes, nodeType=nodeType, qDeltaI=qDelta, nSweeps=nSweeps + ) + print(f'computing for {name} ...') + + errors = [] + costs = [] + + for nSteps in nStepsList: + print(f' -- nSteps={nSteps} ...') + + uRef = solutionExact(tEnd, nSteps, pName, **pParams) + + uSDC, counters, parallel = solutionSDC(tEnd, nSteps, params, pName, **pParams) + + err = getError(uSDC, uRef) + errors.append(err) + + cost = getCost(counters) + if parallel: + cost /= nNodes * parEfficiency + costs.append(cost) + + # error VS dt + axs[0].loglog(dtVals, errors, sym + '-', label=name) + # error VS cost + axs[1].loglog(costs, errors, sym + '-', label=name) + +for i in range(2): + axs[i].set( + xlabel=r"$\Delta{t}$" if i == 0 else "cost", + ylabel=r"$L_\infty$ error", + ylim=(1e-5, 1e1), + ) + axs[i].legend() + axs[i].grid() + +fig.set_size_inches(12, 5) +fig.tight_layout() diff --git a/pySDC/projects/parallelSDC_reloaded/allenCahn_setup.py b/pySDC/projects/parallelSDC_reloaded/allenCahn_setup.py new file mode 100644 index 0000000000..82a80ffe24 --- /dev/null +++ b/pySDC/projects/parallelSDC_reloaded/allenCahn_setup.py @@ -0,0 +1,56 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Thu Dec 7 21:22:52 2023 + +Setup script for the Allen-Cahn problem +""" +import numpy as np +import matplotlib.pyplot as plt + +from pySDC.projects.parallelSDC_reloaded.utils import solutionExact, getParamsRK, solutionSDC, getParamsSDC + +script = __file__.split('/')[-1].split('.')[0] + +tEnd = 50 +nSteps = 50 + +useRK = True +if useRK: + rkScheme = "ESDIRK53" + params = getParamsRK(rkScheme) +else: # pragma: no cover + nNodes = 4 + nSweeps = 5 + quadType = 'RADAU-RIGHT' + nodeType = 'LEGENDRE' + qDelta = "MIN-SR-S" + params = getParamsSDC(quadType, nNodes, qDelta, nSweeps, nodeType) + +pName = "ALLEN-CAHN" +periodic = False +pParams = { + "periodic": 
periodic, + "nvars": 2**11 - (not periodic), + "epsilon": 0.04, +} + +tVals = np.linspace(0, tEnd, nSteps + 1) + +print("Computing ODE solution") +uExact = solutionExact(tEnd, nSteps, pName, **pParams) + + +uNum, counters, _ = solutionSDC(tEnd, nSteps, params, pName, **pParams) + +figName = f"{script}_solution" +plt.figure(figName) +plt.plot(uExact[0, :], '-', label="$u(0)$") +plt.plot(uExact[-1, :], '-', label="$u_{exact}(T)$") +plt.plot(uNum[-1, :], '--', label="$u_{num}(T)$") + + +plt.legend() +plt.xlabel("X") +plt.ylabel("solution") +plt.tight_layout() diff --git a/pySDC/projects/parallelSDC_reloaded/chemicalReaction_accuracy.py b/pySDC/projects/parallelSDC_reloaded/chemicalReaction_accuracy.py new file mode 100644 index 0000000000..2566daaf5b --- /dev/null +++ b/pySDC/projects/parallelSDC_reloaded/chemicalReaction_accuracy.py @@ -0,0 +1,112 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Tue Dec 5 11:02:39 2023 + +Script to investigate diagonal SDC on the ProtheroRobinson +(linear and non-linear) problem : + +- error VS time-step +- error VS computation cost + +Note : implementation in progress ... 
+""" +import numpy as np +import matplotlib.pyplot as plt + +from pySDC.projects.parallelSDC_reloaded.utils import getParamsSDC, getParamsRK, solutionSDC, solutionExact + +# Problem parameters +tEnd = 300 +pName = "CHEMREC" + + +def getError(uNum, uRef): + if uNum is None: # pragma: no cover + return np.inf + return max(np.linalg.norm(uRef[:, 0] - uNum[:, 0], np.inf), np.linalg.norm(uRef[:, 1] - uNum[:, 1], np.inf)) + + +def getCost(counters): + nNewton, nRHS, tComp = counters + return nNewton + nRHS + + +# Base variable parameters +nNodes = 4 +quadType = 'RADAU-RIGHT' +nodeType = 'LEGENDRE' +parEfficiency = 1 / nNodes + +qDeltaList = [ + 'RK4', + 'ESDIRK53', + 'ESDIRK43', + # 'IE', 'LU', 'IEpar', 'PIC', + 'MIN-SR-NS', + 'MIN-SR-S', + 'MIN-SR-FLEX', + # "MIN3", +] +nStepsList = np.array([2, 5, 10, 20]) +nSweepList = [1, 2, 3, 4, 5, 6] + +qDeltaList = ['ESDIRK43', 'MIN-SR-S', 'MIN-SR-FLEX'] +nSweepList = [4] + + +symList = ['o', '^', 's', '>', '*', '<', 'p', '>'] * 10 +fig, axs = plt.subplots(1, 2) + +dtVals = tEnd / nStepsList + +i = 0 +for qDelta in qDeltaList: + for nSweeps in nSweepList: + sym = symList[i] + i += 1 + + name = f"{qDelta}({nSweeps})" + try: + params = getParamsRK(qDelta) + name = name[:-3] + except KeyError: + params = getParamsSDC( + quadType=quadType, numNodes=nNodes, nodeType=nodeType, qDeltaI=qDelta, nSweeps=nSweeps + ) + print(f'computing for {name} ...') + + errors = [] + costs = [] + + for nSteps in nStepsList: + print(f' -- nSteps={nSteps} ...') + + uRef = solutionExact(tEnd, nSteps, pName) + + uSDC, counters, parallel = solutionSDC(tEnd, nSteps, params, pName) + + err = getError(uSDC, uRef) + errors.append(err) + + cost = getCost(counters) + if parallel: + cost /= nNodes * parEfficiency + costs.append(cost) + + # error VS dt + axs[0].loglog(dtVals, errors, sym + '-', label=name) + # error VS cost + axs[1].loglog(costs, errors, sym + '-', label=name) + +for i in range(2): + axs[i].set( + xlabel=r"$\Delta{t}$" if i == 0 else "cost", + 
ylabel=r"$L_\infty$ error", + ylim=(1e-9, 1e0), + ) + axs[i].legend(loc="lower right" if i == 0 else "lower left") + axs[i].grid() + +fig.set_size_inches(12, 5) +fig.tight_layout() diff --git a/pySDC/projects/parallelSDC_reloaded/chemicalReaction_setup.py b/pySDC/projects/parallelSDC_reloaded/chemicalReaction_setup.py new file mode 100644 index 0000000000..9f9b8c055c --- /dev/null +++ b/pySDC/projects/parallelSDC_reloaded/chemicalReaction_setup.py @@ -0,0 +1,39 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Thu Dec 7 21:22:52 2023 + +Setup script for the Chemical Reaction problem +""" +import numpy as np +import matplotlib.pyplot as plt + +from pySDC.projects.parallelSDC_reloaded.utils import solutionExact, getParamsRK, solutionSDC + +script = __file__.split('/')[-1].split('.')[0] + +tEnd = 300 +nSteps = 10 +rkScheme = "RK4" + +tVals = np.linspace(0, tEnd, nSteps + 1) + +print("Computing ODE solution") +uExact = solutionExact(tEnd, nSteps, "CHEMREC") + +params = getParamsRK(rkScheme) +uNum, counters, _ = solutionSDC(tEnd, nSteps, params, 'CHEMREC') + +figName = f"{script}_solution" +plt.figure(figName) +plt.plot(tVals, uExact[:, 0], '-', label="c1-exact") +plt.plot(tVals, uExact[:, 1], '-', label="c2-exact") +plt.plot(tVals, uExact[:, 2], '-', label="c3-exact") +plt.plot(tVals, uNum[:, 0], '--', label="c1-num") +plt.plot(tVals, uNum[:, 1], '--', label="c2-num") +plt.plot(tVals, uNum[:, 2], '--', label="c3-num") + +plt.legend() +plt.xlabel("time") +plt.ylabel("solution") +plt.tight_layout() diff --git a/pySDC/projects/parallelSDC_reloaded/convergence.py b/pySDC/projects/parallelSDC_reloaded/convergence.py new file mode 100644 index 0000000000..04128c661c --- /dev/null +++ b/pySDC/projects/parallelSDC_reloaded/convergence.py @@ -0,0 +1,76 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Tue Jan 9 14:44:41 2024 + +Generate convergence plots on Dahlquist for SDC with given parameters +""" +import numpy as np +from 
pySDC.projects.parallelSDC_reloaded.utils import getParamsRK, getParamsSDC, solutionSDC, plt + +SCRIPT = __file__.split('/')[-1].split('.')[0] + +# Script parameters +lam = 1j +tEnd = 2 * np.pi +nStepsList = np.array([2, 5, 10, 20, 50, 100, 200, 500, 1000]) +dtVals = tEnd / nStepsList + + +def getError(uNum, uRef): + if uNum is None: # pragma: no cover + return np.inf + return np.linalg.norm(uRef - uNum[:, 0], np.inf) + + +# Collocation parameters +nNodes = 4 +nodeType = "LEGENDRE" +quadType = "RADAU-RIGHT" +sweepType = "MIN-SR-NS" + +# Schemes parameters +schemes = [("RK4", None), ("ESDIRK43", None), *[(sweepType, i) for i in [1, 2, 3, 4]][:1]] + +styles = [ + dict(ls=":", c="gray"), + dict(ls="-.", c="gray"), + dict(ls="-", marker='o'), + dict(ls="-", marker='>'), + dict(ls="-", marker='s'), + dict(ls="-", marker='^'), + dict(ls="-", marker='*'), +] + +# ----------------------------------------------------------------------------- +# Script execution +# ----------------------------------------------------------------------------- +plt.figure() +for (qDelta, nSweeps), style in zip(schemes, styles): + if nSweeps is None: + params = getParamsRK(qDelta) + label = None + else: + params = getParamsSDC(quadType, nNodes, qDelta, nSweeps, nodeType) + label = f"$K={nSweeps}$" + errors = [] + + for nSteps in nStepsList: + uNum, counters, parallel = solutionSDC(tEnd, nSteps, params, 'DAHLQUIST', lambdas=np.array([lam])) + + tVals = np.linspace(0, tEnd, nSteps + 1) + uExact = np.exp(lam * tVals) + + err = getError(uNum, uExact) + errors.append(err) + + plt.loglog(dtVals, errors, **style, label=label) + if nSweeps is not None: + plt.loglog(dtVals, (0.1 * dtVals) ** nSweeps, '--', c='gray', lw=1.5) + +plt.title(sweepType) +plt.legend() +plt.xlabel(r"$\Delta{t}$") +plt.ylabel(r"$L_\infty$ error") +plt.grid(True) +plt.tight_layout() diff --git a/pySDC/projects/parallelSDC_reloaded/jacobiElliptic_accuracy.py b/pySDC/projects/parallelSDC_reloaded/jacobiElliptic_accuracy.py new file 
mode 100644 index 0000000000..4012c0ca82 --- /dev/null +++ b/pySDC/projects/parallelSDC_reloaded/jacobiElliptic_accuracy.py @@ -0,0 +1,114 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Tue Dec 5 11:02:39 2023 + +Script to investigate diagonal SDC on the JacobianElliptic problem + +- error VS time-step +- error VS computation cost + +Note : implementation in progress ... +""" +import numpy as np +import matplotlib.pyplot as plt + +from pySDC.projects.parallelSDC_reloaded.utils import getParamsSDC, getParamsRK, solutionSDC, solutionExact + +# Problem parameters +tEnd = 10 +pName = "JACELL" + + +def getError(uNum, uRef): + if uNum is None: # pragma: no cover + return np.inf + return np.linalg.norm(uRef[-1] - uNum[-1], np.inf) + + +def getCost(counters): + nNewton, nRHS, tComp = counters + return nNewton + nRHS + + +# Base variable parameters +nNodes = 4 +quadType = 'RADAU-RIGHT' +nodeType = 'LEGENDRE' +parEfficiency = 1 / nNodes + +qDeltaList = [ + 'RK4', + 'ESDIRK53', + 'ESDIRK43', + 'PIC', + # 'IE', 'LU', 'IEpar', 'PIC', + 'MIN-SR-NS', + 'MIN-SR-S', + 'MIN-SR-FLEX', + # "MIN3", +] +nStepsList = np.array([10, 20, 50, 100, 200]) +# nSweepList = [1, 2, 3, 4] + +# qDeltaList = ['RK4', 'ESDIRK43', 'MIN-SR-S'] +nSweepList = [4] + + +symList = ['o', '^', 's', '>', '*', '<', 'p', '>'] * 10 +fig, axs = plt.subplots(1, 2) + +dtVals = tEnd / nStepsList + +i = 0 +for qDelta in qDeltaList: + for nSweeps in nSweepList: + sym = symList[i] + i += 1 + + name = f"{qDelta}({nSweeps})" + try: + params = getParamsRK(qDelta) + name = name[:-3] + if nSweeps != nSweepList[0]: # pragma: no cover + continue + except KeyError: + params = getParamsSDC( + quadType=quadType, numNodes=nNodes, nodeType=nodeType, qDeltaI=qDelta, nSweeps=nSweeps + ) + print(f'computing for {name} ...') + + errors = [] + costs = [] + + for nSteps in nStepsList: + print(f' -- nSteps={nSteps} ...') + + uRef = solutionExact(tEnd, nSteps, pName) + + uSDC, counters, parallel = solutionSDC(tEnd, 
nSteps, params, pName) + + err = getError(uSDC, uRef) + errors.append(err) + + cost = getCost(counters) + if parallel: + cost /= nNodes * parEfficiency + costs.append(cost) + + # error VS dt + axs[0].loglog(dtVals, errors, sym + '-', label=name) + # error VS cost + axs[1].loglog(costs, errors, sym + '-', label=name) + +for i in range(2): + axs[i].set( + xlabel=r"$\Delta{t}$" if i == 0 else "cost", + ylabel=r"$L_\infty$ error", + # ylim=(1e-9, 1e0), + ) + axs[i].legend(loc="lower right" if i == 0 else "lower left") + axs[i].grid() + +fig.set_size_inches(12, 5) +fig.tight_layout() diff --git a/pySDC/projects/parallelSDC_reloaded/jacobiElliptic_setup.py b/pySDC/projects/parallelSDC_reloaded/jacobiElliptic_setup.py new file mode 100644 index 0000000000..8c5cd5f3fd --- /dev/null +++ b/pySDC/projects/parallelSDC_reloaded/jacobiElliptic_setup.py @@ -0,0 +1,53 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Thu Dec 7 21:22:52 2023 + +Setup script for the JacobianElliptic problem +""" +import numpy as np +import matplotlib.pyplot as plt + +from pySDC.projects.parallelSDC_reloaded.utils import solutionExact, getParamsRK, getParamsSDC, solutionSDC + +script = __file__.split('/')[-1].split('.')[0] + +tEnd = 10 +nSteps = 100 + +useRK = True +if useRK: + rkScheme = "RK4" + params = getParamsRK(rkScheme) +else: # pragma: no cover + nNodes = 4 + nSweeps = 5 + quadType = 'RADAU-RIGHT' + nodeType = 'LEGENDRE' + qDelta = "MIN-SR-S" + params = getParamsSDC(quadType, nNodes, qDelta, nSweeps, nodeType) + +pName = "JACELL" +periodic = False +pParams = {} + +tVals = np.linspace(0, tEnd, nSteps + 1) + +print("Computing ODE solution") +uExact = solutionExact(tEnd, nSteps, pName, **pParams) + +uNum, counters, _ = solutionSDC(tEnd, nSteps, params, pName, **pParams) + +figName = f"{script}_solution" +plt.figure(figName) +plt.plot(tVals, uExact[:, 0], '-', label="u1-exact") +plt.plot(tVals, uExact[:, 1], '-', label="u2-exact") +plt.plot(tVals, uExact[:, 2], '-', 
label="u3-exact") +plt.plot(tVals, uNum[:, 0], '--', label="u1-num") +plt.plot(tVals, uNum[:, 1], '--', label="u2-num") +plt.plot(tVals, uNum[:, 2], '--', label="u3-num") + +plt.legend() +plt.xlabel("time") +plt.ylabel("solution") +plt.tight_layout() diff --git a/pySDC/projects/parallelSDC_reloaded/kaps_accuracy.py b/pySDC/projects/parallelSDC_reloaded/kaps_accuracy.py new file mode 100644 index 0000000000..1c9ab079d1 --- /dev/null +++ b/pySDC/projects/parallelSDC_reloaded/kaps_accuracy.py @@ -0,0 +1,115 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Tue Dec 5 11:02:39 2023 + +Script to investigate diagonal SDC on the ProtheroRobinson +(linear and non-linear) problem : + +- error VS time-step +- error VS computation cost + +Note : implementation in progress ... +""" +import numpy as np +import matplotlib.pyplot as plt + +from pySDC.projects.parallelSDC_reloaded.utils import getParamsSDC, getParamsRK, solutionSDC, solutionExact + +# Problem parameters +tEnd = 1 +pName = "KAPS" +pParams = { + "epsilon": 1e-6, +} + + +def getError(uNum, uRef): + if uNum is None: + return np.inf + return max(np.linalg.norm(uRef[:, 0] - uNum[:, 0], np.inf), np.linalg.norm(uRef[:, 1] - uNum[:, 1], np.inf)) + + +def getCost(counters): + nNewton, nRHS, tComp = counters + return nNewton + nRHS + + +# Base variable parameters +nNodes = 4 +quadType = 'RADAU-RIGHT' +nodeType = 'LEGENDRE' +parEfficiency = 0.8 # 1/nNodes + +qDeltaList = [ + 'RK4', + 'ESDIRK53', + 'DIRK43', + # 'IE', 'LU', 'IEpar', 'PIC', + 'MIN-SR-NS', + 'MIN-SR-S', + 'MIN-SR-FLEX', + # "MIN3", +] +nStepsList = np.array([2, 5, 10, 20, 50, 100]) +nSweepList = [1, 2, 3, 4, 5, 6] + +# qDeltaList = ['MIN-SR-S'] +nSweepList = [4] + + +symList = ['o', '^', 's', '>', '*', '<', 'p', '>'] * 10 +fig, axs = plt.subplots(1, 2) + +dtVals = tEnd / nStepsList + +i = 0 +for qDelta in qDeltaList: + for nSweeps in nSweepList: + sym = symList[i] + i += 1 + + name = f"{qDelta}({nSweeps})" + try: + params = getParamsRK(qDelta) 
+ name = name[:-3] + except KeyError: + params = getParamsSDC( + quadType=quadType, numNodes=nNodes, nodeType=nodeType, qDeltaI=qDelta, nSweeps=nSweeps + ) + print(f'computing for {name} ...') + + errors = [] + costs = [] + + for nSteps in nStepsList: + print(f' -- nSteps={nSteps} ...') + + uRef = solutionExact(tEnd, nSteps, pName, **pParams) + + uSDC, counters, parallel = solutionSDC(tEnd, nSteps, params, pName, **pParams) + + err = getError(uSDC, uRef) + errors.append(err) + + cost = getCost(counters) + if parallel: + cost /= nNodes * parEfficiency + costs.append(cost) + + # error VS dt + axs[0].loglog(dtVals, errors, sym + '-', label=name) + # error VS cost + axs[1].loglog(costs, errors, sym + '-', label=name) + +for i in range(2): + axs[i].set( + xlabel=r"$\Delta{t}$" if i == 0 else "cost", + ylabel=r"$L_\infty$ error", + ylim=(1e-17, 1e0), + ) + axs[i].legend(loc="lower right" if i == 0 else "lower left") + axs[i].grid() + +fig.set_size_inches(12, 5) +fig.tight_layout() diff --git a/pySDC/projects/parallelSDC_reloaded/kaps_setup.py b/pySDC/projects/parallelSDC_reloaded/kaps_setup.py new file mode 100644 index 0000000000..68ce5e53b0 --- /dev/null +++ b/pySDC/projects/parallelSDC_reloaded/kaps_setup.py @@ -0,0 +1,38 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Thu Dec 7 19:29:46 2023 + +Setup script for the Kaps problem +""" +import numpy as np +import matplotlib.pyplot as plt + +from pySDC.projects.parallelSDC_reloaded.utils import solutionExact, getParamsRK, solutionSDC + +script = __file__.split('/')[-1].split('.')[0] + +tEnd = 1 +nSteps = 100 +epsilon = 1e-3 +rkScheme = "DIRK43" + +tVals = np.linspace(0, tEnd, nSteps + 1) + +print("Computing ODE solution") +uExact = solutionExact(tEnd, nSteps, "KAPS", epsilon=epsilon) + +params = getParamsRK(rkScheme) +uNum, counters, parallel = solutionSDC(tEnd, nSteps, params, 'KAPS', epsilon=epsilon) + +figName = f"{script}_solution" +plt.figure(figName) +plt.plot(tVals, uExact[:, 0], '-', 
label="x-exact") +plt.plot(tVals, uExact[:, 1], '-', label="y-exact") +plt.plot(tVals, uNum[:, 0], '--', label="x-num") +plt.plot(tVals, uNum[:, 1], '--', label="y-num") + +plt.legend() +plt.xlabel("time") +plt.ylabel("solution") +plt.tight_layout() diff --git a/pySDC/projects/parallelSDC_reloaded/lorenz_accuracy.py b/pySDC/projects/parallelSDC_reloaded/lorenz_accuracy.py new file mode 100644 index 0000000000..9482ceb745 --- /dev/null +++ b/pySDC/projects/parallelSDC_reloaded/lorenz_accuracy.py @@ -0,0 +1,119 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Tue Dec 5 11:02:39 2023 + +Script to investigate diagonal SDC on Lorenz system + +- error VS time-step +- error VS computation cost + +Note : implementation in progress ... +""" +import numpy as np +import matplotlib.pyplot as plt + +from pySDC.projects.parallelSDC_reloaded.utils import getParamsSDC, getParamsRK, solutionSDC, solutionExact + +tEnd = 1.24 + + +def getError(uNum, uRef): + if uNum is None: # pragma: no cover + return np.inf + return np.linalg.norm(np.linalg.norm(uRef - uNum, np.inf, axis=-1), np.inf) + + +def getCost(counters): + nNewton, nRHS, tComp = counters + return nNewton + nRHS + + +# Base variable parameters +nNodes = 4 +quadType = 'RADAU-RIGHT' +nodeType = 'LEGENDRE' +parEfficiency = 0.8 # 1/nNodes + +qDeltaList = [ + 'RK4', + 'ESDIRK53', + 'VDHS', + 'MIN', + # 'IE', 'LU', 'IEpar', 'PIC', + 'MIN-SR-NS', + 'MIN-SR-S', + 'MIN-SR-FLEX', + "PIC", + # "MIN3", +] +nStepsList = np.array([2, 5, 10, 20, 50, 100, 200]) +nSweepList = [1, 2, 3, 4] + + +symList = ['o', '^', 's', '>', '*', '<', 'p', '>'] * 10 + +qDeltaList = ['MIN-SR-S', 'RK4'] +# nSweepList = [4] + +fig, axs = plt.subplots(1, 2) + + +dtVals = tEnd / nStepsList + +i = 0 +for qDelta in qDeltaList: + for nSweeps in nSweepList: + sym = symList[i] + i += 1 + + name = f"{qDelta}({nSweeps})" + try: + params = getParamsRK(qDelta) + name = name[:-3] + if nSweeps != nSweepList[0]: + continue + except KeyError: + params = 
getParamsSDC( + quadType=quadType, numNodes=nNodes, nodeType=nodeType, qDeltaI=qDelta, nSweeps=nSweeps + ) + print(f'computing for {name} ...') + + errors = [] + costs = [] + + for nSteps in nStepsList: + print(f' -- nSteps={nSteps} ...') + + uRef = solutionExact(tEnd, nSteps, "LORENZ", u0=(5, -5, 20)) + + uSDC, counters, parallel = solutionSDC(tEnd, nSteps, params, "LORENZ", u0=(5, -5, 20)) + + err = getError(uSDC, uRef) + errors.append(err) + + cost = getCost(counters) + if parallel: + cost /= nNodes * parEfficiency + costs.append(cost) + + # error VS dt + axs[0].loglog(dtVals, errors, sym + '-', label=name) + # error VS cost + axs[1].loglog(costs, errors, sym + '-', label=name) + +x = dtVals[4:] +for k in [1, 2, 3, 4, 5]: + axs[0].loglog(x, 1e4 * x**k, "--", color="gray", linewidth=0.8) + +for i in range(2): + axs[i].set( + xlabel=r"$\Delta{t}$" if i == 0 else "cost", + ylabel=r"$L_\infty$ error", + ylim=(8.530627786509715e-12, 372.2781393394293), + ) + axs[i].legend(loc="lower right" if i == 0 else "lower left") + axs[i].grid() + +fig.set_size_inches(12, 5) +fig.tight_layout() diff --git a/pySDC/projects/parallelSDC_reloaded/lorenz_setup.py b/pySDC/projects/parallelSDC_reloaded/lorenz_setup.py new file mode 100644 index 0000000000..86b651c510 --- /dev/null +++ b/pySDC/projects/parallelSDC_reloaded/lorenz_setup.py @@ -0,0 +1,42 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Tue Dec 5 10:08:04 2023 + +Script to numerically compute number revolution periods for the Lorenz system +""" +import numpy as np +from scipy import signal +import matplotlib.pyplot as plt + +from pySDC.projects.parallelSDC_reloaded.utils import solutionExact + +script = __file__.split('/')[-1].split('.')[0] + +tEnd = 2 +nSteps = tEnd * 50 +tVals = np.linspace(0, tEnd, nSteps + 1) + +nPeriods = 2 + +print(f"Computing exact solution up to t={tEnd} ...") +uExact = solutionExact(tEnd, nSteps, "LORENZ", u0=(5, -5, 20)) + +z = uExact[:, -1] +idx = 
signal.find_peaks(z)[0][nPeriods - 1] + + +print(f'tEnd for {nPeriods} periods : {tVals[idx]}') + +figName = f"{script}_traj" + +plt.figure(figName) +plt.plot(tVals, uExact[:, 0], '-', label="$x(t)$") +plt.plot(tVals, uExact[:, 1], '-', label="$y(t)$") +plt.plot(tVals, uExact[:, 2], '-', label="$z(t)$") +plt.vlines(tVals[idx], ymin=-20, ymax=40, linestyles="--", linewidth=1) + +plt.legend() +plt.xlabel("time") +plt.ylabel("trajectory") +plt.tight_layout() diff --git a/pySDC/projects/parallelSDC_reloaded/nilpotency.py b/pySDC/projects/parallelSDC_reloaded/nilpotency.py new file mode 100644 index 0000000000..f3a2a64e64 --- /dev/null +++ b/pySDC/projects/parallelSDC_reloaded/nilpotency.py @@ -0,0 +1,66 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Fri Dec 22 17:17:14 2023 + +Evaluate the nilpotency of diagonal preconditionners MIN-SR-S and MIN-SR-NS +with increasing number of nodes. +""" +import numpy as np +import matplotlib.pyplot as plt + +from pySDC.core.Sweeper import sweeper + +quadType = "LOBATTO" +nodeType = "LEGENDRE" + + +def nilpotencyS(d, Q): + if quadType in ['LOBATTO', 'RADAU-LEFT']: + d = d[1:] + Q = Q[1:, 1:] + M = d.size + D = np.diag(1 / d) + K = np.eye(M) - D @ Q + return np.linalg.norm(np.linalg.matrix_power(K, M), ord=np.inf) + + +def nilpotencyNS(d, Q): + M = d.size + D = np.diag(d) + K = D - Q + return np.linalg.norm(np.linalg.matrix_power(K, M), ord=np.inf) + + +nil_MIN_SR_S = [] +nil_MIN_SR_NS = [] +nNodes = range(2, 20) +for m in nNodes: + s = sweeper({"num_nodes": m, "quad_type": quadType, "node_type": nodeType}) + Q = s.coll.Qmat[1:, 1:] + nodes = s.coll.nodes + + qDelta = s.get_Qdelta_implicit(s.coll, qd_type="MIN-SR-S") + d = np.diag(qDelta)[1:] + nil_MIN_SR_S.append([nilpotencyS(d, Q), nilpotencyNS(d, Q)]) + + qDelta = s.get_Qdelta_implicit(s.coll, qd_type="MIN-SR-NS") + d = np.diag(qDelta)[1:] + nil_MIN_SR_NS.append([nilpotencyS(d, Q), nilpotencyNS(d, Q)]) + +nil_MIN_SR_NS = np.array(nil_MIN_SR_NS).T +nil_MIN_SR_S 
= np.array(nil_MIN_SR_S).T + +fig = plt.figure("nilpotency") +nNodes = np.array(nNodes, np.float128) +plt.semilogy(nNodes, nil_MIN_SR_NS[0], 'o-', label="MIN-SR-NS (nill. stiff)") +plt.semilogy(nNodes, nil_MIN_SR_NS[1], 'o--', label="MIN-SR-NS (nill. non-stiff)") +plt.semilogy(nNodes, nil_MIN_SR_S[0], 's-', label="MIN-SR-S (nill. stiff)") +plt.semilogy(nNodes, nil_MIN_SR_S[1], 's--', label="MIN-SR-S (nill. non-stiff)") +plt.legend() +plt.semilogy(nNodes, 14**nNodes * 1e-17, ':', c="gray") +plt.grid(True) +plt.xlabel("M") +plt.ylabel("nilpotency") +fig.set_size_inches(8.65, 5.33) +plt.tight_layout() diff --git a/pySDC/projects/parallelSDC_reloaded/protheroRobinsonAutonomous_accuracy.py b/pySDC/projects/parallelSDC_reloaded/protheroRobinsonAutonomous_accuracy.py new file mode 100644 index 0000000000..27bcfe67ba --- /dev/null +++ b/pySDC/projects/parallelSDC_reloaded/protheroRobinsonAutonomous_accuracy.py @@ -0,0 +1,126 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Tue Dec 5 11:02:39 2023 + +Script to investigate diagonal SDC on the ProtheroRobinson +(linear and non-linear) problem, using the autonomous formulation. + +- error VS time-step +- error VS computation cost + +Note : implementation in progress ... 
+""" +import numpy as np +import matplotlib.pyplot as plt + +from pySDC.projects.parallelSDC_reloaded.utils import getParamsSDC, getParamsRK, solutionSDC, solutionExact + +# Problem parameters +tEnd = 2 * np.pi +nonLinear = True +epsilon = 0.001 +collUpdate = False +initSweep = "spread" + +pName = "PROTHERO-ROBINSON-A" + (nonLinear) * "-NL" + + +def getError(uNum, uRef): + if uNum is None: + return np.inf + return np.linalg.norm(uRef[:, 0] - uNum[:, 0], np.inf) + + +def getCost(counters): + nNewton, nRHS, tComp = counters + return nNewton + nRHS + + +# Base variable parameters +nNodes = 4 +quadType = 'RADAU-RIGHT' +nodeType = 'LEGENDRE' +parEfficiency = 0.8 + +qDeltaList = [ + 'MIN-SR-NS', + 'MIN-SR-S', + 'MIN-SR-FLEX', + "ESDIRK43", + 'LU', + 'VDHS', + # 'IE', 'LU', 'IEpar', 'PIC', + # "MIN3", +] +nStepsList = np.array([2, 5, 10, 20, 50, 100, 200]) +# nSweepList = [1, 2, 3, 4] + +# qDeltaList = ['ESDIRK43', 'ESDIRK53', 'VDHS'] +nSweepList = [4] + + +symList = ['o', '^', 's', '>', '*', '<', 'p', '>'] * 10 +fig, axs = plt.subplots(1, 2) + +dtVals = tEnd / nStepsList + +i = 0 +for qDelta in qDeltaList: + for nSweeps in nSweepList: + sym = symList[i] + i += 1 + + name = f"{qDelta}({nSweeps})" + try: + params = getParamsRK(qDelta) + name = name.split('(')[0] + if nSweeps != nSweepList[0]: # pragma: no cover + continue + + except KeyError: + params = getParamsSDC( + quadType=quadType, + numNodes=nNodes, + nodeType=nodeType, + qDeltaI=qDelta, + nSweeps=nSweeps, + collUpdate=collUpdate, + initType=initSweep, + ) + print(f'computing for {name} ...') + + errors = [] + costs = [] + + for nSteps in nStepsList: + print(f' -- nSteps={nSteps} ...') + + uRef = solutionExact(tEnd, nSteps, pName, epsilon=epsilon) + + uSDC, counters, parallel = solutionSDC(tEnd, nSteps, params, pName, epsilon=epsilon) + + err = getError(uSDC, uRef) + errors.append(err) + + cost = getCost(counters) + if parallel: + cost /= nNodes * parEfficiency + costs.append(cost) + + # error VS dt + 
axs[0].loglog(dtVals, errors, sym + '-', label=name) + # error VS cost + axs[1].loglog(costs, errors, sym + '-', label=name) + +for i in range(2): + axs[i].set( + xlabel=r"$\Delta{t}$" if i == 0 else "cost", + ylabel=r"$L_\infty$ error", + ylim=(1e-12, 1e3), + ) + axs[i].legend() + axs[i].grid() + +fig.set_size_inches(12, 5) +fig.tight_layout() diff --git a/pySDC/projects/parallelSDC_reloaded/protheroRobinsonAutonomous_setup.py b/pySDC/projects/parallelSDC_reloaded/protheroRobinsonAutonomous_setup.py new file mode 100644 index 0000000000..19c5baa4ed --- /dev/null +++ b/pySDC/projects/parallelSDC_reloaded/protheroRobinsonAutonomous_setup.py @@ -0,0 +1,48 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Thu Dec 7 13:12:40 2023 + +Setup script for the ProtheroRobinson (linear and non-linear) problem, +using the autonomous formulation +""" +import numpy as np +import matplotlib.pyplot as plt + +from pySDC.projects.parallelSDC_reloaded.utils import solutionExact, getParamsRK, solutionSDC + +script = __file__.split('/')[-1].split('.')[0] + +tEnd = 2 * np.pi +nSteps = 2 +epsilon = 1e-3 +rkScheme = "ESDIRK43" + +tVals = np.linspace(0, tEnd, nSteps + 1) +tValsFine = np.linspace(0, tEnd, nSteps * 10 + 1) + +for pType in ["linear", "nonlinear"]: + probName = "PROTHERO-ROBINSON-A" + if pType == "nonlinear": + probName += "-NL" + + print(f"Computing {pType} ODE solution") + uExact = solutionExact(tEnd, nSteps, probName, epsilon=epsilon) + + params = getParamsRK(rkScheme) + uNum, counters, parallel = solutionSDC(tEnd, nSteps, params, probName, epsilon=epsilon) + uNumFine, counters, parallel = solutionSDC(tEnd, nSteps * 10, params, probName, epsilon=epsilon) + + figName = f"{script}_{pType}" + plt.figure(figName) + plt.plot(tVals, uExact[:, 0], '-', label="Exact") + plt.plot(tVals, uNum[:, 0], '--', label="Numerical") + plt.plot(tValsFine, uNumFine[:, 0], '-.', label="Numerical (fine)") + + +for figName in [f"{script}_linear", f"{script}_nonlinear"]: + 
plt.figure(figName) + plt.legend() + plt.xlabel("time") + plt.ylabel("solution") + plt.tight_layout() diff --git a/pySDC/projects/parallelSDC_reloaded/protheroRobinson_accuracy.py b/pySDC/projects/parallelSDC_reloaded/protheroRobinson_accuracy.py new file mode 100644 index 0000000000..b5f3e2d0b5 --- /dev/null +++ b/pySDC/projects/parallelSDC_reloaded/protheroRobinson_accuracy.py @@ -0,0 +1,126 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Tue Dec 5 11:02:39 2023 + +Script to investigate diagonal SDC on the ProtheroRobinson +(linear and non-linear) problem : + +- error VS time-step +- error VS computation cost + +Note : implementation in progress ... +""" +import numpy as np +import matplotlib.pyplot as plt + +from pySDC.projects.parallelSDC_reloaded.utils import getParamsSDC, getParamsRK, solutionSDC, solutionExact + +# Problem parameters +tEnd = 2 * np.pi +nonLinear = False +epsilon = 1e-3 +collUpdate = False +initSweep = "copy" + +pName = "PROTHERO-ROBINSON" + (nonLinear) * "-NL" + + +def getError(uNum, uRef): + if uNum is None: + return np.inf + return np.linalg.norm(uRef[:, 0] - uNum[:, 0], np.inf) + + +def getCost(counters): + nNewton, nRHS, tComp = counters + return nNewton + nRHS + + +# Base variable parameters +nNodes = 4 +quadType = 'RADAU-RIGHT' +nodeType = 'LEGENDRE' +parEfficiency = 0.8 + +qDeltaList = [ + 'MIN-SR-NS', + 'MIN-SR-S', + 'MIN-SR-FLEX', + "ESDIRK43", + 'LU', + 'VDHS', + # 'IE', 'LU', 'IEpar', 'PIC', + # "MIN3", +] +nStepsList = np.array([2, 5, 10, 20, 50, 100, 200]) +# nSweepList = [1, 2, 3, 4] + +# qDeltaList = ['ESDIRK43', 'ESDIRK53', 'VDHS'] +nSweepList = [6] + + +symList = ['o', '^', 's', '>', '*', '<', 'p', '>'] * 10 +fig, axs = plt.subplots(1, 2) + +dtVals = tEnd / nStepsList + +i = 0 +for qDelta in qDeltaList: + for nSweeps in nSweepList: + sym = symList[i] + i += 1 + + name = f"{qDelta}({nSweeps})" + try: + params = getParamsRK(qDelta) + name = name.split('(')[0] + if nSweeps != nSweepList[0]: # pragma: no 
cover + continue + + except KeyError: + params = getParamsSDC( + quadType=quadType, + numNodes=nNodes, + nodeType=nodeType, + qDeltaI=qDelta, + nSweeps=nSweeps, + collUpdate=collUpdate, + initType=initSweep, + ) + print(f'computing for {name} ...') + + errors = [] + costs = [] + + for nSteps in nStepsList: + print(f' -- nSteps={nSteps} ...') + + uRef = solutionExact(tEnd, nSteps, pName, epsilon=epsilon) + + uSDC, counters, parallel = solutionSDC(tEnd, nSteps, params, pName, epsilon=epsilon) + + err = getError(uSDC, uRef) + errors.append(err) + + cost = getCost(counters) + if parallel: + cost /= nNodes * parEfficiency + costs.append(cost) + + # error VS dt + axs[0].loglog(dtVals, errors, sym + '-', label=name) + # error VS cost + axs[1].loglog(costs, errors, sym + '-', label=name) + +for i in range(2): + axs[i].set( + xlabel=r"$\Delta{t}$" if i == 0 else "cost", + ylabel=r"$L_\infty$ error", + ylim=(1e-12, 1e3), + ) + axs[i].legend() + axs[i].grid() + +fig.set_size_inches(12, 5) +fig.tight_layout() diff --git a/pySDC/projects/parallelSDC_reloaded/protheroRobinson_setup.py b/pySDC/projects/parallelSDC_reloaded/protheroRobinson_setup.py new file mode 100644 index 0000000000..ea60cd9664 --- /dev/null +++ b/pySDC/projects/parallelSDC_reloaded/protheroRobinson_setup.py @@ -0,0 +1,47 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Thu Dec 7 13:12:40 2023 + +Setup script for the ProtheroRobinson (linear and non-linear) problem +""" +import numpy as np +import matplotlib.pyplot as plt + +from pySDC.projects.parallelSDC_reloaded.utils import solutionExact, getParamsRK, solutionSDC + +script = __file__.split('/')[-1].split('.')[0] + +tEnd = 2 * np.pi +nSteps = 2 +epsilon = np.inf +rkScheme = "ESDIRK43" + +tVals = np.linspace(0, tEnd, nSteps + 1) +tValsFine = np.linspace(0, tEnd, nSteps * 10 + 1) + +for pType in ["linear", "nonlinear"]: + probName = "PROTHERO-ROBINSON" + if pType == "nonlinear": + probName += "-NL" + + print(f"Computing {pType} ODE 
solution") + uExact = solutionExact(tEnd, nSteps, "PROTHERO-ROBINSON", epsilon=epsilon) + + params = getParamsRK(rkScheme) + uNum, counters, parallel = solutionSDC(tEnd, nSteps, params, probName, epsilon=epsilon) + uNumFine, counters, parallel = solutionSDC(tEnd, nSteps * 10, params, probName, epsilon=epsilon) + + figName = f"{script}_{pType}" + plt.figure(figName) + plt.plot(tVals, uExact[:, 0], '-', label="Exact") + plt.plot(tVals, uNum[:, 0], '--', label="Numerical") + plt.plot(tValsFine, uNumFine[:, 0], '-.', label="Numerical (fine)") + + +for figName in [f"{script}_linear", f"{script}_nonlinear"]: + plt.figure(figName) + plt.legend() + plt.xlabel("time") + plt.ylabel("solution") + plt.tight_layout() diff --git a/pySDC/projects/parallelSDC_reloaded/scripts/__init__.py b/pySDC/projects/parallelSDC_reloaded/scripts/__init__.py new file mode 100644 index 0000000000..31ee6c4d07 --- /dev/null +++ b/pySDC/projects/parallelSDC_reloaded/scripts/__init__.py @@ -0,0 +1,5 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Sun Feb 4 09:52:49 2024 +""" diff --git a/pySDC/projects/parallelSDC_reloaded/scripts/_dataRef.json b/pySDC/projects/parallelSDC_reloaded/scripts/_dataRef.json new file mode 100644 index 0000000000..06808f3078 --- /dev/null +++ b/pySDC/projects/parallelSDC_reloaded/scripts/_dataRef.json @@ -0,0 +1,1999 @@ +{ + "fig05_allenCahn_conv_0_MIN-SR-NS": [Infinity, Infinity, Infinity, Infinity, Infinity, Infinity, Infinity, Infinity, Infinity + ], + "fig05_allenCahn_conv_0_MIN-SR-S": [Infinity, Infinity, + 0.08446537069000407, + 0.02891511006232282, + 0.009359670325087922, + 0.001767579860121379, + 0.0005909667201255001, + 0.0003061432031026652, + 0.00023374010540145703 + ], + "fig05_allenCahn_conv_0_MIN-SR-FLEX": [Infinity, Infinity, + 0.0396190805552386, + 0.003964060100959606, + 0.0003691156499204657, + 7.944071577391686e-05, + 0.000155031091490917, + 0.00019389551312441983, + 0.00021499308058079686 + ], + "fig05_allenCahn_conv_0_VDHS": 
[Infinity, + 0.19321965556422843, + 0.5345708076570097, + 0.2014047982991399, + 0.056209742223901835, + 0.00840458872364862, + 0.001698984589120872, + 0.00016807940176517884, + 0.00018753795544307371 + ], + "fig05_allenCahn_conv_0_ESDIRK43": [Infinity, + 2.171214365559374, + 0.22484822454317072, + 0.018644029886009206, + 0.0013501854462701615, + 0.00023722702915712814, + 0.00022105433212314375, + 0.00022273327778025385, + 0.00022354533745006197 + ], + "fig05_allenCahn_conv_0_LU": [Infinity, Infinity, + 0.004808051505233471, + 0.0006654205463199496, + 0.0002662368479721481, + 0.00022781884527065633, + 0.00022518481870875735, + 0.00022443406747357908, + 0.00022951363585211392 + ], + "fig04_protheroRobinson_conv_0_MIN-SR-NS": [ + 0.16043962210305474, Infinity, Infinity, Infinity, Infinity, Infinity, Infinity, + 1.440060342083882e-07, + 2.2560359691503606e-08 + ], + "fig04_protheroRobinson_conv_0_MIN-SR-S": [ + 8.320184141030751e-05, + 3.1197879747368873e-06, + 1.0144571577219708e-06, + 9.096042259404413e-07, + 7.523811357268428e-07, + 5.634340353166678e-07, + 3.4031203399731424e-07, + 1.0673582206965904e-07, + 2.57452117402579e-08 + ], + "fig04_protheroRobinson_conv_0_MIN-SR-FLEX": [ + 8.489126705191197e-05, + 4.668047397571318e-06, + 2.790071002123362e-06, + 2.5202576144911504e-06, + 1.9786079379358057e-06, + 1.384962803219203e-06, + 7.680288652833767e-07, + 1.8448989702601892e-07, + 2.7963735882430285e-08 + ], + "fig04_protheroRobinson_conv_0_VDHS": [ + 1.890219776723434e-05, + 2.9799457909873706e-05, + 2.0174175104914482e-05, + 1.4209183914593027e-05, + 9.317029488586925e-06, + 6.205201703135543e-06, + 3.2945948167384387e-06, + 7.518282080232197e-07, + 1.2403659832571634e-07 + ], + "fig04_protheroRobinson_conv_0_ESDIRK43": [ + 0.0005093284263864017, + 8.840713503965869e-05, + 2.1154331917405678e-05, + 5.2849795018627965e-06, + 8.287363662679015e-07, + 1.9819473429105727e-07, + 4.377809902089691e-08, + 4.293470121097265e-09, + 4.941030301922056e-10 + ], + 
"fig04_protheroRobinson_conv_0_LU": [ + 8.116737697250187e-05, + 2.227733733539683e-06, + 1.4311622936746815e-07, + 1.1482910844407002e-08, + 1.3784208219291827e-08, + 1.6389903101377e-08, + 1.181608688671787e-08, + 1.5014478549346677e-09, + 1.8710251126208277e-09 + ], + "fig04_protheroRobinson_conv_1_MIN-SR-NS": [ + 7.30371396793827, Infinity, Infinity, Infinity, Infinity, Infinity, + 2.1717775050111854e-05, + 1.6947220871088575e-07, + 4.0878700424684666e-10 + ], + "fig04_protheroRobinson_conv_1_MIN-SR-S": [ + 8.12045931322114e-05, + 2.24192650638555e-06, + 1.473240173188728e-07, + 8.852951038917922e-09, + 1.798949900155833e-08, + 2.0804090783421714e-08, + 8.20330170459016e-09, + 5.4582118824697545e-09, + 2.7277079484022693e-09 + ], + "fig04_protheroRobinson_conv_1_MIN-SR-FLEX": [ + 8.636998630784376e-05, + 5.608272452994889e-06, + 3.6692412339611735e-06, + 2.8100552619259034e-06, + 1.3508174846466048e-06, + 4.1292747054999523e-07, + 1.4597440034513909e-08, + 1.2456305209518348e-08, + 2.728971937315805e-09 + ], + "fig04_protheroRobinson_conv_1_VDHS": [ + 8.124124800135135e-05, + 2.272937983105461e-06, + 1.701986499513808e-07, + 9.253153765431676e-08, + 1.3224594719307703e-07, + 1.2123170878286516e-07, + 4.4162449586160335e-08, + 2.3174743790654873e-08, + 8.722048194798049e-09 + ], + "fig04_protheroRobinson_conv_1_ESDIRK43": [ + 0.0005093284263864017, + 8.840713503965869e-05, + 2.1154331917405678e-05, + 5.2849795018627965e-06, + 8.287363662679015e-07, + 1.9819473429105727e-07, + 4.377809902089691e-08, + 4.293470121097265e-09, + 4.941030301922056e-10 + ], + "fig04_protheroRobinson_conv_1_LU": [ + 8.11675360774533e-05, + 2.2272239728615695e-06, + 1.4324239350216317e-07, + 8.631915602498352e-09, + 2.2413865208292805e-10, + 2.091979922624887e-10, + 5.1993187533128093e-11, + 1.2331546894728262e-10, + 4.977940282202553e-11 + ], + "fig03_lorenz_conv_PIC_$K=1$": [ + 67681205.71649154, + 1604639297538.775, + 1.788446770319228e+43, + 26.038392548526794, + 13.372741778639588, 
+ 11.34889559291917, + 6.3995586381308165, + 3.620819171106671 + ], + "fig03_lorenz_conv_PIC_$K=2$": [ + 7.819070465916375e+50, NaN, + 13.351724704007069, + 3.547378470247729, + 0.6176644360717933, + 0.11879806667121429, + 0.015442847353449807, + 0.003561485112298257 + ], + "fig03_lorenz_conv_PIC_$K=3$": [NaN, + 19.799381748368454, + 13.237749624292569, + 0.9211025290129733, + 0.11869931836347192, + 0.01502594427117998, + 0.0009669109283763078, + 0.00012102528453183936 + ], + "fig03_lorenz_conv_PIC_$K=4$": [NaN, + 29.41136612093734, + 2.380517101530046, + 0.0332026307135429, + 0.0013657198465857334, + 6.352428903255714e-05, + 1.2844024617209016e-06, + 7.310388205894469e-08 + ], + "fig03_lorenz_conv_PIC_$K=5$": [NaN, + 1.7877261553173138, + 0.3142273731039822, + 0.005485709039071196, + 0.00018311118365943457, + 5.826593429958393e-06, + 6.0042481209166e-08, + 1.8812826851899445e-09 + ], + "fig03_lorenz_conv_MIN-SR-NS_$K=1$": [ + 225.21792648743696, + 136.42084302368366, + 27.157587748510718, + 13.343595414381037, + 11.27004763703939, + 7.5087589910731225, + 3.6171301145842243, + 1.9297102390492427 + ], + "fig03_lorenz_conv_MIN-SR-NS_$K=2$": [ + 32.90488586292916, + 11.752175617703474, + 4.805323467476697, + 0.4557648700864476, + 0.07650140611684719, + 0.014698115095615805, + 0.001920148255866394, + 0.00044390538899286014 + ], + "fig03_lorenz_conv_MIN-SR-NS_$K=3$": [ + 14.624578379904424, + 0.5346939810970284, + 0.041553302636680556, + 0.0009631505998228818, + 5.0596223477583635e-05, + 3.0280686686978697e-06, + 7.833393400602517e-08, + 4.912024564873718e-09 + ], + "fig03_lorenz_conv_MIN-SR-NS_$K=4$": [ + 16.217452889738567, + 0.0546509456450579, + 0.009336357792545158, + 8.067327115313105e-05, + 2.284149523745782e-06, + 6.934811835890287e-08, + 7.057892048578651e-10, + 7.648814914773538e-11 + ], + "fig03_lorenz_conv_MIN-SR-NS_$K=5$": [ + 9.154704936308956, + 0.07621878748729749, + 0.0002509432007666845, + 1.6872603509909823e-06, + 1.9030105491424365e-08, + 
2.091358197731097e-10, + 4.9684700798025005e-11, + 7.648814914773538e-11 + ], + "fig03_lorenz_cost_0_MIN-SR-NS": [ + 0.0546509456450579, + 0.009336357792545158, + 8.067327115313105e-05, + 2.284149523745782e-06, + 6.934811835890287e-08, + 7.057892048578651e-10, + 7.648814914773538e-11 + ], + "fig03_lorenz_cost_0_MIN-SR-S": [ + 1.9287434051082037, + 0.10832440856908576, + 0.0017611980333889221, + 5.638746364411418e-05, + 1.569950427438016e-06, + 2.3746524746570685e-08, + 1.5012915355328005e-09 + ], + "fig03_lorenz_cost_0_MIN-SR-FLEX": [ + 3.5837961197815886, + 0.217358561202861, + 0.00496643239847927, + 0.00028952343006949377, + 1.0061587467191657e-05, + 9.354362617841616e-08, + 2.6501236760623215e-09 + ], + "fig03_lorenz_cost_0_LU": [ + 0.6989210472155172, + 0.22845443868738258, + 0.004508294448669403, + 0.00013817901455581705, + 4.367674454996973e-06, + 1.1594240589829496e-07, + 7.328716300492033e-09 + ], + "fig03_lorenz_cost_0_EE": [ + 3.516010310744873, + 0.06464280246092358, + 0.0008947257478801873, + 5.472298826258992e-05, + 3.413792453166309e-06, + 8.771417636666001e-08, + 5.488757892635476e-09 + ], + "fig03_lorenz_cost_0_PIC": [ + 29.41136612093734, + 2.380517101530046, + 0.0332026307135429, + 0.0013657198465857334, + 6.352428903255714e-05, + 1.2844024617209016e-06, + 7.310388205894469e-08 + ], + "fig03_lorenz_cost_1_MIN-SR-NS": [ + 0.0546509456450579, + 0.009336357792545158, + 8.067327115313105e-05, + 2.284149523745782e-06, + 6.934811835890287e-08, + 7.057892048578651e-10, + 7.648814914773538e-11 + ], + "fig03_lorenz_cost_1_MIN-SR-S": [ + 1.9287434051082037, + 0.10832440856908576, + 0.0017611980333889221, + 5.638746364411418e-05, + 1.569950427438016e-06, + 2.3746524746570685e-08, + 1.5012915355328005e-09 + ], + "fig03_lorenz_cost_1_MIN-SR-FLEX": [ + 3.5837961197815886, + 0.217358561202861, + 0.00496643239847927, + 0.00028952343006949377, + 1.0061587467191657e-05, + 9.354362617841616e-08, + 2.6501236760623215e-09 + ], + "fig03_lorenz_cost_1_VDHS": [ + 
0.5573075506927019, + 0.03802418895329396, + 0.0004979941844496238, + 1.6712101515992117e-05, + 5.84324478580811e-07, + 8.561869435652625e-09, + 4.129194763891064e-10 + ], + "fig03_lorenz_cost_1_RK4": [ + 8.4867645815102, + 1.9038004729431073, + 0.02789165289470219, + 0.0010602578270120233, + 4.556282476997353e-05, + 8.435568688014428e-07, + 4.594594216200676e-08 + ], + "fig03_lorenz_cost_1_ESDIRK43": [ + 1.0414477669801077, + 0.05906753442312329, + 0.001273883913817997, + 7.275281019403224e-05, + 4.313766595487323e-06, + 1.067262260789903e-07, + 6.590447299004154e-09 + ], + "fig03_lorenz_cost_2_MIN-SR-NS": [ + 0.07621878748729749, + 0.0002509432007666845, + 1.6872603509909823e-06, + 1.9030105491424365e-08, + 2.091358197731097e-10, + 4.9684700798025005e-11, + 7.648814914773538e-11 + ], + "fig03_lorenz_cost_2_MIN-SR-S": [ + 1.0967974307495858, + 0.018626305586472824, + 3.362806956452857e-05, + 2.988167040030021e-06, + 1.1313348480257446e-07, + 1.2265992666016245e-09, + 1.2313350339354656e-10 + ], + "fig03_lorenz_cost_2_MIN-SR-FLEX": [ + 1.1178621898212064, + 0.018539394122594643, + 3.381578668992802e-05, + 2.983625694952252e-06, + 1.1295919577491986e-07, + 1.2257999060238944e-09, + 1.2305889640629175e-10 + ], + "fig03_lorenz_cost_2_PIC": [ + 1.7877261553173138, + 0.3142273731039822, + 0.005485709039071196, + 0.00018311118365943457, + 5.826593429958393e-06, + 6.0042481209166e-08, + 1.8812826851899445e-09 + ], + "fig03_lorenz_cost_2_RK4": [ + 8.4867645815102, + 1.9038004729431073, + 0.02789165289470219, + 0.0010602578270120233, + 4.556282476997353e-05, + 8.435568688014428e-07, + 4.594594216200676e-08 + ], + "fig03_lorenz_cost_2_ESDIRK43": [ + 1.0414477669801077, + 0.05906753442312329, + 0.001273883913817997, + 7.275281019403224e-05, + 4.313766595487323e-06, + 1.067262260789903e-07, + 6.590447299004154e-09 + ], + "fig02_stab_PIC_K1": [ + 7.000114284781357, + 6.720119046564576, + 6.4401242224044095, + 6.160129868760886, + 5.880136052847757, + 5.600142855320745, + 
5.320150373814635, + 5.0401587276592785, + 4.760168064259918, + 4.480178567869813, + 4.200190471871483, + 3.920204076320517, + 3.6402197735851063, + 3.3602380868027786, + 3.0802597293085534, + 2.8002856997099417, + 2.5203174403237374, + 2.2403571143904717, + 1.9604081207748554, + 1.680476123008, + 1.4005713120009275, + 1.1207140580897523, + 0.8409518416651456, + 0.5614267539047277, + 0.28284271247461934, + 0.04000000000000081, + 0.28284271247461934, + 0.5614267539047285, + 0.8409518416651456, + 1.1207140580897532, + 1.4005713120009282, + 1.680476123008, + 1.9604081207748563, + 2.2403571143904717, + 2.5203174403237374, + 2.8002856997099426, + 3.0802597293085534, + 3.360238086802778, + 3.640219773585107, + 3.920204076320517, + 4.200190471871485, + 4.480178567869813, + 4.760168064259918, + 5.040158727659279, + 5.320150373814635, + 5.600142855320745, + 5.880136052847758, + 6.160129868760886, + 6.4401242224044095, + 6.720119046564576, + 7.000114284781357 + ], + "fig02_stab_PIC_K2": [ + 24.000833332199118, + 22.080036231854333, + 20.237639530340488, + 18.473643304989952, + 16.788047653017905, + 15.180852698053556, + 13.652058599346844, + 12.201665564995627, + 10.829673871359192, + 9.536083892248437, + 8.320896144046023, + 7.184111357711545, + 6.125730598059306, + 5.14575547028811, + 4.24418849722771, + 3.4210338554302564, + 2.676298936965001, + 2.0099980497502985, + 1.4221626348628351, + 0.9128767715305283, + 0.4824610243325356, + 0.13410443691392224, + 0.1517661358801759, + 0.3447285308761086, + 0.46173585522460786, + 0.5008000000000001, + 0.46173585522460786, + 0.34472853087610816, + 0.1517661358801759, + 0.1341044369139231, + 0.48246102433253696, + 0.9128767715305283, + 1.422162634862837, + 2.0099980497502985, + 2.676298936965001, + 3.421033855430258, + 4.24418849722771, + 5.145755470288109, + 6.125730598059306, + 7.184111357711545, + 8.32089614404603, + 9.536083892248437, + 10.829673871359192, + 12.201665564995631, + 13.652058599346844, + 15.180852698053556, + 
16.788047653017912, + 18.473643304989952, + 20.237639530340488, + 22.080036231854336, + 24.000833332199118 + ], + "fig02_stab_PIC_K3": [ + 53.6647255983554, + 47.21523347395417, + 41.292590443358876, + 35.87484722916063, + 30.940055843776374, + 26.466270214436726, + 22.431547157261466, + 18.81394793196495, + 15.591540793066365, + 12.74240531502764, + 10.244640014443453, + 8.076376424885389, + 6.215806619779325, + 4.641241042528175, + 3.331241809962834, + 2.2649703852325422, + 1.423263899633515, + 0.792890308335551, + 0.3888208584071128, + 0.3012566746148532, + 0.3976410335722967, + 0.46315429442513467, + 0.46764327683395607, + 0.42842700168458636, + 0.3773545341511677, + 0.35334400000000055, + 0.3773545341511677, + 0.42842700168458653, + 0.46764327683395607, + 0.4631542944251346, + 0.3976410335722962, + 0.3012566746148532, + 0.38882085840711333, + 0.792890308335551, + 1.423263899633515, + 2.2649703852325453, + 3.331241809962834, + 4.641241042528172, + 6.215806619779328, + 8.076376424885389, + 10.244640014443469, + 12.74240531502764, + 15.591540793066365, + 18.813947931964957, + 22.431547157261466, + 26.466270214436726, + 30.94005584377639, + 35.87484722916063, + 41.292590443358876, + 47.21523347395417, + 53.6647255983554 + ], + "fig02_stab_PIC_K4": [ + 88.16099788326595, + 74.05194916003227, + 61.67487564549149, + 50.88543506953664, + 41.545464470401306, + 33.52299394488755, + 26.692267703159764, + 20.933777211364056, + 16.134314967568667, + 12.187064648654266, + 8.991757352605328, + 6.454950508697305, + 4.490531943199306, + 3.020587771583246, + 1.9764920564875095, + 1.2977607386793932, + 0.9184784067227706, + 0.7391506655647535, + 0.643710009695701, + 0.5621793697531063, + 0.47990795914469797, + 0.41102729599329035, + 0.37411158362053687, + 0.3707825529786753, + 0.3823413774219176, + 0.38873344000000043, + 0.3823413774219176, + 0.3707825529786752, + 0.37411158362053687, + 0.4110272959932906, + 0.4799079591446984, + 0.5621793697531063, + 0.643710009695701, + 
0.7391506655647535, + 0.9184784067227706, + 1.2977607386793943, + 1.9764920564875095, + 3.0205877715832434, + 4.49053194319931, + 6.454950508697305, + 8.991757352605347, + 12.187064648654266, + 16.134314967568667, + 20.933777211364067, + 26.692267703159764, + 33.52299394488755, + 41.54546447040133, + 50.88543506953664, + 61.67487564549149, + 74.05194916003228, + 88.16099788326595 + ], + "fig02_stab_MIN-SR-NS_K1": [ + 2.451277356627698, + 2.4174442490464405, + 2.3807643395753706, + 2.3409609628989645, + 2.2977320992195103, + 2.2507495955779953, + 2.199659047305139, + 2.144080639053869, + 2.083611327673719, + 2.017828842548929, + 1.9462980782075747, + 1.8685805507442528, + 1.7842476721786285, + 1.6928986528399883, + 1.5941838668614057, + 1.487834536436156, + 1.3736997188431521, + 1.2517921465720794, + 1.1223463637329203, + 0.9858982364095321, + 0.8434113410426646, + 0.6965260331469926, + 0.548178312781095, + 0.4045083718065011, + 0.28180938877996664, + 0.22580645161290375, + 0.28180938877996664, + 0.40450837180650145, + 0.548178312781095, + 0.696526033146993, + 0.8434113410426648, + 0.9858982364095321, + 1.1223463637329207, + 1.2517921465720794, + 1.3736997188431521, + 1.4878345364361567, + 1.5941838668614057, + 1.6928986528399883, + 1.7842476721786287, + 1.8685805507442528, + 1.9462980782075754, + 2.017828842548929, + 2.083611327673719, + 2.144080639053869, + 2.199659047305139, + 2.2507495955779953, + 2.297732099219511, + 2.3409609628989645, + 2.3807643395753706, + 2.4174442490464396, + 2.451277356627698 + ], + "fig02_stab_MIN-SR-NS_K2": [ + 3.002969382673865, + 2.8734101207252674, + 2.7395327232628075, + 2.6013812803509353, + 2.4590572639194948, + 2.312733206139106, + 2.162668914058696, + 2.0092306065356733, + 1.8529134529721856, + 1.6943681897853506, + 1.5344329165891055, + 1.374172089849321, + 1.2149266603559687, + 1.0583832654828167, + 0.9066782529817402, + 0.7625667151675332, + 0.6297069019905379, + 0.5131048548221566, + 0.419550494109104, + 0.3569038132812675, 
+ 0.3296900487603548, + 0.3326275437990051, + 0.3520815182058599, + 0.3745588264568256, + 0.3911162839144316, + 0.39710603374633413, + 0.3911162839144316, + 0.37455882645682537, + 0.3520815182058599, + 0.33262754379900505, + 0.3296900487603548, + 0.3569038132812675, + 0.41955049410910406, + 0.5131048548221566, + 0.6297069019905379, + 0.7625667151675334, + 0.9066782529817402, + 1.058383265482817, + 1.2149266603559685, + 1.374172089849321, + 1.5344329165891064, + 1.6943681897853506, + 1.8529134529721856, + 2.009230606535674, + 2.162668914058696, + 2.312733206139106, + 2.4590572639194948, + 2.6013812803509353, + 2.7395327232628075, + 2.873410120725269, + 3.002969382673865 + ], + "fig02_stab_MIN-SR-NS_K3": [ + 1.9005659587433803, + 1.7551162634461002, + 1.6115705422249815, + 1.4705933675268583, + 1.3329476587825249, + 1.1995081515063215, + 1.0712778209796159, + 0.9494078079808562, + 0.8352201557191514, + 0.7302284856126052, + 0.6361405969040516, + 0.5548026910625712, + 0.4880066743403536, + 0.4370636745585768, + 0.40215458533920356, + 0.38177705915688276, + 0.372818762614065, + 0.37138522439723676, + 0.3738368742591916, + 0.3774477919920838, + 0.3805735235684744, + 0.3825152686964925, + 0.38326366535581236, + 0.38321484207916795, + 0.38289734649907886, + 0.3827376099883696, + 0.38289734649907886, + 0.383214842079168, + 0.38326366535581236, + 0.3825152686964924, + 0.3805735235684743, + 0.3774477919920838, + 0.37383687425919154, + 0.37138522439723676, + 0.372818762614065, + 0.3817770591568831, + 0.40215458533920356, + 0.4370636745585771, + 0.4880066743403537, + 0.5548026910625712, + 0.6361405969040518, + 0.7302284856126052, + 0.8352201557191514, + 0.9494078079808571, + 1.0712778209796159, + 1.1995081515063215, + 1.3329476587825257, + 1.4705933675268583, + 1.6115705422249815, + 1.7551162634461006, + 1.9005659587433803 + ], + "fig02_stab_MIN-SR-NS_K4": [ + 1.083996695228847, + 0.9922553946082212, + 0.9055881230640629, + 0.8244239155904961, + 0.7492258883184664, + 
0.6804799005683734, + 0.618671866968963, + 0.5642482332047767, + 0.5175552272621933, + 0.4787583059583021, + 0.4477554076696506, + 0.42411255906276346, + 0.4070562872449159, + 0.39554173868497283, + 0.3883820414148235, + 0.38439657216024725, + 0.3825337761652427, + 0.3819444763360174, + 0.3820051684812496, + 0.382304278932076, + 0.38260697274551664, + 0.38281105343772265, + 0.3829023018338091, + 0.38291425128175743, + 0.3828952050843189, + 0.3828839920048985, + 0.3828952050843189, + 0.38291425128175754, + 0.3829023018338091, + 0.38281105343772254, + 0.3826069727455165, + 0.382304278932076, + 0.3820051684812497, + 0.3819444763360174, + 0.3825337761652427, + 0.3843965721602476, + 0.3883820414148235, + 0.39554173868497305, + 0.40705628724491577, + 0.42411255906276346, + 0.4477554076696512, + 0.4787583059583021, + 0.5175552272621933, + 0.5642482332047762, + 0.618671866968963, + 0.6804799005683734, + 0.7492258883184669, + 0.8244239155904961, + 0.9055881230640629, + 0.9922553946082203, + 1.083996695228847 + ], + "fig02_stab_MIN-SR-S_K1": [ + 1.5681895510408013, + 1.5658887471083591, + 1.5632973852393248, + 1.5603652194507684, + 1.5570308630083416, + 1.5532187546673577, + 1.5488351422109645, + 1.5437627147645652, + 1.5378533596866597, + 1.530918292609237, + 1.5227144767544705, + 1.5129257625608559, + 1.5011364790337547, + 1.4867942279400221, + 1.4691573461660574, + 1.4472210763301527, + 1.4196157169969439, + 1.384472550825699, + 1.3392668482352628, + 1.2806922985637377, + 1.2047484164901567, + 1.1075391751245447, + 0.9879444356716439, + 0.8542321620751417, + 0.7355427463848079, + 0.684918002089284, + 0.7355427463848079, + 0.8542321620751422, + 0.9879444356716439, + 1.1075391751245451, + 1.2047484164901563, + 1.2806922985637377, + 1.3392668482352628, + 1.384472550825699, + 1.4196157169969439, + 1.4472210763301518, + 1.4691573461660574, + 1.4867942279400217, + 1.501136479033755, + 1.5129257625608559, + 1.5227144767544702, + 1.530918292609237, + 1.5378533596866597, + 
1.5437627147645658, + 1.5488351422109645, + 1.5532187546673577, + 1.5570308630083411, + 1.5603652194507684, + 1.5632973852393248, + 1.5658887471083591, + 1.5681895510408013 + ], + "fig02_stab_MIN-SR-S_K2": [ + 1.3708218383534185, + 1.3579513518385802, + 1.3438509708065396, + 1.3283695691097568, + 1.3113332738609318, + 1.2925412821780513, + 1.2717607116992884, + 1.2487202063415532, + 1.2231019315784655, + 1.1945314893102643, + 1.1625651760378246, + 1.126673945563966, + 1.0862235358219618, + 1.0404507497016098, + 0.9884374344735098, + 0.9290875196735912, + 0.8611209987232731, + 0.7831164827676054, + 0.6936681388072764, + 0.5917814683990976, + 0.4777118570044506, + 0.3544946392037084, + 0.2302541872379886, + 0.12093031742731288, + 0.0537321766559337, + 0.04311743624296375, + 0.0537321766559337, + 0.12093031742731288, + 0.2302541872379886, + 0.35449463920370855, + 0.47771185700445157, + 0.5917814683990976, + 0.6936681388072766, + 0.7831164827676054, + 0.8611209987232731, + 0.9290875196735923, + 0.9884374344735098, + 1.0404507497016104, + 1.0862235358219623, + 1.126673945563966, + 1.1625651760378235, + 1.1945314893102643, + 1.2231019315784655, + 1.2487202063415537, + 1.2717607116992884, + 1.2925412821780513, + 1.3113332738609316, + 1.3283695691097568, + 1.3438509708065396, + 1.3579513518385802, + 1.3708218383534185 + ], + "fig02_stab_MIN-SR-S_K3": [ + 0.7019597064051052, + 0.700639986353974, + 0.6989563251583067, + 0.6968292277767597, + 0.6941616392123222, + 0.6908348696472933, + 0.6867035142028252, + 0.6815891004978799, + 0.6752721224826195, + 0.6674820277027697, + 0.6578846239121174, + 0.6460662864838697, + 0.6315143566388963, + 0.6135934141589331, + 0.5915181377004075, + 0.5643262347255502, + 0.5308615509834347, + 0.4897921217908585, + 0.43971782436185886, + 0.3794767988565869, + 0.30884092019477216, + 0.22986527278508043, + 0.1491673046559928, + 0.08181325565241776, + 0.054213794782107624, + 0.056002050608889714, + 0.054213794782107624, + 0.08181325565241794, + 
0.1491673046559928, + 0.22986527278508043, + 0.3088409201947729, + 0.3794767988565869, + 0.4397178243618592, + 0.4897921217908585, + 0.5308615509834347, + 0.5643262347255511, + 0.5915181377004075, + 0.6135934141589333, + 0.631514356638896, + 0.6460662864838697, + 0.6578846239121168, + 0.6674820277027697, + 0.6752721224826195, + 0.6815891004978796, + 0.6867035142028252, + 0.6908348696472933, + 0.6941616392123227, + 0.6968292277767597, + 0.6989563251583067, + 0.700639986353974, + 0.7019597064051052 + ], + "fig02_stab_MIN-SR-S_K4": [ + 0.44011537615089086, + 0.44451545893925326, + 0.4483353654715732, + 0.4514427584678487, + 0.4536844082782443, + 0.4548838863726303, + 0.45483935528631736, + 0.4533216227588659, + 0.4500726854993916, + 0.44480505221875155, + 0.4372022036748745, + 0.42692062049011736, + 0.41359390374110105, + 0.3968396884852424, + 0.376270469741531, + 0.35151050735691625, + 0.322223449995591, + 0.2881606756539803, + 0.24925069092677496, + 0.2057666499512023, + 0.1586275405701332, + 0.10988984627678099, + 0.06348660644760294, + 0.027217456756445747, + 0.020208065924905174, + 0.025837923521782987, + 0.020208065924905174, + 0.027217456756445754, + 0.06348660644760294, + 0.10988984627678103, + 0.15862754057013365, + 0.2057666499512023, + 0.24925069092677504, + 0.2881606756539803, + 0.322223449995591, + 0.3515105073569162, + 0.376270469741531, + 0.3968396884852424, + 0.4135939037411013, + 0.42692062049011736, + 0.437202203674873, + 0.44480505221875155, + 0.4500726854993916, + 0.4533216227588673, + 0.45483935528631736, + 0.4548838863726303, + 0.45368440827824313, + 0.4514427584678487, + 0.4483353654715732, + 0.44451545893925326, + 0.44011537615089086 + ], + "fig02_stab_MIN-SR-FLEX_K1": [ + 0.028187024643050368, + 0.029328161605091344, + 0.030564037309450207, + 0.03190673381631632, + 0.033370432213299994, + 0.03497187643356691, + 0.036730961308117926, + 0.03867148354290258, + 0.04082210766325184, + 0.04321761701690755, + 0.04590054396138959, + 
0.04892330434459896, + 0.05235099861005798, + 0.05626507930363149, + 0.060768101715581134, + 0.06598971168616832, + 0.07209372509105907, + 0.07928521511404357, + 0.08781394722693871, + 0.09796391673838656, + 0.1100030801293661, + 0.1240347345892085, + 0.1396451933694865, + 0.15526752351113435, + 0.16760038078849787, + 0.17241379310344843, + 0.16760038078849787, + 0.15526752351113426, + 0.1396451933694865, + 0.12403473458920848, + 0.11000308012936605, + 0.09796391673838656, + 0.08781394722693865, + 0.07928521511404357, + 0.07209372509105907, + 0.0659897116861683, + 0.060768101715581134, + 0.056265079303631504, + 0.05235099861005797, + 0.04892330434459896, + 0.04590054396138957, + 0.04321761701690755, + 0.04082210766325184, + 0.03867148354290258, + 0.036730961308117926, + 0.03497187643356691, + 0.033370432213299994, + 0.03190673381631632, + 0.030564037309450207, + 0.029328161605091344, + 0.028187024643050368 + ], + "fig02_stab_MIN-SR-FLEX_K2": [ + 0.12460226580300111, + 0.12876307138372156, + 0.13316948260286046, + 0.13783799171521444, + 0.14278526009608267, + 0.14802758436819138, + 0.1535800829143388, + 0.1594554787820634, + 0.16566229978343336, + 0.17220223531926473, + 0.1790662684721548, + 0.1862290193050813, + 0.19364045502711683, + 0.2012136843153753, + 0.20880685489202558, + 0.216196049467911, + 0.22303428530085087, + 0.22878905543164432, + 0.23264773272623804, + 0.23338013376836483, + 0.22916560168667024, + 0.21747752999617004, + 0.19539886900633482, + 0.16144585075459103, + 0.12135150148020908, + 0.09968826343271538, + 0.12135150148020908, + 0.16144585075459122, + 0.19539886900633482, + 0.21747752999617026, + 0.22916560168667016, + 0.23338013376836483, + 0.23264773272623807, + 0.22878905543164432, + 0.22303428530085087, + 0.21619604946791077, + 0.20880685489202558, + 0.20121368431537526, + 0.19364045502711666, + 0.1862290193050813, + 0.17906626847215484, + 0.17220223531926473, + 0.16566229978343336, + 0.15945547878206337, + 0.1535800829143388, + 
0.14802758436819138, + 0.14278526009608256, + 0.13783799171521444, + 0.13316948260286046, + 0.12876307138372156, + 0.12460226580300111 + ], + "fig02_stab_MIN-SR-FLEX_K3": [ + 0.2092755711125658, + 0.21457678959555843, + 0.22001638440770352, + 0.22557523668877055, + 0.23122552838571847, + 0.2369279253165403, + 0.24262785272248072, + 0.24825056420000377, + 0.2536946108642573, + 0.25882320337742776, + 0.26345283297680794, + 0.26733840558817096, + 0.2701541139032268, + 0.2714694823350061, + 0.27072080478762367, + 0.26718024301607507, + 0.25992950743802695, + 0.24785475455638567, + 0.22969804555008366, + 0.20423293415467675, + 0.17067603175838747, + 0.12947186410589723, + 0.08348046749716657, + 0.03912785866503254, + 0.006592844946920013, + 0.007324602184796184, + 0.006592844946920013, + 0.03912785866503254, + 0.08348046749716657, + 0.12947186410589737, + 0.17067603175838766, + 0.20423293415467675, + 0.2296980455500837, + 0.24785475455638567, + 0.25992950743802695, + 0.26718024301607546, + 0.27072080478762367, + 0.27146948233500606, + 0.270154113903227, + 0.26733840558817096, + 0.26345283297680816, + 0.25882320337742776, + 0.2536946108642573, + 0.2482505642000037, + 0.24262785272248072, + 0.2369279253165403, + 0.2312255283857186, + 0.22557523668877055, + 0.22001638440770352, + 0.21457678959555843, + 0.2092755711125658 + ], + "fig02_stab_MIN-SR-FLEX_K4": [ + 0.14981088090996628, + 0.15574163134499502, + 0.16196521759331728, + 0.16847965882151497, + 0.17527561316460388, + 0.18233346898181277, + 0.18961941740249083, + 0.19708015023762535, + 0.20463570595046507, + 0.21216983103135875, + 0.2195170355860565, + 0.2264453169196286, + 0.23263335979413294, + 0.23764104599381367, + 0.2408726760757283, + 0.24153423139410568, + 0.23859102851987118, + 0.2307437625687536, + 0.21646549465083265, + 0.19418794286113564, + 0.16279328294943415, + 0.12261859681019463, + 0.0771155259694378, + 0.03567607024560152, + 0.022503375130472236, + 0.02868747520686435, + 0.022503375130472236, + 
0.03567607024560173, + 0.0771155259694378, + 0.12261859681019471, + 0.1627932829494339, + 0.19418794286113564, + 0.2164654946508324, + 0.2307437625687536, + 0.23859102851987118, + 0.24153423139410563, + 0.2408726760757283, + 0.23764104599381403, + 0.23263335979413335, + 0.2264453169196286, + 0.21951703558605518, + 0.21216983103135875, + 0.20463570595046507, + 0.19708015023762504, + 0.18961941740249083, + 0.18233346898181277, + 0.17527561316460186, + 0.16847965882151497, + 0.16196521759331728, + 0.15574163134499502, + 0.14981088090996628 + ], + "fig02_stab_LU_K1": [ + 0.05437157508556943, + 0.05730883943673234, + 0.06051627319722353, + 0.06402342777397946, + 0.06786237664748208, + 0.07206730548157753, + 0.07667364560558954, + 0.08171648344237094, + 0.08722784223683687, + 0.09323223525601045, + 0.09973961363197617, + 0.10673446792240499, + 0.1141594160563379, + 0.12189124864736355, + 0.12970748833173718, + 0.1372430311176931, + 0.1439416485684184, + 0.14902089288130416, + 0.15150042817806994, + 0.1504058287433658, + 0.14534878085373315, + 0.13766289654398445, + 0.13147789285511044, + 0.13171083272067322, + 0.13742137999626672, + 0.1408764345946884, + 0.13742137999626672, + 0.1317108327206731, + 0.13147789285511044, + 0.13766289654398467, + 0.1453487808537331, + 0.1504058287433658, + 0.15150042817807022, + 0.14902089288130416, + 0.1439416485684184, + 0.13724303111769165, + 0.12970748833173718, + 0.1218912486473635, + 0.11415941605633655, + 0.10673446792240499, + 0.0997396136319749, + 0.09323223525601045, + 0.08722784223683687, + 0.08171648344237142, + 0.07667364560558954, + 0.07206730548157753, + 0.0678623766474818, + 0.06402342777397946, + 0.06051627319722353, + 0.05730883943673234, + 0.05437157508556943 + ], + "fig02_stab_LU_K2": [ + 0.11064218235397395, + 0.11412472322561518, + 0.11775069468730881, + 0.12151122437601981, + 0.1253899975063768, + 0.12936024356753256, + 0.13338062670480047, + 0.13738967785962927, + 0.1412983233472439, + 0.1449800021823555, + 
0.14825787947478664, + 0.1508888737559997, + 0.15254485343283147, + 0.15279284616510466, + 0.15107916537517393, + 0.1467280754201414, + 0.13897527276768473, + 0.12707060482289403, + 0.11050024283721877, + 0.08938502076115579, + 0.06509187382424403, + 0.041098070056879994, + 0.024413514151413452, + 0.02216709387778496, + 0.02535687626513552, + 0.02647951310305598, + 0.02535687626513552, + 0.02216709387778487, + 0.024413514151413452, + 0.04109807005687992, + 0.06509187382424421, + 0.08938502076115579, + 0.11050024283721899, + 0.12707060482289403, + 0.13897527276768473, + 0.14672807542014113, + 0.15107916537517393, + 0.15279284616510516, + 0.1525448534328315, + 0.1508888737559997, + 0.14825787947478603, + 0.1449800021823555, + 0.1412983233472439, + 0.13738967785962952, + 0.13338062670480047, + 0.12936024356753256, + 0.1253899975063767, + 0.12151122437601981, + 0.11775069468730881, + 0.11412472322561518, + 0.11064218235397395 + ], + "fig02_stab_LU_K3": [ + 0.09924313175535365, + 0.10256936864678223, + 0.1060740364703216, + 0.1097594691137235, + 0.11362287982045535, + 0.1176535115486785, + 0.12182847759772418, + 0.1261067405313517, + 0.13042048754191238, + 0.13466295524986524, + 0.13867160343578624, + 0.1422056002562168, + 0.14491721775363417, + 0.1463186453830178, + 0.14575014762144484, + 0.14236430205492528, + 0.1351563480424143, + 0.1230925791051279, + 0.10540918335808579, + 0.08214443687378736, + 0.054867645006157005, + 0.027343588100267213, + 0.006893725147935451, + 0.010717208266320699, + 0.012048817253435427, + 0.009600340425261084, + 0.012048817253435427, + 0.010717208266320688, + 0.006893725147935451, + 0.027343588100267307, + 0.054867645006157116, + 0.08214443687378736, + 0.10540918335808583, + 0.1230925791051279, + 0.1351563480424143, + 0.1423643020549253, + 0.14575014762144484, + 0.14631864538301773, + 0.14491721775363423, + 0.1422056002562168, + 0.13867160343578622, + 0.13466295524986524, + 0.13042048754191238, + 0.12610674053135176, + 0.12182847759772418, 
+ 0.1176535115486785, + 0.11362287982045538, + 0.1097594691137235, + 0.1060740364703216, + 0.10256936864678223, + 0.09924313175535365 + ], + "fig02_stab_LU_K4": [ + 0.10000826479194143, + 0.10308077565753798, + 0.10629748047902912, + 0.10965959258663721, + 0.113164833329334, + 0.11680550433107302, + 0.1205655597328731, + 0.12441614222250086, + 0.12830877151867878, + 0.13216499453037694, + 0.13586084158547385, + 0.1392040071797701, + 0.14190166272427712, + 0.14351810827578076, + 0.14342589089685656, + 0.14076462648445495, + 0.134442420761918, + 0.12324687798730673, + 0.10616530072892094, + 0.08300436681668819, + 0.05525778128121263, + 0.026809130814036408, + 0.004036128019128927, + 0.009816170908131286, + 0.01064139202386972, + 0.006890913854016109, + 0.01064139202386972, + 0.009816170908131212, + 0.004036128019128927, + 0.026809130814036377, + 0.05525778128121262, + 0.08300436681668819, + 0.1061653007289208, + 0.12324687798730673, + 0.134442420761918, + 0.14076462648445495, + 0.14342589089685656, + 0.14351810827578088, + 0.14190166272427712, + 0.1392040071797701, + 0.13586084158547385, + 0.13216499453037694, + 0.12830877151867878, + 0.12441614222250086, + 0.1205655597328731, + 0.11680550433107302, + 0.11316483332933404, + 0.10965959258663721, + 0.10629748047902912, + 0.10308077565753798, + 0.10000826479194143 + ], + "fig02_stab_VDHS_K1": [ + 3.195612004593886, + 3.1884045501042717, + 3.1803098019665814, + 3.1711795427653766, + 3.160834169226464, + 3.149054650315801, + 3.1355720846730035, + 3.1200540537197097, + 3.102086680290597, + 3.0811509182863452, + 3.0565910961846843, + 3.0275731120013494, + 2.9930289788173274, + 2.9515838417225377, + 2.901461676424689, + 2.8403680100137234, + 2.765355398259471, + 2.672697239361676, + 2.557842972334034, + 2.4156323924864966, + 2.241159050032629, + 2.032055149104581, + 1.7935202144596682, + 1.5476700334386027, + 1.3457219510826024, + 1.263999904701775, + 1.3457219510826024, + 1.5476700334386035, + 1.7935202144596682, + 
2.032055149104582, + 2.2411590500326297, + 2.4156323924864966, + 2.557842972334035, + 2.672697239361676, + 2.765355398259471, + 2.8403680100137234, + 2.901461676424689, + 2.9515838417225377, + 2.9930289788173274, + 3.0275731120013494, + 3.0565910961846834, + 3.0811509182863452, + 3.102086680290597, + 3.1200540537197115, + 3.1355720846730035, + 3.149054650315801, + 3.160834169226464, + 3.1711795427653766, + 3.1803098019665814, + 3.1884045501042717, + 3.195612004593886 + ], + "fig02_stab_VDHS_K2": [ + 6.790340392776408, + 6.7318153407239585, + 6.666947380317056, + 6.594847020651203, + 6.514476749946657, + 6.424624262853711, + 6.323871219198961, + 6.210557300850504, + 6.082739774599167, + 5.938149636167571, + 5.774147015863028, + 5.587681394781982, + 5.3752671874774824, + 5.132993836759378, + 4.856604070282933, + 4.5416979926595795, + 4.1841593080419575, + 3.780958759853372, + 3.3315698503169817, + 2.8403134350510637, + 2.3199513909887286, + 1.7965590071154278, + 1.3145643733394172, + 0.9366413928482918, + 0.719213618629633, + 0.6568620320030542, + 0.719213618629633, + 0.9366413928482922, + 1.3145643733394172, + 1.7965590071154285, + 2.3199513909887326, + 2.8403134350510637, + 3.3315698503169817, + 3.780958759853372, + 4.1841593080419575, + 4.54169799265958, + 4.856604070282933, + 5.132993836759379, + 5.375267187477487, + 5.587681394781982, + 5.774147015863029, + 5.938149636167571, + 6.082739774599167, + 6.210557300850508, + 6.323871219198961, + 6.424624262853711, + 6.5144767499466605, + 6.594847020651203, + 6.666947380317056, + 6.7318153407239585, + 6.790340392776408 + ], + "fig02_stab_VDHS_K3": [ + 5.685749926075608, + 5.5927000633395005, + 5.490438979959113, + 5.377853384554079, + 5.253689713169896, + 5.116542598074629, + 4.964846759989703, + 4.796875522667894, + 4.610750925088506, + 4.404473004831804, + 4.17597953891285, + 3.923252662988198, + 3.6444955459584696, + 3.338410463029832, + 3.004617896986434, + 2.644260988967786, + 2.2608321143695904, + 
1.8612209689903887, + 1.45688508084718, + 1.06484052988476, + 0.7078373307705315, + 0.4127173734371765, + 0.20572301803938034, + 0.09911607310456817, + 0.05264909553311433, + 0.019349549543362814, + 0.05264909553311433, + 0.09911607310456852, + 0.20572301803938034, + 0.41271737343717685, + 0.7078373307705321, + 1.06484052988476, + 1.4568850808471812, + 1.8612209689903887, + 2.2608321143695904, + 2.644260988967786, + 3.004617896986434, + 3.338410463029832, + 3.644495545958474, + 3.923252662988198, + 4.175979538912847, + 4.404473004831804, + 4.610750925088506, + 4.796875522667899, + 4.964846759989703, + 5.116542598074629, + 5.253689713169898, + 5.377853384554079, + 5.490438979959113, + 5.5927000633395005, + 5.685749926075608 + ], + "fig02_stab_VDHS_K4": [ + 1.899732692578897, + 1.9304694899601873, + 1.9594267677020383, + 1.985903059848596, + 2.009031014591944, + 2.027743652988205, + 2.0407362804343543, + 2.0464253202130953, + 2.042907007482346, + 2.027921678677601, + 1.9988338881521328, + 1.952645513782897, + 1.8860691545344963, + 1.7957029158238085, + 1.6783642680927853, + 1.5316558679320669, + 1.3548388205801005, + 1.1500551039480047, + 0.9238302601569819, + 0.6885521419216293, + 0.46326356378145833, + 0.2728132139731845, + 0.1439009229064962, + 0.08599421590986517, + 0.056077601404601884, + 0.033203047679268856, + 0.056077601404601884, + 0.08599421590986522, + 0.1439009229064962, + 0.2728132139731851, + 0.4632635637814586, + 0.6885521419216293, + 0.9238302601569822, + 1.1500551039480047, + 1.3548388205801005, + 1.531655867932069, + 1.6783642680927853, + 1.7957029158238047, + 1.8860691545344976, + 1.952645513782897, + 1.9988338881521326, + 2.027921678677601, + 2.042907007482346, + 2.0464253202130966, + 2.0407362804343543, + 2.027743652988205, + 2.0090310145919457, + 1.985903059848596, + 1.9594267677020383, + 1.9304694899601873, + 1.899732692578897 + ], + "fig01_conv_MIN-SR-NS_RADAU-RIGHT_$K=1$": [ + 4.941342598710849, + 3.419771369185764, + 1.4978113187611073, + 
0.6241504532130073, + 0.21762278465819465, + 0.10366607658872837, + 0.050578419193392954, + 0.019934857291009158, + 0.009918413225837593 + ], + "fig01_conv_MIN-SR-NS_RADAU-RIGHT_$K=2$": [ + 1.8938215776490808, + 0.2308914036179199, + 0.052850386974164436, + 0.0129773469807926, + 0.002068300699302013, + 0.0005168412339210775, + 0.0001291969993586008, + 2.0670955134356826e-05, + 5.167719217668702e-06 + ], + "fig01_conv_MIN-SR-NS_RADAU-RIGHT_$K=3$": [ + 0.2199009313824977, + 0.004697991544023676, + 0.0002664356244503095, + 1.6125216649134168e-05, + 4.088009511247602e-07, + 2.551376838435805e-08, + 1.5940437613558508e-09, + 4.080312139333772e-11, + 2.552298404819443e-12 + ], + "fig01_conv_MIN-SR-NS_RADAU-RIGHT_$K=4$": [ + 0.05470552776170463, + 0.0005087920868269743, + 1.4164294680774433e-05, + 4.24046663338039e-07, + 4.284161083546592e-09, + 1.3361016646099639e-10, + 4.173030773756105e-12, + 2.137620195200722e-14, + 8.416160159348154e-14 + ], + "fig01_conv_MIN-SR-NS_LOBATTO_$K=1$": [ + 6.190193985474138, + 4.564431644023643, + 1.9562903819473227, + 0.7859748528680782, + 0.2663724122131104, + 0.12563792577697364, + 0.060995085922845756, + 0.023969146511500658, + 0.011913855170148572 + ], + "fig01_conv_MIN-SR-NS_LOBATTO_$K=2$": [ + 3.4183636540209306, + 0.4386943664507052, + 0.10065070949512224, + 0.024844725733692022, + 0.0039689544670134195, + 0.0009921912084584923, + 0.0002480490021368055, + 3.968799370062174e-05, + 9.922005815852604e-06 + ], + "fig01_conv_MIN-SR-NS_LOBATTO_$K=3$": [ + 0.49881232709192924, + 0.02583461969153564, + 0.0031470127174889344, + 0.00039063173361450823, + 2.494735811068925e-05, + 3.117427913310439e-06, + 3.896469685786101e-07, + 2.4936833185918742e-08, + 3.1169809783648595e-09 + ], + "fig01_conv_MIN-SR-NS_LOBATTO_$K=4$": [ + 0.0407925305246079, + 0.0003045358483721667, + 8.557543641014717e-06, + 2.591841341298208e-07, + 2.6298737836684637e-09, + 8.208436720886738e-11, + 2.5630599673444606e-12, + 2.22895549914363e-14, + 1.1524745754669983e-13 
+ ], + "fig01_conv_MIN-SR-NS_LOBATTO_$K=5$": [ + 0.007985940152898533, + 2.3310631988454857e-05, + 3.1431424527307014e-07, + 4.6822478703638075e-09, + 1.8905080337620303e-11, + 2.968083987775387e-13, + 6.651597296487977e-15, + 5.3032945191522335e-15, + 1.152452015824958e-13 + ], + "fig01_conv_MIN-SR-S_RADAU-RIGHT_$K=1$": [ + 2.7106421300605414, + 1.3468140257640926, + 0.5765788475798426, + 0.25579156308503515, + 0.09502132072691982, + 0.04639275748537765, + 0.022927873436374358, + 0.009108177881982187, + 0.004543718474032761 + ], + "fig01_conv_MIN-SR-S_RADAU-RIGHT_$K=2$": [ + 0.8645651094810126, + 0.14815111847603668, + 0.03462832011582988, + 0.008382901044432052, + 0.0013266717622784736, + 0.00033112028589555513, + 8.274549728412895e-05, + 1.3237724989094307e-05, + 3.3093756439013806e-06 + ], + "fig01_conv_MIN-SR-S_RADAU-RIGHT_$K=3$": [ + 0.42980107795084854, + 0.02667779994762687, + 0.00271591963330548, + 0.0003095002809453826, + 1.92041647635049e-05, + 2.389404731133338e-06, + 2.9832640065613164e-07, + 1.9086659714773144e-08, + 2.385649202563511e-09 + ], + "fig01_conv_MIN-SR-S_RADAU-RIGHT_$K=4$": [ + 0.23247161868411034, + 0.006719952620684487, + 0.00028264905419148913, + 1.351226471653935e-05, + 3.082596809060139e-07, + 1.890464103164041e-08, + 1.1758208295949162e-09, + 3.00600897372531e-11, + 1.87876958978415e-12 + ], + "fig01_conv_MIN-SR-S_LOBATTO_$K=1$": [ + 4.3318685135819885, + 2.857995474384535, + 1.2649197356983224, + 0.5377561633505761, + 0.19049025456855553, + 0.09125061453813002, + 0.04464684843607603, + 0.01762690989178756, + 0.00877508244660792 + ], + "fig01_conv_MIN-SR-S_LOBATTO_$K=2$": [ + 1.2264461026987865, + 0.11674766437677365, + 0.022912998894168042, + 0.005286871059936933, + 0.0008256196332030201, + 0.00020568259160717883, + 5.137570934826785e-05, + 8.218106665925478e-06, + 2.05445513321366e-06 + ], + "fig01_conv_MIN-SR-S_LOBATTO_$K=3$": [ + 0.2831020228917702, + 0.013372662509927104, + 0.0014187424529362442, + 0.00016749309299216408, + 
1.0531954045215518e-05, + 1.3130948830520997e-06, + 1.6403044391928728e-07, + 1.0496069545472758e-08, + 1.3119578066245256e-09 + ], + "fig01_conv_MIN-SR-S_LOBATTO_$K=4$": [ + 0.09459785256987645, + 0.0015958539060761836, + 7.836275861453235e-05, + 4.49754998021275e-06, + 1.1212761097971252e-07, + 6.980770763856037e-09, + 4.35870324288637e-10, + 1.1154262258148805e-11, + 6.949468200938221e-13 + ], + "fig01_conv_MIN-SR-S_LOBATTO_$K=5$": [ + 0.036953883603893734, + 0.00024577045180101134, + 5.324185968619995e-06, + 1.4367291992122873e-07, + 1.4025075157666007e-09, + 4.3513568454071483e-11, + 1.3491207844008615e-12, + 1.3522437266264141e-14, + 1.351438910099341e-14 + ], + "fig01_conv_MIN-SR-FLEX_RADAU-RIGHT_$K=1$": [ + 1.1296012593172537, + 1.0244379690830323, + 0.8600638566995475, + 0.6218954441914762, + 0.32521014689478, + 0.17896839255044747, + 0.09395842480535667, + 0.038707661192999315, + 0.019545455623633533 + ], + "fig01_conv_MIN-SR-FLEX_RADAU-RIGHT_$K=2$": [ + 1.0827208580932455, + 0.4342330680848202, + 0.16550575908104556, + 0.04886815718381653, + 0.008202056458742382, + 0.002063159171689222, + 0.0005165335891143117, + 8.267744383782074e-05, + 2.0670481226052964e-05 + ], + "fig01_conv_MIN-SR-FLEX_RADAU-RIGHT_$K=3$": [ + 0.6011053082231909, + 0.09866773167747889, + 0.013675898346654926, + 0.001516014142172037, + 8.863796360076962e-05, + 1.0889409010320152e-05, + 1.354988180025796e-06, + 8.660720429314615e-08, + 1.0823778538044859e-08 + ], + "fig01_conv_MIN-SR-FLEX_RADAU-RIGHT_$K=4$": [ + 0.3136196302094444, + 0.016484675621585364, + 0.0008793766642878593, + 3.6924438506982123e-05, + 6.346945211687702e-07, + 3.552804329469873e-08, + 2.149473902127427e-09, + 5.4500022465542657e-11, + 3.397735730401283e-12 + ], + "fig01_conv_MIN-SR-FLEX_LOBATTO_$K=1$": [ + 1.1296012593172537, + 1.0244379690830323, + 0.8600638566995474, + 0.6218954441914761, + 0.32521014689477934, + 0.17896839255044764, + 0.09395842480535632, + 0.038707661192999315, + 0.019545455623633533 + ], + 
"fig01_conv_MIN-SR-FLEX_LOBATTO_$K=2$": [ + 1.0754387340055667, + 0.433860928312898, + 0.16549671967510293, + 0.048868091036633324, + 0.008202056407930255, + 0.0020631591714900003, + 0.000516533589114674, + 8.267744383958278e-05, + 2.0670481227268063e-05 + ], + "fig01_conv_MIN-SR-FLEX_LOBATTO_$K=3$": [ + 0.6358733551797489, + 0.1000929615831124, + 0.013707657328314872, + 0.0015163764245318834, + 8.863864986158586e-05, + 1.0889414501929428e-05, + 1.354988221290613e-06, + 8.660720414096238e-08, + 1.0823777693251798e-08 + ], + "fig01_conv_MIN-SR-FLEX_LOBATTO_$K=4$": [ + 0.2683402781257809, + 0.014666007432830145, + 0.0008440493056453323, + 3.658879510702298e-05, + 6.34311734271313e-07, + 3.552633352155699e-08, + 2.1494663013225486e-09, + 5.4498602104157756e-11, + 3.3971325117286916e-12 + ], + "fig01_conv_MIN-SR-FLEX_LOBATTO_$K=5$": [ + 0.1058558285699054, + 0.0019763887586471958, + 4.330592799751463e-05, + 7.454355761257005e-07, + 3.7229148855304855e-09, + 8.480839728703253e-11, + 2.3377761651159793e-12, + 1.8352664197006546e-14, + 1.1480244080879017e-13 + ] +} \ No newline at end of file diff --git a/pySDC/projects/parallelSDC_reloaded/scripts/crop.sh b/pySDC/projects/parallelSDC_reloaded/scripts/crop.sh new file mode 100755 index 0000000000..a178e646e3 --- /dev/null +++ b/pySDC/projects/parallelSDC_reloaded/scripts/crop.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +# Trim all figures +echo "------------------------------------------------------------" +echo "Croping figures ..." 
+for fig in *.pdf +do + pdfcrop "${fig}" "${fig}" +done \ No newline at end of file diff --git a/pySDC/projects/parallelSDC_reloaded/scripts/fig01_conv.py b/pySDC/projects/parallelSDC_reloaded/scripts/fig01_conv.py new file mode 100644 index 0000000000..618810e6a1 --- /dev/null +++ b/pySDC/projects/parallelSDC_reloaded/scripts/fig01_conv.py @@ -0,0 +1,97 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Tue Jan 9 16:00:41 2024 + +Convergence plots (on Dahlquist) for the article +""" +import os +import numpy as np + +from pySDC.projects.parallelSDC_reloaded.utils import getParamsSDC, solutionSDC, plt +from pySDC.helpers.testing import DataChecker + +data = DataChecker(__file__) + +PATH = '/' + os.path.join(*__file__.split('/')[:-1]) +SCRIPT = __file__.split('/')[-1].split('.')[0] + +# Script parameters +lam = 1j +tEnd = 2 * np.pi +nStepsList = np.array([2, 5, 10, 20, 50, 100, 200, 500, 1000]) +dtVals = tEnd / nStepsList + +# Collocation parameters +nodeType = "LEGENDRE" + + +def getError(uNum, uRef): + if uNum is None: # pragma: no cover + return np.inf + return np.linalg.norm(uRef - uNum[:, 0], np.inf) + + +# Configuration +# (nNodes, quadType, sweepType) +config = [ + (4, "RADAU-RIGHT", "MIN-SR-NS"), + (5, "LOBATTO", "MIN-SR-NS"), + (4, "RADAU-RIGHT", "MIN-SR-S"), + (5, "LOBATTO", "MIN-SR-S"), + (4, "RADAU-RIGHT", "MIN-SR-FLEX"), + (5, "LOBATTO", "MIN-SR-FLEX"), +] + +# ----------------------------------------------------------------------------- +# Script execution +# ----------------------------------------------------------------------------- +for nNodes, quadType, sweepType in config: + # Schemes parameters + schemes = [ + # ("RK4", None), ("ESDIRK53", None), + *[(sweepType, i) for i in range(1, nNodes + 1)] + ] + + # Plot styles + styles = [ + # dict(ls="--", c="gray"), dict(ls="-.", c="gray"), + dict(ls="-", marker='o'), + dict(ls="-", marker='>'), + dict(ls="-", marker='s'), + dict(ls="-", marker='^'), + dict(ls="-", marker='*'), + ] + + 
# Figure generation + figName = f"{sweepType}_{quadType}" + plt.figure(f"{sweepType}_{quadType}") + for (qDelta, nSweeps), style in zip(schemes, styles): + params = getParamsSDC(quadType, nNodes, qDelta, nSweeps, nodeType) + label = f"$K={nSweeps}$" + errors = [] + + for nSteps in nStepsList: + uNum, counters, parallel = solutionSDC(tEnd, nSteps, params, 'DAHLQUIST', lambdas=np.array([lam])) + + tVals = np.linspace(0, tEnd, nSteps + 1) + uExact = np.exp(lam * tVals) + + err = getError(uNum, uExact) + errors.append(err) + + plt.loglog(dtVals, errors, **style, label=label) + + data.storeAndCheck(f"{SCRIPT}_{figName}_{label}", errors) + + if nSweeps is not None: + plt.loglog(dtVals, (0.1 * dtVals) ** nSweeps, '--', c='gray', lw=1.5) + + plt.legend() + plt.xlabel(r"$\Delta{t}$") + plt.ylabel(r"$L_\infty$ error") + plt.grid(True) + plt.tight_layout() + plt.savefig(f"{PATH}/{SCRIPT}_{figName}.pdf") + +data.writeToJSON() diff --git a/pySDC/projects/parallelSDC_reloaded/scripts/fig02_stab.py b/pySDC/projects/parallelSDC_reloaded/scripts/fig02_stab.py new file mode 100644 index 0000000000..625334d34c --- /dev/null +++ b/pySDC/projects/parallelSDC_reloaded/scripts/fig02_stab.py @@ -0,0 +1,80 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Wed Jan 10 12:02:55 2024 + +Stability plots (on Dahlquist) for the article +""" +import os +import numpy as np + +from pySDC.projects.parallelSDC_reloaded.utils import getParamsSDC, solutionSDC, plotStabContour, plt +from pySDC.helpers.testing import DataChecker + +data = DataChecker(__file__) + +PATH = '/' + os.path.join(*__file__.split('/')[:-1]) +SCRIPT = __file__.split('/')[-1].split('.')[0] + +# Script parameters +zoom = 2 +reLims = -4.5 * zoom, 0.5 * zoom +imLims = -3.5 * zoom, 3.5 * zoom +nVals = 251 + +# Collocation parameters +nNodes = 4 +nodeType = "LEGENDRE" +quadType = "RADAU-RIGHT" + +# Configuration +# (qDeltaType) +config = [ + "PIC", + "MIN-SR-NS", + "MIN-SR-S", + "MIN-SR-FLEX", + "LU", + "VDHS", +] + + 
+# ----------------------------------------------------------------------------- +# Script execution +# ----------------------------------------------------------------------------- + +# Problem instanciation +reVals = np.linspace(*reLims, num=nVals) +imVals = np.linspace(*imLims, num=nVals) +lambdas = reVals[None, :] + 1j * imVals[:, None] + +# Scheme instanciation +for qDeltaType in config: + if qDeltaType == "MIN-SR-S": + fac = 5 + reVals *= fac + imVals *= fac + lambdas *= fac + + for nSweeps in [1, 2, 3, 4]: + params = getParamsSDC(quadType, nNodes, qDeltaType, nSweeps, nodeType) + + uNum, counters, parallel = solutionSDC(1, 1, params, 'DAHLQUIST', lambdas=lambdas.ravel()) + + uEnd = uNum[-1, :].reshape(lambdas.shape) + stab = np.abs(uEnd) + + figName = f"{qDeltaType}_K{nSweeps}" + plt.figure(figName) + + plotStabContour(reVals, imVals, stab) + data.storeAndCheck(f"{SCRIPT}_{figName}", stab[::5, -50]) + + plt.xticks(fontsize=8) + plt.yticks(fontsize=8) + plt.title(f"$K={nSweeps}$", fontsize=10) + plt.gcf().set_size_inches(2.5, 2.5) + plt.tight_layout() + plt.savefig(f"{PATH}/{SCRIPT}_{figName}.pdf") + +data.writeToJSON() diff --git a/pySDC/projects/parallelSDC_reloaded/scripts/fig03_lorenz.py b/pySDC/projects/parallelSDC_reloaded/scripts/fig03_lorenz.py new file mode 100644 index 0000000000..891689abe5 --- /dev/null +++ b/pySDC/projects/parallelSDC_reloaded/scripts/fig03_lorenz.py @@ -0,0 +1,176 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Wed Jan 10 15:34:24 2024 + +Figures with experiment on the Lorenz problem +""" +import os +import numpy as np +import scipy as sp + +from pySDC.projects.parallelSDC_reloaded.utils import solutionExact, getParamsSDC, solutionSDC, getParamsRK, plt +from pySDC.helpers.testing import DataChecker + +data = DataChecker(__file__) + +PATH = '/' + os.path.join(*__file__.split('/')[:-1]) +SCRIPT = __file__.split('/')[-1].split('.')[0] + +symList = ['o', '^', 's', '>', '*', '<', 'p', '>'] * 10 + +# SDC 
parameters +nNodes = 4 +quadType = 'RADAU-RIGHT' +nodeType = 'LEGENDRE' +parEfficiency = 0.8 # 1/nNodes + +# ----------------------------------------------------------------------------- +# Trajectories (reference solution) +# ----------------------------------------------------------------------------- +tEnd = 2 +nSteps = tEnd * 50 +tVals = np.linspace(0, tEnd, nSteps + 1) +nPeriods = 2 + +print(f"Computing exact solution up to t={tEnd} ...") +uExact = solutionExact(tEnd, nSteps, "LORENZ", u0=(5, -5, 20)) + +z = uExact[:, -1] +idx = sp.signal.find_peaks(z)[0][nPeriods - 1] +print(f'tEnd for {nPeriods} periods : {tVals[idx]}') + +figName = f"{SCRIPT}_traj" +plt.figure(figName) +me = 0.1 +plt.plot(tVals, uExact[:, 0], 's-', label="$x(t)$", markevery=me) +plt.plot(tVals, uExact[:, 1], 'o-', label="$y(t)$", markevery=me) +plt.plot(tVals, uExact[:, 2], '^-', label="$z(t)$", markevery=me) +plt.vlines(tVals[idx], ymin=-20, ymax=40, linestyles="--", linewidth=1) +plt.legend(loc="upper right") +plt.xlabel("$t$") +plt.ylabel("Trajectory") +plt.gcf().set_size_inches(12, 3) +plt.tight_layout() +plt.savefig(f'{PATH}/{figName}.pdf') + +# ----------------------------------------------------------------------------- +# %% Convergence plots +# ----------------------------------------------------------------------------- +tEnd = 1.24 +nStepsList = np.array([2, 5, 10, 20, 50, 100, 200, 500, 1000]) +dtVals = tEnd / nStepsList + + +def getError(uNum, uRef): + if uNum is None: # pragma: no cover + return np.inf + return np.linalg.norm(np.linalg.norm(uRef - uNum, np.inf, axis=-1), np.inf) + + +config = ["PIC", "MIN-SR-NS"] +for qDelta, sym in zip(config, symList): + figName = f"{SCRIPT}_conv_{qDelta}" + plt.figure(figName) + + for nSweeps in [1, 2, 3, 4, 5]: + params = getParamsSDC(quadType=quadType, numNodes=nNodes, nodeType=nodeType, qDeltaI=qDelta, nSweeps=nSweeps) + + errors = [] + + for nSteps in nStepsList: + print(f' -- nSteps={nSteps} ...') + + uRef = solutionExact(tEnd, nSteps, 
"LORENZ", u0=(5, -5, 20)) + + uSDC, counters, parallel = solutionSDC(tEnd, nSteps, params, "LORENZ", u0=(5, -5, 20)) + + err = getError(uSDC, uRef) + errors.append(err) + + # error VS dt + label = f"$K={nSweeps}$" + plt.loglog(dtVals, errors, sym + '-', label=f"$K={nSweeps}$") + data.storeAndCheck(f"{figName}_{label}", errors[1:]) + + x = dtVals[4:] + for k in [1, 2, 3, 4, 5, 6]: + plt.loglog(x, 1e4 * x**k, "--", color="gray", linewidth=0.8) + + plt.gca().set( + xlabel=r"$\Delta{t}$", + ylabel=r"$L_\infty$ error", + ylim=(8.530627786509715e-12, 372.2781393394293), + ) + plt.legend(loc="lower right") + plt.grid() + plt.tight_layout() + plt.savefig(f"{PATH}/{figName}.pdf") + + +# ----------------------------------------------------------------------------- +# %% Error VS cost plots +# ----------------------------------------------------------------------------- +def getCost(counters): + nNewton, nRHS, tComp = counters + return nNewton + nRHS + + +minPrec = ["MIN-SR-NS", "MIN-SR-S", "MIN-SR-FLEX"] + +symList = ['^', '>', '<', 'o', 's', '*'] +config = [ + [(*minPrec, "LU", "EE", "PIC"), 4], + [(*minPrec, "VDHS", "RK4", "ESDIRK43"), 4], + [(*minPrec, "PIC", "RK4", "ESDIRK43"), 5], +] + + +i = 0 +for qDeltaList, nSweeps in config: + figName = f"{SCRIPT}_cost_{i}" + i += 1 + plt.figure(figName) + + for qDelta, sym in zip(qDeltaList, symList): + try: + params = getParamsRK(qDelta) + except KeyError: + params = getParamsSDC( + quadType=quadType, numNodes=nNodes, nodeType=nodeType, qDeltaI=qDelta, nSweeps=nSweeps + ) + + errors = [] + costs = [] + + for nSteps in nStepsList: + uRef = solutionExact(tEnd, nSteps, "LORENZ", u0=(5, -5, 20)) + + uSDC, counters, parallel = solutionSDC(tEnd, nSteps, params, "LORENZ", u0=(5, -5, 20)) + + err = getError(uSDC, uRef) + errors.append(err) + + cost = getCost(counters) + if parallel: + assert qDelta != "EE", "wait, whaaat ??" 
+ cost /= nNodes * parEfficiency + costs.append(cost) + + # error VS cost + ls = '-' if qDelta.startswith("MIN-SR-") else "--" + plt.loglog(costs, errors, sym + ls, label=qDelta) + data.storeAndCheck(f"{figName}_{qDelta}", errors[2:]) + + plt.gca().set( + xlabel="Cost", + ylabel=r"$L_\infty$ error", + ylim=(1e-10, 400), + xlim=(30, 20000), + ) + plt.legend(loc="lower left") + plt.grid() + plt.tight_layout() + plt.savefig(f"{PATH}/{figName}.pdf") + +data.writeToJSON() diff --git a/pySDC/projects/parallelSDC_reloaded/scripts/fig04_protheroRobinson.py b/pySDC/projects/parallelSDC_reloaded/scripts/fig04_protheroRobinson.py new file mode 100644 index 0000000000..ae004ca75b --- /dev/null +++ b/pySDC/projects/parallelSDC_reloaded/scripts/fig04_protheroRobinson.py @@ -0,0 +1,107 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Thu Jan 11 10:21:47 2024 + +Figures with experiment on the Prothero-Robinson problem +""" +import os +import numpy as np + +from pySDC.projects.parallelSDC_reloaded.utils import solutionExact, getParamsSDC, solutionSDC, getParamsRK, plt +from pySDC.helpers.testing import DataChecker + +data = DataChecker(__file__) + +PATH = '/' + os.path.join(*__file__.split('/')[:-1]) +SCRIPT = __file__.split('/')[-1].split('.')[0] + +symList = ['o', '^', 's', '>', '*', '<', 'p', '>'] * 10 + +# SDC parameters +nNodes = 4 +quadType = 'RADAU-RIGHT' +nodeType = 'LEGENDRE' +parEfficiency = 0.8 # 1/nNodes + +epsilon = 1e-3 + +# ----------------------------------------------------------------------------- +# %% Convergence and error VS cost plots +# ----------------------------------------------------------------------------- +tEnd = 2 * np.pi +nStepsList = np.array([2, 5, 10, 20, 50, 100, 200, 500, 1000]) +dtVals = tEnd / nStepsList + + +def getError(uNum, uRef): + if uNum is None: + return np.inf + return np.linalg.norm(np.linalg.norm(uRef - uNum, np.inf, axis=-1), np.inf) + + +def getCost(counters): + nNewton, nRHS, tComp = counters + return nNewton 
+ nRHS + + +minPrec = ["MIN-SR-NS", "MIN-SR-S", "MIN-SR-FLEX"] + +symList = ['^', '>', '<', 'o', 's', '*', 'p'] +config = [ + [(*minPrec, "VDHS", "ESDIRK43", "LU"), 4], + [(*minPrec, "VDHS", "ESDIRK43", "LU"), 6], +] + + +i = 0 +for qDeltaList, nSweeps in config: + figNameConv = f"{SCRIPT}_conv_{i}" + figNameCost = f"{SCRIPT}_cost_{i}" + i += 1 + + for qDelta, sym in zip(qDeltaList, symList): + try: + params = getParamsRK(qDelta) + except KeyError: + params = getParamsSDC( + quadType=quadType, numNodes=nNodes, nodeType=nodeType, qDeltaI=qDelta, nSweeps=nSweeps + ) + + errors = [] + costs = [] + + for nSteps in nStepsList: + uRef = solutionExact(tEnd, nSteps, "PROTHERO-ROBINSON", epsilon=epsilon) + + uSDC, counters, parallel = solutionSDC(tEnd, nSteps, params, "PROTHERO-ROBINSON", epsilon=epsilon) + + err = getError(uSDC, uRef) + errors.append(err) + + cost = getCost(counters) + if parallel: + cost /= nNodes * parEfficiency + costs.append(cost) + + ls = '-' if qDelta.startswith("MIN-SR-") else "--" + + plt.figure(figNameConv) + plt.loglog(dtVals, errors, sym + ls, label=qDelta) + data.storeAndCheck(f"{figNameConv}_{qDelta}", errors) + + plt.figure(figNameCost) + plt.loglog(costs, errors, sym + ls, label=qDelta) + + for figName in [figNameConv, figNameCost]: + plt.figure(figName) + plt.gca().set( + xlabel="Cost" if "cost" in figName else r"$\Delta {t}$", + ylabel=r"$L_\infty$ error", + ) + plt.legend() + plt.grid(True) + plt.tight_layout() + plt.savefig(f"{PATH}/{figName}.pdf") + +data.writeToJSON() diff --git a/pySDC/projects/parallelSDC_reloaded/scripts/fig05_allenCahn.py b/pySDC/projects/parallelSDC_reloaded/scripts/fig05_allenCahn.py new file mode 100644 index 0000000000..78e3e3bd46 --- /dev/null +++ b/pySDC/projects/parallelSDC_reloaded/scripts/fig05_allenCahn.py @@ -0,0 +1,131 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Thu Jan 11 11:14:01 2024 + +Figures with experiments on the Allen-Cahn problem +""" +import os +import numpy as np + 
+from pySDC.projects.parallelSDC_reloaded.utils import solutionExact, getParamsSDC, solutionSDC, getParamsRK, plt +from pySDC.helpers.testing import DataChecker + +data = DataChecker(__file__) + +PATH = '/' + os.path.join(*__file__.split('/')[:-1]) +SCRIPT = __file__.split('/')[-1].split('.')[0] + +symList = ['o', '^', 's', '>', '*', '<', 'p', '>'] * 10 + +# SDC parameters +nNodes = 4 +quadType = 'RADAU-RIGHT' +nodeType = 'LEGENDRE' +parEfficiency = 0.8 # 1/nNodes +nSweeps = 4 + +# Problem parameters +pName = "ALLEN-CAHN" +tEnd = 50 +pParams = { + "periodic": False, + "nvars": 2**11 - 1, + "epsilon": 0.04, +} + +# ----------------------------------------------------------------------------- +# Trajectories (reference solution) +# ----------------------------------------------------------------------------- +uExact = solutionExact(tEnd, 1, pName, **pParams) +x = np.linspace(-0.5, 0.5, 2**11 + 1)[1:-1] + +figName = f"{SCRIPT}_solution" +plt.figure(figName) +plt.plot(x, uExact[0, :], '-', label="$u(0)$") +plt.plot(x, uExact[-1, :], '--', label="$u(T)$") + +plt.legend() +plt.xlabel("$x$") +plt.ylabel("Solution") +plt.gcf().set_size_inches(12, 3) +plt.tight_layout() +plt.savefig(f"{PATH}/{figName}.pdf") + +# ----------------------------------------------------------------------------- +# %% Convergence and error VS cost plots +# ----------------------------------------------------------------------------- +nStepsList = np.array([1, 2, 5, 10, 20, 50, 100, 200, 500]) +dtVals = tEnd / nStepsList + + +def getError(uNum, uRef): + if uNum is None: + return np.inf + return np.linalg.norm(uRef[-1, :] - uNum[-1, :], ord=2) + + +def getCost(counters): + nNewton, nRHS, tComp = counters + return 2 * nNewton + nRHS + + +minPrec = ["MIN-SR-NS", "MIN-SR-S", "MIN-SR-FLEX"] + +symList = ['^', '>', '<', 'o', 's', '*', 'p'] +config = [ + (*minPrec, "VDHS", "ESDIRK43", "LU"), +] + + +i = 0 +for qDeltaList in config: + figNameConv = f"{SCRIPT}_conv_{i}" + figNameCost = f"{SCRIPT}_cost_{i}" 
+ i += 1 + + for qDelta, sym in zip(qDeltaList, symList): + try: + params = getParamsRK(qDelta) + except KeyError: + params = getParamsSDC( + quadType=quadType, numNodes=nNodes, nodeType=nodeType, qDeltaI=qDelta, nSweeps=nSweeps + ) + + errors = [] + costs = [] + + for nSteps in nStepsList: + uRef = solutionExact(tEnd, nSteps, pName, **pParams) + + uSDC, counters, parallel = solutionSDC(tEnd, nSteps, params, pName, **pParams) + + err = getError(uSDC, uRef) + errors.append(err) + + cost = getCost(counters) + if parallel: + cost /= nNodes * parEfficiency + costs.append(cost) + + ls = '-' if qDelta.startswith("MIN-SR-") else "--" + + plt.figure(figNameConv) + plt.loglog(dtVals, errors, sym + ls, label=qDelta) + data.storeAndCheck(f"{figNameConv}_{qDelta}", errors, atol=1e-4, rtol=1e-4) + + plt.figure(figNameCost) + plt.loglog(costs, errors, sym + ls, label=qDelta) + + for figName in [figNameConv, figNameCost]: + plt.figure(figName) + plt.gca().set( + xlabel="Cost" if "cost" in figName else r"$\Delta {t}$", + ylabel=r"$L_2$ error at $T$", + ) + plt.legend() + plt.grid(True) + plt.tight_layout() + plt.savefig(f"{PATH}/{figName}.pdf") + +data.writeToJSON() diff --git a/pySDC/projects/parallelSDC_reloaded/scripts/run.sh b/pySDC/projects/parallelSDC_reloaded/scripts/run.sh new file mode 100755 index 0000000000..960418595e --- /dev/null +++ b/pySDC/projects/parallelSDC_reloaded/scripts/run.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +# Run all python scripts +for script in fig*.py +do + echo "------------------------------------------------------------" + echo "Running ${script} python script ..." 
+ python "${script}" +done \ No newline at end of file diff --git a/pySDC/projects/parallelSDC_reloaded/scripts/utils.py b/pySDC/projects/parallelSDC_reloaded/scripts/utils.py new file mode 120000 index 0000000000..50fbc6d8f5 --- /dev/null +++ b/pySDC/projects/parallelSDC_reloaded/scripts/utils.py @@ -0,0 +1 @@ +../utils.py \ No newline at end of file diff --git a/pySDC/projects/parallelSDC_reloaded/stability.py b/pySDC/projects/parallelSDC_reloaded/stability.py new file mode 100644 index 0000000000..f83706ffaa --- /dev/null +++ b/pySDC/projects/parallelSDC_reloaded/stability.py @@ -0,0 +1,78 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Tue Jan 9 10:12:09 2024 + +Compute stability regions for SDC wit given parameters +""" +import numpy as np +from pySDC.projects.parallelSDC_reloaded.utils import getParamsRK, getParamsSDC, solutionSDC, plotStabContour, plt + +SCRIPT = __file__.split('/')[-1].split('.')[0] + +# Script parameters +useRK = False +zoom = 20 +reLims = -4.5 * zoom, 0.5 * zoom +imLims = -3.5 * zoom, 3.5 * zoom +nVals = 251 + + +# RK parameters +rkScheme = "RK4" + +# Collocation parameters +nNodes = 4 +nodeType = "LEGENDRE" +quadType = "RADAU-RIGHT" + +# SDC parameters +nSweeps = 6 +qDeltaType = "VDHS" +collUpdate = False + + +# ----------------------------------------------------------------------------- +# Script execution +# ----------------------------------------------------------------------------- + +# Scheme instanciation +if useRK: # pragma: no cover + params = getParamsRK(rkScheme) +else: + params = getParamsSDC(quadType, nNodes, qDeltaType, nSweeps, nodeType, collUpdate) + +# Problem instanciation +reVals = np.linspace(*reLims, num=nVals) +imVals = np.linspace(*imLims, num=nVals) +lambdas = reVals[None, :] + 1j * imVals[:, None] +uNum, counters, parallel = solutionSDC(1, 1, params, 'DAHLQUIST', lambdas=lambdas.ravel()) + +uEnd = uNum[-1, :].reshape(lambdas.shape) +stab = np.abs(uEnd) + +fig, axs = plt.subplots(1, 2) + +ax 
= plotStabContour(reVals, imVals, stab, ax=axs[0]) +if useRK: # pragma: no cover + ax.set_title(rkScheme) +else: + ax.set_title(f"{qDeltaType}, K={nSweeps}") + +imStab = stab[:, np.argwhere(reVals == 0)].ravel() +axs[1].semilogx(imStab, imVals) +axs[1].tick_params( + axis='x', # changes apply to the x-axis + which='both', # both major and minor ticks are affected + bottom=True, # ticks along the bottom edge are off + top=False, # ticks along the top edge are off + labelbottom=True, +) +axs[1].vlines(1, *imLims, linestyles='--', colors='black', linewidth=1) +axs[1].set_xlim([0.1, 10]) +axs[1].set_ylim(*imLims) +axs[1].set_aspect(0.2) +axs[1].set_xticks([0.1, 1, 10]) +axs[1].set_title("Imaginary axis") + +plt.tight_layout() diff --git a/pySDC/projects/parallelSDC_reloaded/utils.py b/pySDC/projects/parallelSDC_reloaded/utils.py new file mode 100644 index 0000000000..898f0253ad --- /dev/null +++ b/pySDC/projects/parallelSDC_reloaded/utils.py @@ -0,0 +1,324 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Sun Nov 12 18:50:39 2023 + +Utility functions to investigate parallel SDC on non-linear problems +""" +import os +import json +import numpy as np +from time import time +import warnings + +from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI + +from pySDC.implementations.problem_classes.Van_der_Pol_implicit import vanderpol +from pySDC.implementations.problem_classes.Lorenz import LorenzAttractor +from pySDC.implementations.problem_classes.odeScalar import ProtheroRobinson +from pySDC.implementations.problem_classes.odeSystem import ( + ProtheroRobinsonAutonomous, + Kaps, + ChemicalReaction3Var, + JacobiElliptic, +) +from pySDC.implementations.problem_classes.AllenCahn_1D_FD import ( + allencahn_front_fullyimplicit, + allencahn_periodic_fullyimplicit, +) +from pySDC.implementations.problem_classes.TestEquation_0D import testequation0d + +from pySDC.implementations.sweeper_classes.generic_implicit import 
generic_implicit
import pySDC.implementations.sweeper_classes.Runge_Kutta as rk

import matplotlib.pyplot as plt

PATH = '/' + os.path.join(*__file__.split('/')[:-1])

# General matplotlib settings
plt.rc('font', size=12)
plt.rcParams['lines.linewidth'] = 2
plt.rcParams['axes.titlesize'] = 16
plt.rcParams['axes.labelsize'] = 16
plt.rcParams['xtick.labelsize'] = 15
plt.rcParams['ytick.labelsize'] = 15
plt.rcParams['xtick.major.pad'] = 3
plt.rcParams['ytick.major.pad'] = 2
plt.rcParams['axes.labelpad'] = 6
plt.rcParams['markers.fillstyle'] = 'none'
plt.rcParams['lines.markersize'] = 7.0
plt.rcParams['lines.markeredgewidth'] = 1.5
plt.rcParams['mathtext.fontset'] = 'cm'
plt.rcParams['mathtext.rm'] = 'serif'
plt.rcParams['figure.max_open_warning'] = 100


def getParamsSDC(
    quadType="RADAU-RIGHT", numNodes=4, qDeltaI="IE", nSweeps=3, nodeType="LEGENDRE", collUpdate=False, initType="copy"
):
    """
    Build a pySDC ``description`` dictionary for an SDC run with the
    ``generic_implicit`` sweeper.

    Parameters mirror the sweeper/level settings: quadrature type, number of
    collocation nodes, QDelta preconditioner name, number of sweeps, node
    distribution, collocation update and initial guess. Returns the
    description dictionary expected by ``controller_nonMPI``.
    """
    description = {
        # Sweeper and its parameters
        "sweeper_class": generic_implicit,
        "sweeper_params": {
            "quad_type": quadType,
            "num_nodes": numNodes,
            "node_type": nodeType,
            "initial_guess": initType,
            "do_coll_update": collUpdate,
            "QI": qDeltaI,
            'skip_residual_computation': ('IT_CHECK', 'IT_DOWN', 'IT_UP', 'IT_FINE', 'IT_COARSE'),
        },
        # Step parameters
        "step_params": {
            "maxiter": 1,
        },
        # Level parameters
        "level_params": {
            "restol": -1,
            "nsweeps": nSweeps,
        },
    }

    return description


# Mapping from scheme name to the pySDC Runge-Kutta sweeper implementing it
RK_SWEEPERS = {
    "BE": rk.BackwardEuler,
    "FE": rk.ForwardEuler,
    "RK4": rk.RK4,
    "DIRK43": rk.DIRK43,
    "ESDIRK53": rk.ESDIRK53,
    "ESDIRK43": rk.ESDIRK43,
}


def getParamsRK(method="RK4"):
    """
    Build a pySDC ``description`` dictionary for a Runge-Kutta run.

    Raises ``KeyError`` when *method* is not in ``RK_SWEEPERS`` — caller
    scripts rely on this to fall back to ``getParamsSDC``.
    """
    description = {
        # Sweeper and its parameters
        "sweeper_class": RK_SWEEPERS[method],
        "sweeper_params": {'skip_residual_computation': ('IT_CHECK', 'IT_DOWN', 'IT_UP', 'IT_FINE', 'IT_COARSE')},
        # Step parameters
        "step_params": {
            "maxiter": 1,
        },
        # Level parameters
        "level_params": {
            "restol": -1,
            "nsweeps": 1,
        },
    }
+ return description + + +def setupProblem(name, description, dt, **kwargs): + """Add problem settings to pySDC description parameters""" + + # Common newton tolerance and max number of iterations + description["problem_params"] = { + 'newton_tol': 1e-8, + 'newton_maxiter': 300, + } + # Level parameters + description["level_params"]["dt"] = dt + + if name == "VANDERPOL": + description["problem_class"] = vanderpol + description["problem_params"].update( + { + 'newton_tol': 1e-12, + 'mu': kwargs.get("mu", 10), # vanderpol parameter + 'u0': np.array([2.0, 0]), + } + ) + elif name == "LORENZ": + description["problem_class"] = LorenzAttractor + description["problem_params"].update( + { + 'newton_tol': 1e-12, + 'u0': kwargs.get("u0", (1, 1, 1)), + } + ) + elif name == "PROTHERO-ROBINSON": + description["problem_class"] = ProtheroRobinson + description["problem_params"].update( + { + 'newton_tol': 1e-12, + 'epsilon': kwargs.get("epsilon", 1e-3), + } + ) + elif name == "PROTHERO-ROBINSON-NL": + description["problem_class"] = ProtheroRobinson + description["problem_params"].update( + { + 'newton_tol': 1e-12, + 'epsilon': kwargs.get("epsilon", 1e-3), + 'nonLinear': True, + } + ) + elif name == "PROTHERO-ROBINSON-A": + description["problem_class"] = ProtheroRobinsonAutonomous + description["problem_params"].update( + { + 'newton_tol': 1e-12, + 'epsilon': kwargs.get("epsilon", 1e-3), + } + ) + elif name == "PROTHERO-ROBINSON-A-NL": + description["problem_class"] = ProtheroRobinsonAutonomous + description["problem_params"].update( + { + 'newton_tol': 1e-12, + 'epsilon': kwargs.get("epsilon", 1e-3), + 'nonLinear': True, + } + ) + elif name == "KAPS": + description["problem_class"] = Kaps + description["problem_params"].update( + { + 'epsilon': kwargs.get("epsilon", 1e-3), + } + ) + elif name == "CHEMREC": + description["problem_class"] = ChemicalReaction3Var + elif name == "ALLEN-CAHN": + periodic = kwargs.get("periodic", False) + description["problem_class"] = 
allencahn_periodic_fullyimplicit if periodic else allencahn_front_fullyimplicit + description["problem_params"].update( + { + 'newton_tol': 1e-8, + 'nvars': kwargs.get("nvars", 128 if periodic else 127), + 'eps': kwargs.get("epsilon", 0.04), + 'stop_at_maxiter': True, + } + ) + elif name == "JACELL": + description["problem_class"] = JacobiElliptic + elif name == "DAHLQUIST": + lambdas = kwargs.get("lambdas", None) + description["problem_class"] = testequation0d + description["problem_params"].update( + { + "lambdas": lambdas, + "u0": 1.0, + } + ) + description["problem_params"].pop("newton_tol") + description["problem_params"].pop("newton_maxiter") + else: + raise NotImplementedError(f"problem {name} not implemented") + + +def solutionSDC(tEnd, nSteps, params, probName, **kwargs): + dt = tEnd / nSteps + setupProblem(probName, params, dt, **kwargs) + + controller = controller_nonMPI(num_procs=1, controller_params={'logger_level': 30}, description=params) + + prob = controller.MS[0].levels[0].prob + + uInit = prob.u_exact(0) + uTmp = uInit.copy() + + uSDC = np.zeros((nSteps + 1, uInit.size), dtype=uInit.dtype) + uSDC[0] = uInit + tVals = np.linspace(0, tEnd, nSteps + 1) + tBeg = time() + print(" -- computing numerical solution with pySDC ...") + warnings.filterwarnings("ignore") + for i in range(nSteps): + uTmp[:] = uSDC[i] + try: + uSDC[i + 1], _ = controller.run(u0=uTmp, t0=tVals[i], Tend=tVals[i + 1]) + except Exception as e: + print(f" -- exception when running controller : {e}") + warnings.resetwarnings() + return None, (0, 0, 0), False + warnings.resetwarnings() + tComp = time() - tBeg + + try: + nNewton = prob.work_counters["newton"].niter + except KeyError: + nNewton = 0 + nRHS = prob.work_counters["rhs"].niter + print(f" done, newton:{nNewton}, rhs:{nRHS}, tComp:{tComp}") + try: + parallel = controller.MS[0].levels[0].sweep.parallelizable + except AttributeError: # pragma: no cover + parallel = False + + return uSDC, (nNewton, nRHS, tComp), parallel + + +def 
solutionExact(tEnd, nSteps, probName, **kwargs): + """Return the exact solution of the Van-der-Pol problem at tEnd""" + + if probName == "VANDERPOL": + mu = kwargs.get('mu', 10) + key = f"{tEnd}_{nSteps}_{mu}" + cacheFile = '_solVanderpolExact.json' + elif probName == "LORENZ": + u0 = kwargs.get('u0', (1, 1, 1)) + key = f"{tEnd}_{nSteps}_{u0}" + cacheFile = '_solLorenzExact.json' + elif probName == "CHEMREC": + key = f"{tEnd}_{nSteps}" + cacheFile = '_solChemicalReactionExact.json' + elif probName == "JACELL": + key = f"{tEnd}_{nSteps}" + cacheFile = '_solJacobiEllipticExact.json' + + # Eventually load already computed solution from local cache + try: + with open(f"{PATH}/{cacheFile}", "r") as f: + cache = json.load(f) + if key in cache: + return np.array(cache[key]) + except (FileNotFoundError, json.JSONDecodeError, UnboundLocalError): + cache = {} + + # Compute solution + params = getParamsSDC() + dt = tEnd / nSteps + setupProblem(probName, params, dt, **kwargs) + + controller = controller_nonMPI(num_procs=1, controller_params={'logger_level': 30}, description=params) + solver = controller.MS[0].levels[0].prob.u_exact + + print(" -- computing analytical solution with P.u_exact ...") + tBeg = time() + tVals = np.linspace(0, tEnd, nSteps + 1) + uExact = [solver(0)] + for i in range(nSteps): + try: + uExact.append(solver(tVals[i + 1], uExact[-1], tVals[i])) + except TypeError: + uExact.append(solver(tVals[i + 1])) + uExact = np.array(uExact) + print(f" done in {time()-tBeg:1.2f}s") + + try: + # Save solution in local cache + cache[key] = uExact.tolist() + with open(f"{PATH}/{cacheFile}", "w") as f: + json.dump(cache, f) + except UnboundLocalError: + pass + + return uExact + + +# Plotting functions +def plotStabContour(reVals, imVals, stab, ax=None): + if ax is None: + ax = plt.gca() + ax.contour(reVals, imVals, stab, levels=[1.0], colors='black', linewidths=1) + ax.contourf(reVals, imVals, stab, levels=[1.0, np.inf], colors='gainsboro') + ax.hlines(0, min(reVals), 
max(reVals), linestyles='--', colors='black', linewidth=0.5) + ax.vlines(0, min(imVals), max(imVals), linestyles='--', colors='black', linewidth=0.5) + ax.set_aspect('equal', 'box') + ax.set_xlabel(r"$Re(z)$", labelpad=0, fontsize=10) + ax.set_ylabel(r"$Im(z)$", labelpad=0, fontsize=10) + return ax diff --git a/pySDC/projects/parallelSDC_reloaded/vanderpol_accuracy.py b/pySDC/projects/parallelSDC_reloaded/vanderpol_accuracy.py new file mode 100644 index 0000000000..82e7e01d98 --- /dev/null +++ b/pySDC/projects/parallelSDC_reloaded/vanderpol_accuracy.py @@ -0,0 +1,118 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Sun Nov 12 22:14:03 2023 + +Script to investigate diagonal SDC on Van der Pol with different mu parameters, +in particular with graphs such as : + +- error VS time-step +- error VS computation cost + +Note : implementation in progress ... +""" +import numpy as np +import matplotlib.pyplot as plt + +from pySDC.projects.parallelSDC_reloaded.utils import getParamsSDC, getParamsRK, solutionSDC, solutionExact + +muVals = [0.1, 2, 10] +tEndVals = [6.3, 7.6, 18.9] # tEnd = 1 period for each mu + + +def getError(uNum, uRef): + if uNum is None: + return np.inf + return np.linalg.norm(uRef[:, 0] - uNum[:, 0], np.inf) + + +def getCost(counters): + nNewton, nRHS, tComp = counters + return nNewton + nRHS + + +# Base variable parameters +nNodes = 4 +quadType = 'RADAU-RIGHT' +nodeType = 'LEGENDRE' +parEfficiency = 1 / nNodes + +qDeltaList = [ + 'RK4', + 'ESDIRK43', + 'LU', + # 'IE', 'LU', 'IEpar', 'PIC', + 'MIN-SR-NS', + 'MIN-SR-S', + 'MIN-SR-FLEX', +] +nStepsList = np.array([2, 5, 10, 20, 50, 100, 200]) +nSweepList = [1, 2, 3, 4, 5, 6] + + +symList = ['o', '^', 's', '>', '*', '<', 'p', '>'] * 10 + +# qDeltaList = ['LU'] +nSweepList = [4] + +fig, axs = plt.subplots(2, len(muVals)) + +for j, (mu, tEnd) in enumerate(zip(muVals, tEndVals)): + print("-" * 80) + print(f"mu={mu}") + print("-" * 80) + + dtVals = tEnd / nStepsList + + i = 0 + for qDelta in 
qDeltaList: + for nSweeps in nSweepList: + sym = symList[i] + i += 1 + + name = f"{qDelta}({nSweeps})" + try: + params = getParamsRK(qDelta) + name = name[:-3] + except KeyError: + params = getParamsSDC( + quadType=quadType, numNodes=nNodes, nodeType=nodeType, qDeltaI=qDelta, nSweeps=nSweeps + ) + print(f'computing for {name} ...') + + errors = [] + costs = [] + + for nSteps in nStepsList: + print(f' -- nSteps={nSteps} ...') + + uRef = solutionExact(tEnd, nSteps, "VANDERPOL", mu=mu) + + uSDC, counters, parallel = solutionSDC(tEnd, nSteps, params, "VANDERPOL", mu=mu) + + err = getError(uSDC, uRef) + errors.append(err) + + cost = getCost(counters) + if parallel: + cost /= nNodes * parEfficiency + costs.append(cost) + + # error VS dt + axs[0, j].loglog(dtVals, errors, sym + '-', label=name) + # error VS cost + axs[1, j].loglog(costs, errors, sym + '-', label=name) + + for i in range(2): + if i == 0: + axs[i, j].set_title(f"mu={mu}") + axs[i, j].set( + xlabel=r"$\Delta{t}$" if i == 0 else "cost", + ylabel=r"$L_\infty$ error", + ylim=(1e-11, 10), + ) + axs[i, j].legend(loc="lower right" if i == 0 else "lower left") + axs[i, j].grid() + +fig.set_size_inches(18.2, 10.4) +fig.tight_layout() diff --git a/pySDC/projects/parallelSDC_reloaded/vanderpol_setup.py b/pySDC/projects/parallelSDC_reloaded/vanderpol_setup.py new file mode 100644 index 0000000000..1d6c59461c --- /dev/null +++ b/pySDC/projects/parallelSDC_reloaded/vanderpol_setup.py @@ -0,0 +1,57 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Sun Nov 12 21:35:05 2023 + +Script to numerically determine periods of Van der Pol oscillation for +different mu parameters. 
+""" +import numpy as np +from scipy import signal +import matplotlib.pyplot as plt + +from pySDC.projects.parallelSDC_reloaded.utils import solutionExact + +script = __file__.split('/')[-1].split('.')[0] + + +muVals = [0.1, 2, 10] +muPeriods = [] + +tEnd = 20 +nSteps = 200 +tVals = np.linspace(0, tEnd, nSteps + 1) + +# Compute and plot unscaled solution to determined period for each mu +for mu in muVals: + print(f"Computing exact solution up to t={tEnd} for mu={mu} ...") + uExact = solutionExact(tEnd, nSteps, "VANDERPOL", mu=mu) + plt.figure(f"{script}_traj") + plt.plot(tVals, uExact[:, 0], '-', label=f"$\\mu={mu}$") + plt.figure(f"{script}_accel") + plt.plot(tVals, uExact[:, 1], '-', label=f"$\\mu={mu}$") + + x = uExact[:, 0] + idx = signal.find_peaks(x)[0][0] + period = tVals[idx] + print(f" -- done, found period={period:.1f}") + muPeriods.append(period) + +# Compute and plot solution for each mu on one period, scale time with period +for mu, tEnd in zip(muVals, muPeriods): + nSteps = 200 + tVals = np.linspace(0, tEnd, nSteps + 1) + + print(f"Computing exact solution up to t={tEnd:.1f} for mu={mu} ...") + uExact = solutionExact(tEnd, nSteps, "VANDERPOL", mu=mu) + plt.figure(f"{script}_traj_scaled") + plt.plot(tVals / tEnd, uExact[:, 0], '-', label=f"$\\mu={mu}$") + print(' -- done') + +# Figure settings +for figName in [f"{script}_traj", f"{script}_accel", f"{script}_traj_scaled"]: + plt.figure(figName) + plt.legend() + plt.xlabel("time (scaled)" if "scaled" in figName else "time") + plt.ylabel("trajectory" if "traj" in figName else "acceleration") + plt.tight_layout() diff --git a/pySDC/tests/test_datatypes/test_multicomponent_mesh.py b/pySDC/tests/test_datatypes/test_multicomponent_mesh.py new file mode 100644 index 0000000000..a49bbc0f5c --- /dev/null +++ b/pySDC/tests/test_datatypes/test_multicomponent_mesh.py @@ -0,0 +1,64 @@ +import pytest + + +@pytest.mark.base +@pytest.mark.parametrize('shape', [1, (3,), (2, 4)]) +def test_MultiComponentMesh(shape): + 
from pySDC.implementations.datatype_classes.mesh import MultiComponentMesh + import numpy as np + + class TestMesh(MultiComponentMesh): + components = ['a', 'b'] + + # instantiate meshes + init = (shape, None, np.dtype('D')) + A = TestMesh(init) + B = TestMesh(A) + + # fill part of the meshes with values + a = np.random.random(shape) + b = np.random.random(shape) + zero = np.zeros_like(a) + A.a[:] = a + B.a[:] = b + + # check that the meshes have been prepared appropriately + for M, m in zip([A, B], [a, b]): + assert M.shape == (len(TestMesh.components),) + ((shape,) if type(shape) is int else shape) + assert np.allclose(M.a, m) + assert np.allclose(M.b, zero) + assert np.shares_memory(M, M.a) + assert np.shares_memory(M, M.b) + assert not np.shares_memory(M.a, m) + + # check that various computations give the desired results + assert np.allclose(A.a + B.a, a + b) + assert np.allclose((A + B).a, a + b) + assert np.allclose((A + B).b, zero) + + C = A - B + assert np.allclose(C.a, a - b) + assert np.allclose(C.b, zero) + assert not np.shares_memory(A, C) + assert not np.shares_memory(B, C) + + D = np.exp(A) + assert type(D) == TestMesh + assert np.allclose(D.a, np.exp(a)) + assert np.allclose(D.b, zero + 1) + assert not np.shares_memory(A, D) + + B *= A + assert np.allclose(B.a, a * b) + assert np.allclose(A.a, a) + assert np.allclose(B.b, zero) + assert np.allclose(A.b, zero) + assert not np.shares_memory(A, B) + + A /= 10.0 + assert np.allclose(A.a, a / 10) + assert np.allclose(A.b, zero) + + +if __name__ == '__main__': + test_MultiComponentMesh(1) diff --git a/pySDC/tests/test_helpers/.gitignore b/pySDC/tests/test_helpers/.gitignore new file mode 100644 index 0000000000..abfec30f75 --- /dev/null +++ b/pySDC/tests/test_helpers/.gitignore @@ -0,0 +1 @@ +_dataRef.json \ No newline at end of file diff --git a/pySDC/tests/test_helpers/test_testing.py b/pySDC/tests/test_helpers/test_testing.py new file mode 100644 index 0000000000..7e9c29650e --- /dev/null +++ 
b/pySDC/tests/test_helpers/test_testing.py @@ -0,0 +1,62 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Fri Feb 9 20:14:06 2024 +""" +import os +import warnings +import pytest + +from pySDC.helpers.testing import DataChecker + + +@pytest.mark.base +def test_DataChecker(): + result = [1, 2, 3, 4] + correct = result + wrong = [1, 2, 3, 3] + + d1 = DataChecker(__file__) + if os.path.isfile(d1._dataRefFile): + os.remove(d1._dataRefFile) + + warnings.filterwarnings("error") + try: + d1.storeAndCheck('r1', result) + except UserWarning: + pass + else: + raise AssertionError("no reference data does not raise warning") + d1.writeToJSON() + os.rename(d1._dataFile, d1._dataRefFile) + + d2 = DataChecker(__file__) + try: + d2.storeAndCheck('r1', result) + except UserWarning: + raise AssertionError("warning raised with reference data available") + d2.writeToJSON() + warnings.resetwarnings() + + d3 = DataChecker(__file__) + + try: + d3.storeAndCheck('r1', wrong) + except AssertionError: + pass + else: + raise AssertionError("wrong data does not raise assertion error") + + try: + d3.storeAndCheck('r2', correct) + except AssertionError: + pass + else: + raise AssertionError("wrong key does not raise assertion error") + + try: + d3.storeAndCheck('r1', correct[:-1]) + except AssertionError: + pass + else: + raise AssertionError("data with incorrect size does not raise assertion error") diff --git a/pySDC/tests/test_hooks/test_log_to_file.py b/pySDC/tests/test_hooks/test_log_to_file.py new file mode 100644 index 0000000000..0f0d48f0e2 --- /dev/null +++ b/pySDC/tests/test_hooks/test_log_to_file.py @@ -0,0 +1,85 @@ +import pytest + + +def run(hook, Tend=0): + from pySDC.implementations.problem_classes.TestEquation_0D import testequation0d + from pySDC.implementations.sweeper_classes.generic_implicit import generic_implicit + from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI + + level_params = {'dt': 1.0e-1} + + sweeper_params 
= { + 'num_nodes': 1, + 'quad_type': 'GAUSS', + } + + description = { + 'level_params': level_params, + 'sweeper_class': generic_implicit, + 'problem_class': testequation0d, + 'sweeper_params': sweeper_params, + 'problem_params': {}, + 'step_params': {'maxiter': 1}, + } + + controller_params = { + 'hook_class': hook, + 'logger_level': 30, + } + controller = controller_nonMPI(1, controller_params, description) + if Tend > 0: + prob = controller.MS[0].levels[0].prob + u0 = prob.u_exact(0) + + _, stats = controller.run(u0, 0, Tend) + return stats + + +@pytest.mark.base +def test_errors(): + from pySDC.implementations.hooks.log_solution import LogToFile + import os + + with pytest.raises(ValueError): + run(LogToFile) + + LogToFile.path = os.getcwd() + run(LogToFile) + + path = f'{os.getcwd()}/tmp' + LogToFile.path = path + run(LogToFile) + os.path.isdir(path) + + with pytest.raises(ValueError): + LogToFile.path = __file__ + run(LogToFile) + + +@pytest.mark.base +def test_logging(): + from pySDC.implementations.hooks.log_solution import LogToFile, LogSolution + from pySDC.helpers.stats_helper import get_sorted + import os + import pickle + import numpy as np + + path = f'{os.getcwd()}/tmp' + LogToFile.path = path + Tend = 2 + + stats = run([LogToFile, LogSolution], Tend=Tend) + u = get_sorted(stats, type='u') + + u_file = [] + for i in range(len(u)): + data = LogToFile.load(i) + u_file += [(data['t'], data['u'])] + + for us, uf in zip(u, u_file): + assert us[0] == uf[0] + assert np.allclose(us[1], uf[1]) + + +if __name__ == '__main__': + test_logging() diff --git a/pySDC/tests/test_problems/test_AllenCahn_1D_FD.py b/pySDC/tests/test_problems/test_AllenCahn_1D_FD.py index 789eedc198..f2367a1956 100644 --- a/pySDC/tests/test_problems/test_AllenCahn_1D_FD.py +++ b/pySDC/tests/test_problems/test_AllenCahn_1D_FD.py @@ -172,7 +172,6 @@ def test_capture_errors_and_warnings(caplog, stop_at_nan): multi_periodic = allencahn_periodic_multiimplicit(**problem_params) t0 = 0.0 - dt = 
1e-3 u0_front = full_front.u_exact(t0) u0_periodic = full_periodic.u_exact(t0) diff --git a/pySDC/tests/test_problems/test_Brusselator.py b/pySDC/tests/test_problems/test_Brusselator.py new file mode 100644 index 0000000000..9c651451c3 --- /dev/null +++ b/pySDC/tests/test_problems/test_Brusselator.py @@ -0,0 +1,40 @@ +import pytest + + +@pytest.mark.mpi4py +def test_Brusselator(): + """ + Test the implementation of the 2D Brusselator by doing an IMEX Euler step forward and then an explicit Euler step + backward to compute something akin to an error. We check that the "local error" has order 2. + """ + from pySDC.implementations.problem_classes.Brusselator import Brusselator + import numpy as np + + prob = Brusselator() + + dts = np.logspace(-3, -7, 15) + errors = [] + + for dt in dts: + + u0 = prob.u_exact(0) + f0 = prob.eval_f(u0, 0) + + # do an IMEX Euler step forward + u1 = prob.solve_system(u0 + dt * f0.expl, dt, u0, 0) + + # do an explicit Euler step backward + f1 = prob.eval_f(u1, dt) + u02 = u1 - dt * (f1.impl + f1.expl) + errors += [abs(u0 - u02)] + + errors = np.array(errors) + dts = np.array(dts) + order = np.log(errors[1:] / errors[:-1]) / np.log(dts[1:] / dts[:-1]) + + assert np.isclose(np.median(order), 2, atol=6e-2) + assert prob.work_counters['rhs'].niter == len(errors) * 2 + + +if __name__ == '__main__': + test_Brusselator() diff --git a/pySDC/tests/test_problems/test_GrayScottMPIFFT.py b/pySDC/tests/test_problems/test_GrayScottMPIFFT.py new file mode 100644 index 0000000000..63ed4fd5b0 --- /dev/null +++ b/pySDC/tests/test_problems/test_GrayScottMPIFFT.py @@ -0,0 +1,58 @@ +import pytest + + +@pytest.mark.mpi4py +@pytest.mark.parametrize('name', ['imex_diffusion', 'imex_linear', 'mi_diffusion', 'mi_linear']) +@pytest.mark.parametrize('spectral', [True, False]) +def test_GrayScottMPIFFT(name, spectral): + """ + Test the implementation of the Gray-Scott problem by doing an Euler step forward and then an explicit Euler step + backward to compute 
something akin to an error. We check that the "local error" has order 2. + + Keep + """ + if name == 'imex_diffusion': + from pySDC.implementations.problem_classes.GrayScott_MPIFFT import grayscott_imex_diffusion as problem_class + elif name == 'imex_linear': + from pySDC.implementations.problem_classes.GrayScott_MPIFFT import grayscott_imex_linear as problem_class + elif name == 'mi_diffusion': + from pySDC.implementations.problem_classes.GrayScott_MPIFFT import grayscott_mi_diffusion as problem_class + elif name == 'mi_linear': + from pySDC.implementations.problem_classes.GrayScott_MPIFFT import grayscott_mi_linear as problem_class + import numpy as np + + prob = problem_class(spectral=spectral, nvars=(127,) * 2) + + dts = np.logspace(-3, -7, 15) + errors = [] + + for dt in dts: + + u0 = prob.u_exact(0) + f0 = prob.eval_f(u0, 0) + + # do an IMEX or multi implicit Euler step forward + if 'solve_system_2' in dir(prob): + _u = prob.solve_system_1(u0, dt, u0, 0) + u1 = prob.solve_system_2(_u, dt, _u, 0) + else: + u1 = prob.solve_system(u0 + dt * f0.expl, dt, u0, 0) + + # do an explicit Euler step backward + f1 = prob.eval_f(u1, dt) + u02 = u1 - dt * (np.sum(f1, axis=0)) + errors += [abs(u0 - u02)] + + errors = np.array(errors) + dts = np.array(dts) + order = np.log(errors[1:] / errors[:-1]) / np.log(dts[1:] / dts[:-1]) + mean_order = np.median(order) + + assert np.isclose(np.median(order), 2, atol=1e-2), f'Expected order 2, but got {mean_order}' + assert prob.work_counters['rhs'].niter == len(errors) * 2 + if 'newton' in prob.work_counters.keys(): + assert prob.work_counters['newton'].niter > 0 + + +if __name__ == '__main__': + test_GrayScottMPIFFT('imex_diffusion', False) diff --git a/pySDC/tests/test_projects/test_AC/test_simple_forcing.py b/pySDC/tests/test_projects/test_AC/test_simple_forcing.py index 0c2a243ce9..c44589d5ca 100644 --- a/pySDC/tests/test_projects/test_AC/test_simple_forcing.py +++ b/pySDC/tests/test_projects/test_AC/test_simple_forcing.py @@ -5,10 
+5,18 @@ @pytest.mark.mpi4py -def test_main_serial(): - from pySDC.projects.AllenCahn_Bayreuth.run_simple_forcing_verification import main, visualize_radii +@pytest.mark.parametrize('spectral', [True, False]) +@pytest.mark.parametrize('name', ['AC-test-noforce', 'AC-test-constforce', 'AC-test-timeforce']) +def test_main_serial(name, spectral): + from pySDC.projects.AllenCahn_Bayreuth.run_simple_forcing_verification import run_simulation + + run_simulation(name=name, spectral=spectral, nprocs_space=None) + + +@pytest.mark.mpi4py +def test_visualize_radii(): + from pySDC.projects.AllenCahn_Bayreuth.run_simple_forcing_verification import visualize_radii - main() visualize_radii() diff --git a/pySDC/tests/test_projects/test_DAE/test_DAEMesh.py b/pySDC/tests/test_projects/test_DAE/test_DAEMesh.py new file mode 100644 index 0000000000..297d35c800 --- /dev/null +++ b/pySDC/tests/test_projects/test_DAE/test_DAEMesh.py @@ -0,0 +1,76 @@ +import pytest + + +@pytest.mark.base +@pytest.mark.parametrize('shape', [(6,), (4, 6), (4, 6, 8)]) +def testInitialization(shape): + """ + Tests for a random init if initialization results in desired shape of mesh. + """ + + import numpy as np + from pySDC.projects.DAE.misc.DAEMesh import DAEMesh + + init = (shape, None, np.dtype('float64')) + mesh = DAEMesh(init) + + assert np.shape(mesh.diff) == shape, f'ERROR: Component diff does not have the desired length!' + assert np.shape(mesh.alg) == shape, f'ERROR: Component alg does not have the desired length!' + + assert len(mesh.components) == len(mesh), 'ERROR: Mesh does not contain two component arrays!' + + +@pytest.mark.base +def testInitializationGivenMesh(): + """ + Tests if for a given mesh the initialization results in the same mesh. 
+ """ + + import numpy as np + from pySDC.projects.DAE.misc.DAEMesh import DAEMesh + + nvars_1d = 6 + init = (nvars_1d, None, np.dtype('float64')) + mesh1 = DAEMesh(init) + mesh1.diff[:] = np.arange(6) + mesh1.alg[:] = np.arange(6, 12) + + mesh2 = DAEMesh(mesh1) + + assert np.allclose(mesh1.diff, mesh2.diff) and np.allclose( + mesh1.alg, mesh2.alg + ), 'ERROR: Components in initialized meshes do not match!' + + +@pytest.mark.base +@pytest.mark.parametrize('shape', [(6,), (4, 6), (4, 6, 8)]) +def testArrayUFuncOperator(shape): + """ + Test if overloaded __array_ufunc__ operator of datatype does what it is supposed to do. + """ + + import numpy as np + from pySDC.projects.DAE.misc.DAEMesh import DAEMesh + + init = (shape, None, np.dtype('float64')) + mesh = DAEMesh(init) + mesh2 = DAEMesh(mesh) + + randomArr = np.random.random(shape) + mesh.diff[:] = randomArr + mesh2.diff[:] = 2 * randomArr + + subMesh = mesh - mesh2 + assert type(subMesh) == DAEMesh + assert np.allclose(subMesh.diff, randomArr - 2 * randomArr) + assert np.allclose(subMesh.alg, 0) + + addMesh = mesh + mesh2 + assert type(addMesh) == DAEMesh + assert np.allclose(addMesh.diff, randomArr + 2 * randomArr) + assert np.allclose(addMesh.alg, 0) + + sinMesh = np.sin(mesh) + assert type(sinMesh) == DAEMesh + assert np.allclose(sinMesh.diff, np.sin(randomArr)) + assert np.allclose(sinMesh.alg, 0) diff --git a/pySDC/tests/test_projects/test_DAE/test_HookClass_DAE.py b/pySDC/tests/test_projects/test_DAE/test_HookClass_DAE.py new file mode 100644 index 0000000000..65f583b3ff --- /dev/null +++ b/pySDC/tests/test_projects/test_DAE/test_HookClass_DAE.py @@ -0,0 +1,73 @@ +import numpy as np +import pytest + + +@pytest.mark.base +@pytest.mark.parametrize('M', [2, 3, 4]) +def testHookClassDiffAlgComps(M): + """ + Test if the hook class returns the correct errors. 
+ """ + + from pySDC.helpers.stats_helper import get_sorted + from pySDC.projects.DAE.problems.DiscontinuousTestDAE import DiscontinuousTestDAE + from pySDC.projects.DAE.sweepers.fully_implicit_DAE import fully_implicit_DAE + from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI + from pySDC.projects.DAE.misc.HookClass_DAE import ( + LogGlobalErrorPostStepDifferentialVariable, + LogGlobalErrorPostStepAlgebraicVariable, + ) + + dt = 1e-2 + level_params = { + 'restol': 1e-13, + 'dt': dt, + } + + problem_params = { + 'newton_tol': 1e-6, + } + + sweeper_params = { + 'quad_type': 'RADAU-RIGHT', + 'num_nodes': M, + 'QI': 'IE', + } + + step_params = { + 'maxiter': 45, + } + + controller_params = { + 'logger_level': 30, + 'hook_class': [LogGlobalErrorPostStepDifferentialVariable, LogGlobalErrorPostStepAlgebraicVariable], + } + + description = { + 'problem_class': DiscontinuousTestDAE, + 'problem_params': problem_params, + 'sweeper_class': fully_implicit_DAE, + 'sweeper_params': sweeper_params, + 'level_params': level_params, + 'step_params': step_params, + } + + controller = controller_nonMPI(num_procs=1, controller_params=controller_params, description=description) + + t0 = 1.0 + Tend = t0 + dt + + P = controller.MS[0].levels[0].prob + uinit = P.u_exact(t0) + uex = P.u_exact(Tend) + + uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend) + + errHookDiff = np.array(get_sorted(stats, type='e_global_differential_post_step', sortby='time'))[:, 1] + errHookAlg = np.array(get_sorted(stats, type='e_global_algebraic_post_step', sortby='time'))[:, 1] + + errRunDiff = abs(uex.diff[0] - uend.diff[0]) + errRunAlg = abs(uex.alg[0] - uend.alg[0]) + + assert np.isclose(errHookDiff, errRunDiff), 'ERROR: Error in differential component does not match!' + assert np.isclose(errHookAlg, errRunAlg), 'ERROR: Error in algebraic component does not match!' 
diff --git a/pySDC/tests/test_projects/test_DAE/test_SemiImplicitDAE.py b/pySDC/tests/test_projects/test_DAE/test_SemiImplicitDAE.py new file mode 100644 index 0000000000..ccd37ebec1 --- /dev/null +++ b/pySDC/tests/test_projects/test_DAE/test_SemiImplicitDAE.py @@ -0,0 +1,353 @@ +import pytest +import numpy as np + + +def getTestSetup(problem, sweeper, hook_class): + r""" + Returns the description for the tests. + + Parameters + ---------- + hook_class : list + Hook classes to log statistics such as errors. + + Returns + ------- + description : dict + Contains the parameters for one run. + controller_params : dict + Controller specific parameters. + """ + + level_params = { + 'restol': 1e-13, + } + + problem_params = { + 'newton_tol': 1e-6, + } + + step_params = { + 'maxiter': 60, + } + + controller_params = { + 'logger_level': 30, + 'hook_class': hook_class, + } + + description = { + 'problem_class': problem, + 'problem_params': problem_params, + 'sweeper_class': sweeper, + 'level_params': level_params, + 'step_params': step_params, + } + return description, controller_params + + +@pytest.mark.base +@pytest.mark.parametrize('initial_guess', ['spread', 'zero', 'random']) +def testPredict(initial_guess): + r""" + In this test the predict function of the sweeper is tested. 
+ """ + + from pySDC.projects.DAE.sweepers.SemiImplicitDAE import SemiImplicitDAE + from pySDC.projects.DAE.problems.DiscontinuousTestDAE import DiscontinuousTestDAE + from pySDC.projects.DAE.misc.DAEMesh import DAEMesh + from pySDC.core.Step import step + + description, _ = getTestSetup(DiscontinuousTestDAE, SemiImplicitDAE, []) + + sweeper_params = { + 'quad_type': 'RADAU-RIGHT', + 'num_nodes': 2, + 'QI': 'IE', + 'initial_guess': initial_guess, + } + description.update({'sweeper_params': sweeper_params}) + + level_params = description['level_params'] + level_params.update({'dt': 0.1}) + description.update({'level_params': level_params}) + + S = step(description=description) + L = S.levels[0] + P = L.prob + + assert isinstance(L.sweep, SemiImplicitDAE), "Sweeper cannot instantiate an object of type SemiImplicitDAE!" + + L.status.time = 1.0 + L.u[0] = P.u_exact(L.time) + + L.sweep.predict() + + assert isinstance(L.u[0], DAEMesh), "Initial condition u0 is not of type DAEMesh!" + assert isinstance(L.f[0], DAEMesh), "Initial condition f0 is not of type DAEMesh!" + + assert np.allclose(L.f[0], 0.0), "Gradient at starting time needs to be initialised as zero!" + if initial_guess == 'spread': + uSpread = [L.u[m] for m in range(1, L.sweep.coll.num_nodes)] + fSpread = [L.f[m] for m in range(1, L.sweep.coll.num_nodes)] + assert np.allclose(uSpread, L.u[0]), "Initial condition u0 is not spreaded!" + assert np.allclose(fSpread, L.f[0]), "Gradient needs to be spreaded as zero!" + elif initial_guess == 'zero': + uZero = [L.u[m] for m in range(1, L.sweep.coll.num_nodes)] + fZero = [L.f[m] for m in range(1, L.sweep.coll.num_nodes)] + assert np.allclose(uZero, 0.0), "Initial condition u0 is not spreaded!" + assert np.allclose(fZero, L.f[0]), "Gradient needs to be spreaded as zero!" 
+ elif initial_guess == 'random': + uRandom = [L.u[m] for m in range(1, L.sweep.coll.num_nodes)] + fRandom = [L.f[m] for m in range(1, L.sweep.coll.num_nodes)] + assert all(abs(uRandomItem) > 0.0 for uRandomItem in uRandom), "Initial condition u0 is not spreaded!" + assert all(abs(fRandomItem) > 0.0 for fRandomItem in fRandom), "Gradient needs to be spreaded as zero!" + + +@pytest.mark.base +@pytest.mark.parametrize('residual_type', ['full_abs', 'last_abs', 'full_rel', 'last_rel', 'else']) +def testComputeResidual(residual_type): + r""" + In this test the predict function of the sweeper is tested. + """ + + from pySDC.projects.DAE.sweepers.SemiImplicitDAE import SemiImplicitDAE + from pySDC.projects.DAE.problems.DiscontinuousTestDAE import DiscontinuousTestDAE + from pySDC.core.Step import step + from pySDC.core.Errors import ParameterError + + description, _ = getTestSetup(DiscontinuousTestDAE, SemiImplicitDAE, []) + + sweeper_params = { + 'quad_type': 'RADAU-RIGHT', + 'num_nodes': 2, + 'QI': 'IE', + 'initial_guess': 'spread', + } + description.update({'sweeper_params': sweeper_params}) + + level_params = description['level_params'] + level_params.update({'dt': 0.1}) + level_params.update({'residual_type': residual_type}) + description.update({'level_params': level_params}) + + S = step(description=description) + L = S.levels[0] + P = L.prob + + L.status.time = 1.0 + L.u[0] = P.u_exact(L.time) + L.sweep.predict() + if residual_type == 'else': + with pytest.raises(ParameterError): + L.sweep.compute_residual() + else: + L.sweep.compute_residual() + + uRef = P.u_exact(L.time) + duRef = P.dtype_f(P.init) + + resNormRef = [] + for m in range(L.sweep.coll.num_nodes): + # use abs function from data type here + resNormRef.append(abs(P.eval_f(uRef, duRef, L.time + L.dt * L.sweep.coll.nodes[m]))) + + if residual_type == 'full_abs': + assert L.status.residual == max(resNormRef) + elif residual_type == 'last_abs': + assert L.status.residual == resNormRef[-1] + elif 
residual_type == 'full_rel': + assert L.status.residual == max(resNormRef) / abs(uRef) + elif residual_type == 'last_rel': + assert L.status.residual == resNormRef[-1] / abs(uRef) + + +@pytest.mark.base +@pytest.mark.parametrize('quad_type', ['RADAU-RIGHT', 'RADAU-LEFT']) +def testComputeEndpoint(quad_type): + r""" + In this test the predict function of the sweeper is tested. + """ + + from pySDC.projects.DAE.sweepers.SemiImplicitDAE import SemiImplicitDAE + from pySDC.projects.DAE.problems.DiscontinuousTestDAE import DiscontinuousTestDAE + from pySDC.core.Step import step + from pySDC.core.Errors import ParameterError + + description, _ = getTestSetup(DiscontinuousTestDAE, SemiImplicitDAE, []) + + sweeper_params = { + 'quad_type': quad_type, + 'num_nodes': 2, + 'QI': 'IE', + 'initial_guess': 'spread', + } + description.update({'sweeper_params': sweeper_params}) + + level_params = description['level_params'] + level_params.update({'dt': 0.1}) + description.update({'level_params': level_params}) + + if quad_type == 'RADAU-LEFT': + with pytest.raises(ParameterError): + S = step(description=description) + with pytest.raises(NotImplementedError): + S.levels[0].sweep.compute_end_point() + else: + S = step(description=description) + + L = S.levels[0] + P = L.prob + + L.status.time = 1.0 + L.u[0] = P.u_exact(L.time) + L.sweep.predict() + + assert isinstance(L.uend, type(None)), "u at end node is not of NoneType!" + + L.sweep.compute_end_point() + + assert np.isclose(L.u[-1], L.uend), "Endpoint is not computed correctly!" + + +@pytest.mark.base +@pytest.mark.parametrize('M', [2, 3]) +def testCompareResults(M): + r""" + Test checks whether the results of the ``fully_implicit_DAE`` sweeper matches + with the ``SemiImplicitDAE`` version. 
+ """ + + from pySDC.projects.DAE.sweepers.SemiImplicitDAE import SemiImplicitDAE + from pySDC.projects.DAE.sweepers.fully_implicit_DAE import fully_implicit_DAE + from pySDC.projects.DAE.problems.DiscontinuousTestDAE import DiscontinuousTestDAE + from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI + + descrSI, controller_params = getTestSetup(DiscontinuousTestDAE, SemiImplicitDAE, []) + descrFI, _ = getTestSetup(DiscontinuousTestDAE, fully_implicit_DAE, []) + + sweeper_params = { + 'quad_type': 'RADAU-RIGHT', + 'num_nodes': M, + 'QI': 'IE', + } + descrSI.update({'sweeper_params': sweeper_params}) + descrFI.update({'sweeper_params': sweeper_params}) + + level_paramsSI = descrSI['level_params'] + level_paramsSI.update({'dt': 0.1}) + descrSI.update({'level_params': level_paramsSI}) + + level_paramsFI = descrFI['level_params'] + level_paramsFI.update({'dt': 0.1}) + descrFI.update({'level_params': level_paramsFI}) + + t0 = 1.0 + Tend = 1.1 + + controllerSI = controller_nonMPI(num_procs=1, controller_params=controller_params, description=descrSI) + controllerFI = controller_nonMPI(num_procs=1, controller_params=controller_params, description=descrFI) + + P = controllerSI.MS[0].levels[0].prob + uinit = P.u_exact(t0) + + uendSI, _ = controllerSI.run(u0=uinit, t0=t0, Tend=Tend) + uendFI, _ = controllerFI.run(u0=uinit, t0=t0, Tend=Tend) + + assert np.allclose(uendSI, uendFI), "Values at end time does not match!" + + errSI, errFI = abs(uendSI - P.u_exact(Tend)), abs(uendFI - P.u_exact(Tend)) + assert np.allclose(errSI, errFI), "Errors does not match!" + + +@pytest.mark.base +@pytest.mark.parametrize('case', [0, 1]) +@pytest.mark.parametrize('M', [2, 3]) +@pytest.mark.parametrize('QI', ['IE', 'LU']) +def testOrderAccuracy(case, M, QI): + r""" + In this test, the order of accuracy of the ``SemiImplicitDAE`` sweeper is tested for an index-1 DAE + and an index-2 DAE of semi-explicit form. 
+ """ + + from pySDC.projects.DAE.sweepers.SemiImplicitDAE import SemiImplicitDAE + from pySDC.projects.DAE.problems.DiscontinuousTestDAE import DiscontinuousTestDAE + from pySDC.projects.DAE.problems.simple_DAE import simple_dae_1 + from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI + from pySDC.projects.DAE.misc.HookClass_DAE import ( + LogGlobalErrorPostStepDifferentialVariable, + LogGlobalErrorPostStepAlgebraicVariable, + ) + from pySDC.helpers.stats_helper import get_sorted + + problem = { + 0: DiscontinuousTestDAE, + 1: simple_dae_1, + } + + interval = { + 'DiscontinuousTestDAE': (1.0, 1.5), + 'simple_dae_1': (0.0, 0.4), + } + + refOrderDiff = { + 'DiscontinuousTestDAE': 2 * M - 1, + 'simple_dae_1': 2 * M - 1, + } + + # note that for index-2 DAEs there is order reduction in alg. variable + refOrderAlg = { + 'DiscontinuousTestDAE': 2 * M - 1, + 'simple_dae_1': M, + } + + hook_class = [LogGlobalErrorPostStepDifferentialVariable, LogGlobalErrorPostStepAlgebraicVariable] + description, controller_params = getTestSetup(problem[case], SemiImplicitDAE, hook_class) + + sweeper_params = { + 'quad_type': 'RADAU-RIGHT', + 'num_nodes': M, + 'QI': QI, + } + description.update({'sweeper_params': sweeper_params}) + + level_params = description['level_params'] + + intervalCase = interval[problem[case].__name__] + t0, Tend = intervalCase[0], intervalCase[-1] + dt_list = np.logspace(-1.7, -1.0, num=5) + + errorsDiff, errorsAlg = np.zeros(len(dt_list)), np.zeros(len(dt_list)) + for i, dt in enumerate(dt_list): + level_params.update({'dt': dt}) + + controller = controller_nonMPI(num_procs=1, controller_params=controller_params, description=description) + + P = controller.MS[0].levels[0].prob + uinit = P.u_exact(t0) + + _, stats = controller.run(u0=uinit, t0=t0, Tend=Tend) + + errorsDiff[i] = max( + np.array(get_sorted(stats, type='e_global_differential_post_step', sortby='time', recomputed=False))[:, 1] + ) + errorsAlg[i] = max( + 
np.array(get_sorted(stats, type='e_global_algebraic_post_step', sortby='time', recomputed=False))[:, 1] + ) + + orderDiff = np.mean( + [ + np.log(errorsDiff[i] / errorsDiff[i - 1]) / np.log(dt_list[i] / dt_list[i - 1]) + for i in range(1, len(dt_list)) + ] + ) + orderAlg = np.mean( + [np.log(errorsAlg[i] / errorsAlg[i - 1]) / np.log(dt_list[i] / dt_list[i - 1]) for i in range(1, len(dt_list))] + ) + + assert np.isclose( + orderDiff, refOrderDiff[problem[case].__name__], atol=1e0 + ), f"Expected order {refOrderDiff[problem[case].__name__]} in differential variable, got {orderDiff}" + assert np.isclose( + orderAlg, refOrderAlg[problem[case].__name__], atol=1e0 + ), f"Expected order {refOrderAlg[problem[case].__name__]} in algebraic variable, got {orderAlg}" diff --git a/pySDC/tests/test_projects/test_DAE/test_sweeper.py b/pySDC/tests/test_projects/test_DAE/test_fully_implicit_DAE.py similarity index 91% rename from pySDC/tests/test_projects/test_DAE/test_sweeper.py rename to pySDC/tests/test_projects/test_DAE/test_fully_implicit_DAE.py index e2dbd43b21..ff1c0bfb27 100644 --- a/pySDC/tests/test_projects/test_DAE/test_sweeper.py +++ b/pySDC/tests/test_projects/test_DAE/test_fully_implicit_DAE.py @@ -21,7 +21,6 @@ def test_predict_main(): # initialize problem parameters problem_params = dict() problem_params['newton_tol'] = 1e-12 # tollerance for implicit solver - problem_params['nvars'] = 3 # Fill description dictionary for easy hierarchy creation description = dict() @@ -41,10 +40,10 @@ def test_predict_main(): # call prediction function to initialise nodes L.sweep.predict() # check correct initialisation - assert np.array_equal(L.f[0], np.zeros(3)) + assert np.allclose(abs(L.f[0]), 0.0) for i in range(sweeper_params['num_nodes']): - assert np.array_equal(L.u[i + 1], np.zeros(3)) - assert np.array_equal(L.f[i + 1], np.zeros(3)) + assert np.allclose(abs(L.u[i + 1]), 0.0) + assert np.allclose(abs(L.f[i + 1]), 0.0) # rerun check for random initialisation # expecting that 
random initialisation does not initialise to zero @@ -58,10 +57,10 @@ def test_predict_main(): # compute initial value (using the exact function here) L.u[0] = P.u_exact(L.time) L.sweep.predict() - assert np.array_equal(L.f[0], np.zeros(3)) + assert abs(L.f[0]) == 0.0 for i in range(sweeper_params['num_nodes']): - assert np.not_equal(L.u[i + 1], np.zeros(3)).any() - assert np.not_equal(L.f[i + 1], np.zeros(3)).any() + assert abs(L.u[i + 1]) > 0.0 + assert abs(L.f[i + 1]) > 0.0 @pytest.mark.base @@ -83,7 +82,6 @@ def test_residual_main(): # initialize problem parameters problem_params = dict() problem_params['newton_tol'] = 1e-12 # tollerance for implicit solver - problem_params['nvars'] = 3 # Fill description dictionary for easy hierarchy creation description = dict() @@ -101,8 +99,10 @@ def test_residual_main(): # set reference values u = P.dtype_u(P.init) du = P.dtype_u(P.init) - u[:] = (5, 5, 5) - du[:] = (0, 0, 0) + u.diff[:2] = (5, 5) + u.alg[0] = 5 + du.diff[:2] = (0, 0) + du.alg[0] = 0 # set initial time in the status of the level L.status.time = 0.0 L.u[0] = u @@ -168,7 +168,6 @@ def test_compute_end_point_main(): # initialize problem parameters problem_params = dict() problem_params['newton_tol'] = 1e-12 # tollerance for implicit solver - problem_params['nvars'] = 3 # Fill description dictionary for easy hierarchy creation description = dict() @@ -192,4 +191,4 @@ def test_compute_end_point_main(): L.sweep.compute_end_point() for m in range(1, L.sweep.coll.num_nodes): - assert np.array_equal(L.u[m], L.uend), "ERROR: end point not computed correctly" + assert np.allclose(abs(L.u[m] - L.uend), 0.0), "ERROR: end point not computed correctly" diff --git a/pySDC/tests/test_projects/test_DAE/test_misc.py b/pySDC/tests/test_projects/test_DAE/test_misc.py deleted file mode 100644 index 0ab713951e..0000000000 --- a/pySDC/tests/test_projects/test_DAE/test_misc.py +++ /dev/null @@ -1,20 +0,0 @@ -import pytest - - -# -# Tests that problem class enforces parameter 
requirements -@pytest.mark.base -def test_problem_class_main(): - from pySDC.projects.DAE.problems.simple_DAE import simple_dae_1 - - # initialize problem parameters - problem_params = dict() - - # instantiate problem - try: - simple_dae_1(**problem_params) - # ensure error thrown is correct - except Exception as error: - assert type(error) == TypeError, "Parameter error was not thrown correctly" - else: - raise Exception("Parameter error was not thrown correctly") diff --git a/pySDC/tests/test_projects/test_DAE/test_problems.py b/pySDC/tests/test_projects/test_DAE/test_problems.py index f291a70fdc..dbc901cfbb 100644 --- a/pySDC/tests/test_projects/test_DAE/test_problems.py +++ b/pySDC/tests/test_projects/test_DAE/test_problems.py @@ -10,15 +10,12 @@ def test_pendulum_u_exact_main(): # initialize problem parameters problem_params = dict() problem_params['newton_tol'] = 1e-3 # tollerance for implicit solver - problem_params['nvars'] = 5 # instantiate problem prob = pendulum_2d(**problem_params) u_test = prob.u_exact(5.0) - assert np.array_equal(u_test, np.zeros(5)) - - u_test = prob.u_exact(5.0) + assert np.isclose(abs(u_test), 0.0) @pytest.mark.base @@ -28,15 +25,12 @@ def test_one_transistor_amplifier_u_exact_main(): # initialize problem parameters problem_params = dict() problem_params['newton_tol'] = 1e-12 # tollerance for implicit solver - problem_params['nvars'] = 5 # instantiate problem prob = one_transistor_amplifier(**problem_params) u_test = prob.u_exact(5.0) - assert np.array_equal(u_test, np.zeros(5)) - - u_test = prob.u_exact(5.0) + assert np.array_equal(abs(u_test), 0.0) @pytest.mark.base @@ -46,15 +40,12 @@ def test_two_transistor_amplifier_u_exact_main(): # initialize problem parameters problem_params = dict() problem_params['newton_tol'] = 1e-3 # tollerance for implicit solver - problem_params['nvars'] = 8 # instantiate problem prob = two_transistor_amplifier(**problem_params) u_test = prob.u_exact(5.0) - assert np.array_equal(u_test, np.zeros(8)) - 
- u_test = prob.u_exact(5.0) + assert np.isclose(abs(u_test), 0.0) # @@ -65,7 +56,6 @@ def test_pendulum_main(): from pySDC.projects.DAE.problems.simple_DAE import pendulum_2d from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI from pySDC.projects.DAE.sweepers.fully_implicit_DAE import fully_implicit_DAE - from pySDC.projects.DAE.misc.HookClass_DAE import error_hook # initialize level parameters level_params = dict() @@ -80,7 +70,6 @@ def test_pendulum_main(): # initialize problem parameters problem_params = dict() problem_params['newton_tol'] = 1e-3 # tollerance for implicit solver - problem_params['nvars'] = 5 # initialize step parameters step_params = dict() @@ -89,7 +78,6 @@ def test_pendulum_main(): # initialize controller parameters controller_params = dict() controller_params['logger_level'] = 30 - # controller_params['hook_class'] = error_hook # Fill description dictionary for easy hierarchy creation description = dict() @@ -113,10 +101,11 @@ def test_pendulum_main(): # call main function to get things done... uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend) - uend_ref = [0.98613917, -0.16592027, 0.29956023, 1.77825875, 4.82500525] - + uend_ref = P.dtype_u(P.init) + uend_ref.diff[:4] = (0.98613917, -0.16592027, 0.29956023, 1.77825875) + uend_ref.alg[0] = 4.82500525 # check error - err = np.linalg.norm(uend - uend_ref, np.inf) + err = abs(uend.diff - uend_ref.diff) assert np.isclose(err, 0.0, atol=1e-4), "Error too large." 
@@ -125,7 +114,6 @@ def test_one_transistor_amplifier_main(): from pySDC.projects.DAE.problems.transistor_amplifier import one_transistor_amplifier from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI from pySDC.projects.DAE.sweepers.fully_implicit_DAE import fully_implicit_DAE - from pySDC.projects.DAE.misc.HookClass_DAE import error_hook # initialize level parameters level_params = dict() @@ -140,7 +128,6 @@ def test_one_transistor_amplifier_main(): # initialize problem parameters problem_params = dict() problem_params['newton_tol'] = 1e-3 # tollerance for implicit solver - problem_params['nvars'] = 5 # initialize step parameters step_params = dict() @@ -149,7 +136,6 @@ def test_one_transistor_amplifier_main(): # initialize controller parameters controller_params = dict() controller_params['logger_level'] = 30 - # controller_params['hook_class'] = error_hook # Fill description dictionary for easy hierarchy creation description = dict() @@ -174,10 +160,11 @@ def test_one_transistor_amplifier_main(): # call main function to get things done... uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend) - uend_ref = [-0.02182035, 3.06674603, 2.89634691, 2.45212382, -2.69727238] + uend_ref = P.dtype_u(P.init) + uend_ref[:] = (-0.02182035, 3.06674603, 2.89634691, 2.45212382, -2.69727238) # check error - err = np.linalg.norm(uend - uend_ref, np.inf) + err = abs(uend - uend_ref) assert np.isclose(err, 0.0, atol=1e-4), "Error too large." 
@@ -186,7 +173,6 @@ def test_two_transistor_amplifier_main(): from pySDC.projects.DAE.problems.transistor_amplifier import two_transistor_amplifier from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI from pySDC.projects.DAE.sweepers.fully_implicit_DAE import fully_implicit_DAE - from pySDC.projects.DAE.misc.HookClass_DAE import error_hook # initialize level parameters level_params = dict() @@ -201,7 +187,6 @@ def test_two_transistor_amplifier_main(): # initialize problem parameters problem_params = dict() problem_params['newton_tol'] = 1e-3 # tollerance for implicit solver - problem_params['nvars'] = 8 # initialize step parameters step_params = dict() @@ -210,7 +195,6 @@ def test_two_transistor_amplifier_main(): # initialize controller parameters controller_params = dict() controller_params['logger_level'] = 30 - # controller_params['hook_class'] = error_hook # Fill description dictionary for easy hierarchy creation description = dict() @@ -235,7 +219,8 @@ def test_two_transistor_amplifier_main(): # call main function to get things done... uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend) - uend_ref = [ + uend_ref = P.dtype_u(P.init) + uend_ref[:] = ( -5.52721527e-03, 3.00630407e00, 2.84974338e00, @@ -244,10 +229,10 @@ def test_two_transistor_amplifier_main(): 2.19430889e00, 5.89240699e00, 9.99531182e-02, - ] + ) # check error - err = np.linalg.norm(uend - uend_ref, np.inf) + err = abs(uend - uend_ref) assert np.isclose(err, 0.0, atol=1e-4), "Error too large." 
@@ -256,7 +241,6 @@ def test_synchgen_infinite_bus_main(): from pySDC.projects.DAE.problems.synchronous_machine import synchronous_machine_infinite_bus from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI from pySDC.projects.DAE.sweepers.fully_implicit_DAE import fully_implicit_DAE - from pySDC.projects.DAE.misc.HookClass_DAE import error_hook # initialize level parameters level_params = dict() @@ -271,7 +255,6 @@ def test_synchgen_infinite_bus_main(): # initialize problem parameters problem_params = dict() problem_params['newton_tol'] = 1e-3 # tollerance for implicit solver - problem_params['nvars'] = 14 # initialize step parameters step_params = dict() @@ -280,7 +263,6 @@ def test_synchgen_infinite_bus_main(): # initialize controller parameters controller_params = dict() controller_params['logger_level'] = 30 - # controller_params['hook_class'] = error_hook # Fill description dictionary for easy hierarchy creation description = dict() @@ -305,25 +287,29 @@ def test_synchgen_infinite_bus_main(): # call main function to get things done... uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend) - uend_ref = [ + uend_ref = P.dtype_u(P.init) + uend_ref.diff[:8] = ( 8.30823565e-01, -4.02584174e-01, 1.16966755e00, 9.47592808e-01, -3.68076863e-01, -3.87492326e-01, + 3.10281509e-01, + 9.94039645e-01, + ) + + uend_ref.alg[:6] = ( -7.77837831e-01, -1.67347611e-01, 1.34810867e00, 5.46223705e-04, 1.29690691e-02, -8.00823474e-02, - 3.10281509e-01, - 9.94039645e-01, - ] + ) # check error - err = np.linalg.norm(uend - uend_ref, np.inf) + err = abs(uend.diff - uend_ref.diff) assert np.isclose(err, 0.0, atol=1e-4), "Error too large." 
@@ -342,31 +328,37 @@ def test_DiscontinuousTestDAE_singularity(): eps = 1e-3 t_before_event = t_event - eps u_before_event = disc_test_DAE.u_exact(t_before_event) - du_before_event = (np.sinh(t_before_event), np.cosh(t_before_event)) + du_before_event = disc_test_DAE.dtype_f(disc_test_DAE.init) + du_before_event.diff[0] = np.sinh(t_before_event) + du_before_event.alg[0] = np.cosh(t_before_event) f_before_event = disc_test_DAE.eval_f(u_before_event, du_before_event, t_before_event) - assert np.isclose(f_before_event[0], 0.0) and np.isclose( - f_before_event[1], 0.0 - ), f"ERROR: Right-hand side after event does not match! Expected {(0.0, 0.0)}, got {f_before_event}" + assert np.isclose( + abs(f_before_event), 0.0 + ), f"ERROR: Right-hand side after event does not match! Expected {(0.0, 0.0)}, got {f_before_event=}" # test for t <= t^* u_event = disc_test_DAE.u_exact(t_event) - du_event = (np.sinh(t_event), np.cosh(t_event)) + du_event = disc_test_DAE.dtype_f(disc_test_DAE.init) + du_event.diff[0] = np.sinh(t_event) + du_event.alg[0] = np.cosh(t_event) f_event = disc_test_DAE.eval_f(u_event, du_event, t_event) - assert np.isclose(f_event[0], 7 * np.sqrt(51.0)) and np.isclose( - f_event[1], 0.0 - ), f"ERROR: Right-hand side at event does not match! Expected {(7 * np.sqrt(51), 0.0)}, got {f_event}" + assert np.isclose(f_event.diff[0], 7 * np.sqrt(51.0)) and np.isclose( + f_event.alg[0], 0.0 + ), f"ERROR: Right-hand side at event does not match! 
Expected {(7 * np.sqrt(51), 0.0)}, got {(f_event.diff[0], f_event.alg[0])}" # test for t > t^* by setting t^* = t^* + eps t_after_event = t_event + eps u_after_event = disc_test_DAE.u_exact(t_after_event) - du_after_event = (np.sinh(t_event), np.cosh(t_event)) + du_after_event = disc_test_DAE.dtype_f(disc_test_DAE.init) + du_after_event.diff[0] = np.sinh(t_event) + du_after_event.alg[0] = np.cosh(t_event) f_after_event = disc_test_DAE.eval_f(u_after_event, du_after_event, t_after_event) - assert np.isclose(f_after_event[0], 7 * np.sqrt(51.0)) and np.isclose( - f_after_event[1], 0.0 - ), f"ERROR: Right-hand side after event does not match! Expected {(7 * np.sqrt(51), 0.0)}, got {f_after_event}" + assert np.isclose(f_after_event.diff[0], 7 * np.sqrt(51.0)) and np.isclose( + f_after_event.alg[0], 0.0 + ), f"ERROR: Right-hand side after event does not match! Expected {(7 * np.sqrt(51), 0.0)}, got {(f_after_event.diff[0], f_after_event.alg[0])}" @pytest.mark.base @@ -431,12 +423,12 @@ def test_DiscontinuousTestDAE_SDC(M): uend, _ = controller.run(u0=uinit, t0=t0, Tend=Tend) - err = abs(uex[0] - uend[0]) - assert err < err_tol[M], f"ERROR: Error is too large! Expected {err_tol[M]}, got {err}" + err = abs(uex.diff[0] - uend.diff[0]) + assert err < err_tol[M], f"ERROR: Error is too large! 
Expected {err_tol[M]=}, got {err=}" @pytest.mark.base -@pytest.mark.parametrize('M', [2, 3, 4, 5]) +@pytest.mark.parametrize('M', [3, 4, 5]) def test_DiscontinuousTestDAE_SDC_detection(M): """ Test for one SDC run with event detection if the found event is close to the exact value and if the global error @@ -450,22 +442,14 @@ def test_DiscontinuousTestDAE_SDC_detection(M): from pySDC.projects.PinTSimE.switch_estimator import SwitchEstimator from pySDC.implementations.convergence_controller_classes.basic_restarting import BasicRestartingNonMPI - err_tol = { - 2: 5.3952e-9, - 3: 2.6741e-9, - 4: 1.9163e-8, - 5: 2.4791e-8, - } - event_err_tol = { - 2: 3.6968e-5, - 3: 1.3496e-8, - 4: 0.02, - 5: 0.0101, + 3: 0.02, + 4: 5e-10, + 5: 1e-10, } level_params = { - 'restol': 1e-13, + 'restol': 1e-10, 'dt': 1e-2, } @@ -489,7 +473,7 @@ def test_DiscontinuousTestDAE_SDC_detection(M): switch_estimator_params = { 'tol': 1e-10, - 'alpha': 0.95, + 'alpha': 0.96, } restarting_params = { @@ -522,9 +506,8 @@ def test_DiscontinuousTestDAE_SDC_detection(M): uex = P.u_exact(Tend) uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend) - - err = abs(uex[0] - uend[0]) - assert err < err_tol[M], f"ERROR for M={M}: Error is too large! Expected {err_tol[M]}, got {err}" + err = abs(uex.diff[0] - uend.diff[0]) + assert err < 2e-9, f"ERROR for M={M}: Error is too large! Expected something lower than {2e-9}, got {err=}" switches = get_sorted(stats, type='switch', sortby='time', recomputed=False) assert len(switches) >= 1, 'ERROR for M={M}: No events found!' @@ -535,7 +518,7 @@ def test_DiscontinuousTestDAE_SDC_detection(M): event_err = abs(t_switch_exact - t_switch) assert ( event_err < event_err_tol[M] - ), f"ERROR for M={M}: Event error is too large! Expected {event_err_tol[M]}, got {event_err}" + ), f"ERROR for M={M}: Event error is too large! 
Expected {event_err_tol[M]=}, got {event_err=}" @pytest.mark.base @@ -557,21 +540,11 @@ def test_WSCC9_evaluation(): # test if right-hand side of does have the correct length t0 = 0.0 u0 = WSCC9.u_exact(t0) - du0 = np.zeros(len(u0)) + du0 = WSCC9.dtype_f(WSCC9.init, val=0.0) f = WSCC9.eval_f(u0, du0, t0) - assert len(f) == nvars, 'Shape of f does not match with shape it is supposed to be!' - - # test if ParameterError is raised if m != 3 or n != 9 is set - problem_params.update( - { - 'm': 4, - 'n': 8, - } - ) - with pytest.raises(ParameterError): - WSCC9_test = WSCC9BusSystem(**problem_params) + assert len(f.diff) == nvars and len(f.alg) == nvars, 'Shape of f does not match with shape it is supposed to be!' @pytest.mark.base @@ -630,7 +603,7 @@ def test_WSCC9_update_YBus(): assert np.allclose(YBus_initial, YBus_initial_ref), 'YBus does not match with the YBus at initialization!' - uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend) + _, _ = controller.run(u0=uinit, t0=t0, Tend=Tend) YBus_line_outage = P.YBus YBus_line6_8_outage = get_event_Ybus() @@ -664,7 +637,7 @@ def test_WSCC9_SDC_detection(): sweeper_params = { 'quad_type': 'RADAU-RIGHT', - 'num_nodes': 2, + 'num_nodes': 3, 'QI': 'LU', } @@ -678,7 +651,7 @@ def test_WSCC9_SDC_detection(): switch_estimator_params = { 'tol': 1e-10, - 'alpha': 0.95, + 'alpha': 0.97, } restarting_params = { @@ -709,12 +682,14 @@ def test_WSCC9_SDC_detection(): P = controller.MS[0].levels[0].prob uinit = P.u_exact(t0) - uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend) + _, stats = controller.run(u0=uinit, t0=t0, Tend=Tend) switches = get_sorted(stats, type='switch', sortby='time', recomputed=False) assert len(switches) >= 1, 'ERROR: No events found!' t_switch = [me[1] for me in switches][0] - assert np.isclose(t_switch, 0.6103290792685618, atol=1e-3), 'Found event does not match a threshold!' + assert np.isclose( + t_switch, 0.528458886745887, atol=1e-3 + ), f'Found event does not match a threshold! 
Got {t_switch=}' # @pytest.mark.base diff --git a/pySDC/tests/test_projects/test_monodomain/test_monodomain_convergence.py b/pySDC/tests/test_projects/test_monodomain/test_monodomain_convergence.py new file mode 100644 index 0000000000..571d6a7e25 --- /dev/null +++ b/pySDC/tests/test_projects/test_monodomain/test_monodomain_convergence.py @@ -0,0 +1,193 @@ +import pytest + + +def run_monodomain_convergence( + dt_max, n_dt, expected_convergence_rate, convergence_rate_tolerance, compute_init_val, compute_ref_sol, **opts +): + from pySDC.projects.Monodomain.run_scripts.run_MonodomainODE import setup_and_run + + opts["num_sweeps"] = [1] + + dt_list = [dt_max / 2**i for i in range(n_dt)] + + # skip residual computation at coarser levels (if any) + opts["skip_residual_computation"] = True + + # interpolate or recompute rhs on fine level + opts["finter"] = False + + # set time parallelism to True or emulated (False) + opts["truly_time_parallel"] = False + + # set monodomain parameters + opts["domain_name"] = "cuboid_1D_small" # small problem for this pytest + opts["refinements"] = [0] + opts["order"] = 2 # 2 or 4 + opts["ionic_model_name"] = ( + "TTP_SMOOTH" # a smoothed ionic model, the original TTP model has (very small) discontinuities due if-else statements in its implementation + ) + opts["enable_output"] = False + opts["write_database"] = False + + opts["output_root"] = "results_convergence" + + # save some values for later + opts_bak = opts.copy() + + # In order to initiate an action potential the monodomain problem needs a stimulus. In our code the stimulus is a step function. + # Due to its non smoothness we dont want to use it in the convergence test. Therefore we first generate an initial value, + # using the step function, and then we use this initial value as the initial value for the convergence test. In that way the non smooth + # stimulus is not used in the convergence test. + + # First, compute an initial value for the convergence test. 
+ opts["dt"] = 0.1 + opts["restol"] = 5e-8 # residual tolerance, doesn't need to be very small for the initial value + opts["read_init_val"] = False + opts["init_time"] = 0.0 + opts["end_time"] = 3.0 + opts["write_as_reference_solution"] = True # write the initial value + opts["write_all_variables"] = True # write all variables, not only the potential + opts["output_file_name"] = "init_val_DCT" + opts["ref_sol"] = "" + if compute_init_val: + print("Computing initial value for the convergence test...") + err, rel_err, avg_niters, times, niters, residuals = setup_and_run(**opts) + + # Second, compute a reference solution for the convergence test. + opts["dt"] = dt_list[-1] / 4.0 + opts["restol"] = 1e-14 # residual tolerance, very small to no pollute convergence + opts["read_init_val"] = True + opts["init_time"] = 3.0 # start at t0=3 + opts["end_time"] = opts_bak["end_time"] # end at t = t0+end_time + opts["write_as_reference_solution"] = True # write as reference solution + opts["write_all_variables"] = ( + False # write only the potential. The other ionic model variables are not taken in account in the convergence test. 
+ ) + opts["output_file_name"] = "ref_sol" + if compute_ref_sol: + print("Computing reference solution for the convergence test...") + err, rel_err, avg_niters, times, niters, residuals = setup_and_run(**opts) + + # Third, run the convergence test + opts["write_as_reference_solution"] = False + opts["write_all_variables"] = False + opts["ref_sol"] = "ref_sol" + + print("Running convergence test...") + rel_err = [0.0] * n_dt + for i, dt in enumerate(dt_list): + print(f"Iteration {i} of {n_dt}...") + opts["dt"] = dt + opts["output_file_name"] = "monodomain_dt_" + str(dt).replace(".", "p") + err, rel_err[i], avg_niters, times, niters, residuals = setup_and_run(**opts) + + import numpy as np + + rates = np.zeros(n_dt - 1) + for i in range(n_dt - 1): + rates[i] = np.log(rel_err[i] / rel_err[i + 1]) / np.log(dt_list[i] / dt_list[i + 1]) + + print("\nConvergence test results") + print(f"Relative errors: {rel_err}") + print(f"Rates: {rates}") + + assert np.all(rates > expected_convergence_rate - convergence_rate_tolerance), "ERROR: convergence rate is too low!" 
+ + return dt_list, rel_err + + +@pytest.mark.monodomain +def test_monodomain_convergence_ESDC_TTP(): + max_iter_6_dt, max_iter_6_rel_err = run_monodomain_convergence( + dt_max=0.2, + n_dt=5, + expected_convergence_rate=6.0, + convergence_rate_tolerance=1.0, + compute_init_val=True, + compute_ref_sol=True, + integrator="IMEXEXP_EXPRK", + num_nodes=[6], + max_iter=6, + n_time_ranks=1, + end_time=0.2, + ) + + max_iter_3_dt, max_iter_3_rel_err = run_monodomain_convergence( + dt_max=0.2, + n_dt=5, + expected_convergence_rate=3.0, + convergence_rate_tolerance=0.5, + compute_init_val=False, + compute_ref_sol=False, + integrator="IMEXEXP_EXPRK", + num_nodes=[6], + max_iter=3, + n_time_ranks=1, + end_time=0.2, + ) + + import numpy as np + + max_iter_3_dt = np.array(max_iter_3_dt) + max_iter_3_rel_err = np.array(max_iter_3_rel_err) + max_iter_6_dt = np.array(max_iter_6_dt) + max_iter_6_rel_err = np.array(max_iter_6_rel_err) + + import pySDC.helpers.plot_helper as plt_helper + + plt_helper.setup_mpl() + plt_helper.newfig(textwidth=238.96, scale=0.89) + + lw = 1.5 + colors = ["C0", "C1", "C2", "C3", "C4"] + markers = ["o", "x", "s", "D", "^"] + + plt_helper.plt.loglog( + max_iter_3_dt, + max_iter_3_rel_err, + label="$k=3$", + lw=lw, + linestyle="-", + color=colors[0], + marker=markers[0], + markerfacecolor="none", + markeredgewidth=1.2, + markersize=7.5, + ) + plt_helper.plt.loglog( + max_iter_6_dt, + max_iter_6_rel_err, + label="$k=6$", + lw=lw, + linestyle="-", + color=colors[1], + marker=markers[1], + markerfacecolor="none", + markeredgewidth=1.2, + markersize=7.5, + ) + plt_helper.plt.loglog( + max_iter_3_dt, + 0.1 * np.min(max_iter_3_rel_err) * (max_iter_3_dt / max_iter_3_dt[-1]) ** 3, + linewidth=2, + linestyle="--", + color="k", + label=r"$\mathcal{{O}}(\Delta t^3)$", + ) + plt_helper.plt.loglog( + max_iter_6_dt, + 0.1 * np.min(max_iter_6_rel_err) * (max_iter_6_dt / max_iter_6_dt[-1]) ** 6, + linewidth=2, + linestyle="-", + color="k", + label=r"$\mathcal{{O}}(\Delta 
t^6)$", + ) + plt_helper.plt.legend(loc="lower right", ncol=1) + plt_helper.plt.ylabel('rel. err.') + plt_helper.plt.xlabel(r"$\Delta t$") + plt_helper.plt.grid() + plt_helper.savefig("data/convergence_ESDC_fixed_iter", save_pdf=False, save_pgf=False, save_png=True) + + +if __name__ == "__main__": + test_monodomain_convergence_ESDC_TTP() diff --git a/pySDC/tests/test_projects/test_monodomain/test_monodomain_iterations.py b/pySDC/tests/test_projects/test_monodomain/test_monodomain_iterations.py new file mode 100644 index 0000000000..e588d5b2fe --- /dev/null +++ b/pySDC/tests/test_projects/test_monodomain/test_monodomain_iterations.py @@ -0,0 +1,120 @@ +import pytest + + +def check_iterations(expected_avg_niters, **opts): + from pySDC.projects.Monodomain.run_scripts.run_MonodomainODE import setup_and_run + + # define sweeper parameters + opts["integrator"] = "IMEXEXP_EXPRK" + opts["num_sweeps"] = [1] + + # set step parameters + opts["max_iter"] = 100 + + # set level parameters + opts["dt"] = 0.025 + + opts["restol"] = 5e-8 # residual tolerance + + # skip residual computation at coarser levels (if any) + opts["skip_residual_computation"] = True + + # interpolate or recompute rhs on fine level + opts["finter"] = True + + # set time parallelism to True or emulated (False) + opts["truly_time_parallel"] = False + + # set monodomain parameters + opts["order"] = 4 # 2 or 4 + opts["enable_output"] = False + opts["write_database"] = False + + opts["output_root"] = "results_iterations_pytest" + + opts["read_init_val"] = False + opts["init_time"] = 0.0 + opts["end_time"] = 2.0 + opts["write_as_reference_solution"] = False + opts["write_all_variables"] = False + opts["output_file_name"] = "monodomain" + opts["ref_sol"] = "" + + err, rel_err, avg_niters, times, niters, residuals = setup_and_run(**opts) + + print(f"Got average number of iterations {avg_niters}, expected was {expected_avg_niters}") + + assert avg_niters == pytest.approx( + expected_avg_niters, rel=0.1 + ), 
f"Average number of iterations {avg_niters} too different from the expected {expected_avg_niters}" + + +# Many of the following are commented since they test features already tested in other tests +# If you reactivate them the number of expected iterations should be updated + + +@pytest.mark.monodomain +def test_monodomain_iterations_ESDC_BS(): + check_iterations( + domain_name="cuboid_2D_small", + num_nodes=[6, 3], + refinements=[0, -1], + ionic_model_name="BS", + n_time_ranks=4, + expected_avg_niters=3.3209876543209877, + ) + + +# @pytest.mark.monodomain +# def test_monodomain_iterations_MLESDC_BS(): +# check_iterations(num_nodes=[6, 3], ionic_model_name="BS", expected_avg_niters=2.03125) + + +@pytest.mark.monodomain +def test_monodomain_iterations_ESDC_HH(): + check_iterations( + domain_name="cuboid_2D_small", + num_nodes=[6, 3], + refinements=[0, -1], + ionic_model_name="HH", + n_time_ranks=2, + expected_avg_niters=3.074074074074074, + ) + + +# @pytest.mark.monodomain +# def test_monodomain_iterations_MLESDC_HH(): +# check_iterations(num_nodes=[6, 3], ionic_model_name="HH", expected_avg_niters=2.80625) + + +@pytest.mark.monodomain +def test_monodomain_iterations_ESDC_CRN(): + check_iterations( + domain_name="cube_1D", + num_nodes=[6], + refinements=[0], + ionic_model_name="CRN", + n_time_ranks=1, + expected_avg_niters=3.382716, + ) + + +# @pytest.mark.monodomain +# def test_monodomain_iterations_MLESDC_CRN(): +# check_iterations(num_nodes=[6, 3], ionic_model_name="CRN", expected_avg_niters=2.3625) + + +# @pytest.mark.monodomain +# def test_monodomain_iterations_ESDC_TTP(): +# check_iterations(num_nodes=[6], ionic_model_name="TTP", expected_avg_niters=3.60625) + + +# @pytest.mark.monodomain +# def test_monodomain_iterations_MLESDC_TTP(): +# check_iterations(num_nodes=[6, 3], ionic_model_name="TTP", expected_avg_niters=2.90625) + + +# if __name__ == "__main__": +# test_monodomain_iterations_ESDC_BS() +# test_monodomain_iterations_ESDC_HH() +# 
test_monodomain_iterations_ESDC_CRN() diff --git a/pySDC/tests/test_projects/test_monodomain/test_monodomain_iterations_parallel.py b/pySDC/tests/test_projects/test_monodomain/test_monodomain_iterations_parallel.py new file mode 100644 index 0000000000..feab5126e9 --- /dev/null +++ b/pySDC/tests/test_projects/test_monodomain/test_monodomain_iterations_parallel.py @@ -0,0 +1,273 @@ +import pytest +import os +import subprocess + + +def plot_iter_info(iters_info_list, labels_list, key1, key2, logy, xlabel, ylabel, ymin, ymax, title, output_file_name): + + markers = ["o", "x", "s", "D", "v", "^", "<", ">", "p", "h", "H", "*", "+", "X", "d", "|", "_"] + colors = ["C0", "C1", "C2", "C3", "C4", "C5", "C6", "C7", "C8", "C9"] + + import pySDC.helpers.plot_helper as plt_helper + + plt_helper.setup_mpl() + plt_helper.newfig(textwidth=238.96, scale=0.89) + + lw = 1.5 + colors = ["C0", "C1", "C2", "C3", "C4"] + markers = ["o", "x", "s", "D", "^"] + + if logy: + plt_helper.plt.yscale("log", base=10) + + for i, (iters_info, label) in enumerate(zip(iters_info_list, labels_list)): + plt_helper.plt.plot( + iters_info[key1], + iters_info[key2], + label=label, + lw=lw, + color=colors[i], + marker=markers[i], + markerfacecolor="none", + markeredgewidth=1.2, + markersize=7.5, + ) + + if ymin is not None and ymax is not None: + plt_helper.plt.set_ylim([ymin, ymax]) + + plt_helper.plt.legend(loc="lower right", ncol=1) + plt_helper.plt.ylabel(ylabel) + plt_helper.plt.xlabel(xlabel) + plt_helper.plt.title(title) + plt_helper.plt.grid() + plt_helper.savefig("data/" + output_file_name, save_pdf=False, save_pgf=False, save_png=True) + + +def options_command(options): + cmd = "" + for key, val in options.items(): + if type(val) is list: + opt = key + if type(val[0]) is int: + arg = ",".join([str(v).replace("-", "_") for v in val]) + else: + arg = ",".join([map(str, val)]) + elif type(val) is bool: + if not val: + opt = "no-" + key + else: + opt = key + arg = "" + else: + opt = key + arg = 
str(val) + cmd = cmd + " --" + opt + (" " + arg if arg != "" else "") + return cmd + + +def generate_initial_value(ionic_model_name): + from pySDC.projects.Monodomain.run_scripts.run_MonodomainODE import setup_and_run + + opts = dict() + + # define sweeper parameters + opts["integrator"] = "IMEXEXP_EXPRK" + opts["num_nodes"] = [5] + opts["num_sweeps"] = [1] + + # set step parameters + opts["max_iter"] = 100 + + # set level parameters + opts["dt"] = 0.1 + + opts["restol"] = 5e-8 # residual tolerance + + opts["truly_time_parallel"] = False + opts["n_time_ranks"] = 1 + + # skip residual computation at coarser levels (if any) + opts["skip_residual_computation"] = True + + # interpolate or recompute rhs on fine level + opts["finter"] = False + + # set monodomain parameters + opts["domain_name"] = "cuboid_1D_small" + opts["ionic_model_name"] = ionic_model_name + opts["refinements"] = [0] + opts["order"] = 4 # 2 or 4 + + opts["enable_output"] = False + opts["write_database"] = False + + opts["output_root"] = "results_iterations_parallel" + + opts["read_init_val"] = False + opts["init_time"] = 0.0 + opts["end_time"] = 6.0 + opts["write_as_reference_solution"] = True + opts["write_all_variables"] = True + opts["output_file_name"] = "init_val_DCT" + opts["ref_sol"] = "" + + err, rel_err, avg_niters, times, niters, residuals = setup_and_run(**opts) + + +def check_iterations_parallel(expected_avg_niters, **options): + # define sweeper parameters + + options["num_sweeps"] = [1] + + # set step parameters + options["max_iter"] = 100 + options["dt"] = 0.025 + + # set level parameters + options["restol"] = 5e-8 + + options["end_time"] = 0.6 + + # set problem parameters + options["domain_name"] = "cuboid_1D_small" + options["refinements"] = [0] + options["order"] = 4 + options["read_init_val"] = True + options["init_time"] = 3.0 + options["enable_output"] = False + options["write_as_reference_solution"] = False + options["write_all_variables"] = False + options["output_file_name"] = 
"monodomain" + options["output_root"] = "results_iterations_parallel" + options["skip_res"] = True + options["finter"] = False + options["write_database"] = True + + my_env = os.environ.copy() + my_env['PYTHONPATH'] = '.:../../../..' + my_env['COVERAGE_PROCESS_START'] = 'pyproject.toml' + cwd = "pySDC/projects/Monodomain/run_scripts" + + # base_python_command = "coverage run -p run_MonodomainODE_cli.py" + base_python_command = "coverage run -p " + cwd + "/run_MonodomainODE_cli.py" + cmd = f"mpirun -n {options['n_time_ranks']} " + base_python_command + " " + options_command(options) + + print(f"Running command: {cmd}") + + process = subprocess.Popen( + args=cmd.split(), + text=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + env=my_env, + cwd=".", + ) + + while True: + output = process.stdout.readline() + if output == '' and process.poll() is not None: + break + if output: + print(output.strip()) + + process.wait() + + assert ( + process.returncode == 0 + ), f"ERROR: did not get return code 0, got {process.returncode} with {options['n_time_ranks']} processes" + + # read the generated data + executed_file_dir = os.path.dirname(os.path.realpath(__file__)) + file_name = ( + executed_file_dir + + "/../../../../data/" + + options["output_root"] + + "/" + + options["domain_name"] + + "/ref_" + + str(options["refinements"][0]) + + "/" + + options["ionic_model_name"] + + "/" + + options["output_file_name"] + ) + from pySDC.projects.Monodomain.utils.data_management import database + + data_man = database(file_name) + # errors = data_man.read_dictionary("errors") + iters_info = data_man.read_dictionary("iters_info") + + print(f"Got average number of iterations {iters_info['avg_niters']}, expected was {expected_avg_niters}") + + assert iters_info['avg_niters'] == pytest.approx( + expected_avg_niters, rel=0.1 + ), f"Average number of iterations {iters_info['avg_niters']} too different from the expected {expected_avg_niters}" + + return iters_info + + 
+@pytest.mark.monodomain +def test_monodomain_iterations_ESDC_MLESDC_PFASST(): + + generate_initial_value(ionic_model_name="TTP") + + ESDC_iters_info = check_iterations_parallel( + integrator="IMEXEXP_EXPRK", + num_nodes=[8], + ionic_model_name="TTP", + truly_time_parallel=True, + n_time_ranks=1, + expected_avg_niters=3.58333, + ) + + MLESDC_iters_info = check_iterations_parallel( + integrator="IMEXEXP_EXPRK", + num_nodes=[8, 4], + ionic_model_name="TTP", + truly_time_parallel=True, + n_time_ranks=1, + expected_avg_niters=2.0, + ) + + PFASST_iters_info = check_iterations_parallel( + integrator="IMEXEXP_EXPRK", + num_nodes=[8, 4], + ionic_model_name="TTP", + truly_time_parallel=True, + n_time_ranks=24, + expected_avg_niters=3.0, + ) + + iters_info_list = [ESDC_iters_info, MLESDC_iters_info, PFASST_iters_info] + labels_list = ["ESDC", "MLESDC", "PFASST"] + plot_iter_info( + iters_info_list, + labels_list, + key1='times', + key2='niters', + logy=False, + xlabel="$t$", + ylabel=r"\# iter", + ymin=None, + ymax=None, + title="Number of iterations", + output_file_name="niter_VS_time", + ) + plot_iter_info( + iters_info_list, + labels_list, + key1='times', + key2='residuals', + logy=True, + xlabel="$t$", + ylabel="residual", + ymin=None, + ymax=None, + title="Residual over time", + output_file_name="res_VS_time", + ) + + +if __name__ == "__main__": + test_monodomain_iterations_ESDC_MLESDC_PFASST() diff --git a/pySDC/tests/test_projects/test_monodomain/test_monodomain_stability_domain.py b/pySDC/tests/test_projects/test_monodomain/test_monodomain_stability_domain.py new file mode 100644 index 0000000000..73fb53e719 --- /dev/null +++ b/pySDC/tests/test_projects/test_monodomain/test_monodomain_stability_domain.py @@ -0,0 +1,68 @@ +import pytest + + +@pytest.mark.monodomain +def test_monodomain_stability_ESDC(): + from pySDC.projects.Monodomain.run_scripts.run_TestODE import main + + main( + integrator="IMEXEXP_EXPRK", + dl=2, + l_min=-100, + openmp=True, + n_time_ranks=1, + 
end_time=1.0, + num_nodes=[5], + check_stability=True, + ) + + # This is to generate the image only, we do not check for stabiltiy since we already know that + # SDC is unstable for this problem + main( + integrator="IMEXEXP", + dl=2, + l_min=-100, + openmp=True, + n_time_ranks=1, + end_time=1.0, + num_nodes=[5], + check_stability=False, + ) + + +# @pytest.mark.monodomain +# def test_monodomain_stability_MLESDC(): +# from pySDC.projects.Monodomain.run_scripts.run_TestODE import main + +# main( +# integrator="IMEXEXP_EXPRK", +# dl=2, +# l_min=-100, +# openmp=True, +# n_time_ranks=1, +# end_time=1.0, +# num_nodes=[5, 3], +# check_stability=True, +# ) + + +# @pytest.mark.monodomain +# def test_monodomain_stability_PFASST(): +# from pySDC.projects.Monodomain.run_scripts.run_TestODE import main + +# main( +# integrator="IMEXEXP_EXPRK", +# dl=2, +# l_min=-100, +# openmp=True, +# n_time_ranks=4, +# end_time=1.0, +# num_nodes=[5, 3], +# check_stability=True, +# ) + + +# if __name__ == "__main__": +# test_monodomain_stability_ESDC() +# test_monodomain_stability_MLESDC() +# test_monodomain_stability_PFASST() diff --git a/pySDC/tests/test_projects/test_parallelSDC_reloaded.py b/pySDC/tests/test_projects/test_parallelSDC_reloaded.py new file mode 100644 index 0000000000..8614f4716b --- /dev/null +++ b/pySDC/tests/test_projects/test_parallelSDC_reloaded.py @@ -0,0 +1,115 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Sun Feb 4 09:53:34 2024 + +Testing scripts for the parallel_SDC_reloaded project +""" +import sys +import pytest + + +@pytest.mark.base +@pytest.mark.parametrize( + "sName", + [ + "convergence", + "nilpotency", + "stability", + ], +) +def test_script(sName): + try: + exec(f"from pySDC.projects.parallelSDC_reloaded import {sName}") + except Exception as e: + raise ImportError(f"error when executing {sName}.py : {e}") + + +@pytest.mark.base +@pytest.mark.parametrize("sType", ["setup", "accuracy"]) +@pytest.mark.parametrize( + "pName", + [ + 
"allenCahn", + "chemicalReaction", + "jacobiElliptic", + "kaps", + "lorenz", + "protheroRobinson", + "protheroRobinsonAutonomous", + "vanderpol", + ], +) +def test_playgrounds(pName, sType): + try: + exec(f"from pySDC.projects.parallelSDC_reloaded import {pName}_{sType}") + except Exception as e: + raise ImportError(f"error when executing {pName}_{sType}.py : {e}") + + +@pytest.mark.base +def test_script_fig01_conv(): + from pySDC.projects.parallelSDC_reloaded.scripts import fig01_conv + + assert fig01_conv.config == [ + (4, "RADAU-RIGHT", "MIN-SR-NS"), + (5, "LOBATTO", "MIN-SR-NS"), + (4, "RADAU-RIGHT", "MIN-SR-S"), + (5, "LOBATTO", "MIN-SR-S"), + (4, "RADAU-RIGHT", "MIN-SR-FLEX"), + (5, "LOBATTO", "MIN-SR-FLEX"), + ] + + +@pytest.mark.base +def test_script_fig02_stab(): + from pySDC.projects.parallelSDC_reloaded.scripts import fig02_stab + + assert fig02_stab.config == [ + "PIC", + "MIN-SR-NS", + "MIN-SR-S", + "MIN-SR-FLEX", + "LU", + "VDHS", + ] + + +@pytest.mark.base +def test_script_fig03_lorenz(): + from pySDC.projects.parallelSDC_reloaded.scripts import fig03_lorenz + + minPrec = fig03_lorenz.minPrec + assert minPrec == ["MIN-SR-NS", "MIN-SR-S", "MIN-SR-FLEX"] + assert fig03_lorenz.config == [ + [(*minPrec, "LU", "EE", "PIC"), 4], + [(*minPrec, "VDHS", "RK4", "ESDIRK43"), 4], + [(*minPrec, "PIC", "RK4", "ESDIRK43"), 5], + ] + + +@pytest.mark.base +def test_script_fig04_protheroRobinson(): + from pySDC.projects.parallelSDC_reloaded.scripts import fig04_protheroRobinson + + minPrec = fig04_protheroRobinson.minPrec + assert minPrec == ["MIN-SR-NS", "MIN-SR-S", "MIN-SR-FLEX"] + assert fig04_protheroRobinson.config == [ + [(*minPrec, "VDHS", "ESDIRK43", "LU"), 4], + [(*minPrec, "VDHS", "ESDIRK43", "LU"), 6], + ] + + +@pytest.mark.base +def test_script_fig05_allenCahn(): + # Test fails for python < 3.8, so avoid it + if sys.version_info.minor < 8: + return + + from pySDC.projects.parallelSDC_reloaded.scripts import fig05_allenCahn + + minPrec = 
fig05_allenCahn.minPrec + assert minPrec == ["MIN-SR-NS", "MIN-SR-S", "MIN-SR-FLEX"] + assert fig05_allenCahn.config == [ + (*minPrec, "VDHS", "ESDIRK43", "LU"), + ] diff --git a/pySDC/tests/test_projects/test_pintsime/test_SwitchEstimator.py b/pySDC/tests/test_projects/test_pintsime/test_SwitchEstimator.py new file mode 100644 index 0000000000..4de3df9db5 --- /dev/null +++ b/pySDC/tests/test_projects/test_pintsime/test_SwitchEstimator.py @@ -0,0 +1,405 @@ +import numpy as np +import pytest + + +def getParamsRun(): + r""" + Returns parameters for conroller run that are used in each test. + """ + restol = -1 + alpha = 0.95 + maxiter = 1 + max_restarts = 3 + useA = False + useSE = True + exact_event_time_avail = True + return restol, alpha, maxiter, max_restarts, useA, useSE, exact_event_time_avail + + +@pytest.mark.base +def testExactDummyProblem(): + r""" + Test for dummy problems. The test verifies that the dummy problems exactly returns the dynamics of + the parent class. ``eval_f`` of ``ExactDiscontinuousTestDAE`` is not tested here, since it only returns + a random right-hand side to enforce the sweeper to do not stop to compute. + """ + + from pySDC.implementations.problem_classes.DiscontinuousTestODE import ( + DiscontinuousTestODE, + ExactDiscontinuousTestODE, + ) + + childODE = ExactDiscontinuousTestODE(**{}) + parentODE = DiscontinuousTestODE(**{}) + assert childODE.t_switch_exact == parentODE.t_switch_exact, "Exact event times between classes does not match!" 
+ + t0 = 1.0 + dt = 0.1 + u0 = parentODE.u_exact(t0) + rhs = u0.copy() + + uSolve = childODE.solve_system(rhs, dt, u0, t0) + uExact = parentODE.u_exact(t0) + assert np.allclose(uSolve, uExact) + + # same test for event time + tExactEventODE = parentODE.t_switch_exact + dt = 0.1 + u0Event = parentODE.u_exact(tExactEventODE) + rhsEvent = u0.copy() + + uSolveEvent = childODE.solve_system(rhsEvent, dt, u0Event, tExactEventODE) + uExactEvent = parentODE.u_exact(tExactEventODE) + assert np.allclose(uSolveEvent, uExactEvent) + + fExactOde = childODE.eval_f(u0, t0) + fOde = parentODE.eval_f(u0, t0) + assert np.allclose(fExactOde, fOde), "Right-hand sides do not match!" + + fExactOdeEvent = childODE.eval_f(u0Event, tExactEventODE) + fOdeEvent = parentODE.eval_f(u0Event, tExactEventODE) + assert np.allclose(fExactOdeEvent, fOdeEvent), "Right-hand sides at event do not match!" + + +@pytest.mark.base +@pytest.mark.parametrize('quad_type', ['LOBATTO', 'RADAU-RIGHT']) +def testAdaptInterpolationInfo(quad_type): + r""" + Tests if the method ``adapt_interpolation_info`` does what it is supposed to do. + + - For ``quad_type='RADAU-RIGHT'``, the value at ``t0`` has to be added to the list of + ``state_function``, since it is no collocation node but can be used for the interpolation + anyway. + + - For ``quad_type='LOBATTO'``, the first value in ``state_function`` has to be removed + since ``t0`` is also a collocation node here and the state function would include double values. + + Parameters + ---------- + quad_type : str + Type of quadrature used. 
+ """ + + from pySDC.projects.PinTSimE.battery_model import generateDescription + from pySDC.projects.PinTSimE.switch_estimator import SwitchEstimator + from pySDC.implementations.sweeper_classes.generic_implicit import generic_implicit + from pySDC.implementations.problem_classes.DiscontinuousTestODE import ExactDiscontinuousTestODE + + problem = ExactDiscontinuousTestODE + problem_params = dict() + t0 = 1.6 + Tend = problem(**problem_params).t_switch_exact + eps = 1e-13 # choose this eps to enforce a sign chance in state function + dt = (Tend - t0) + eps + + sweeper = generic_implicit + num_nodes = 3 + QI = 'IE' + + tol = 1e-10 + + restol, alpha, maxiter, max_restarts, useA, useSE, _ = getParamsRun() + + hook_class = [] + + _, _, controller = generateDescription( + dt=dt, + problem=problem, + sweeper=sweeper, + num_nodes=num_nodes, + quad_type=quad_type, + QI=QI, + hook_class=hook_class, + use_adaptivity=useA, + use_switch_estimator=useSE, + problem_params=problem_params, + restol=restol, + maxiter=maxiter, + max_restarts=max_restarts, + tol_event=tol, + alpha=alpha, + ) + + S = controller.MS[0] + L = S.levels[0] + P = L.prob + + # instance of switch estimator + SE = controller.convergence_controllers[ + np.arange(len(controller.convergence_controllers))[ + [type(me).__name__ == SwitchEstimator.__name__ for me in controller.convergence_controllers] + ][0] + ] + + S.status.slot = 0 + L.status.time = t0 + S.status.iter = 10 + L.status.residual = 0.0 + L.u[0] = P.u_exact(L.status.time) + + L.sweep.predict() + + # perform one update to get state function with different signs + L.sweep.update_nodes() + + SE.get_new_step_size(controller, S) + + t_interp, state_function = SE.params.t_interp, SE.params.state_function + + assert len(t_interp) == len( + state_function + ), 'Length of interpolation values does not match with length of list containing the state function' + + if quad_type == 'LOBATTO': + assert t_interp[0] != t_interp[1], 'Starting time from interpolation 
axis is not removed!' + assert ( + len(t_interp) == num_nodes + ), f'Number of values on interpolation axis does not match. Expected {num_nodes=}, got {len(t_interp)}' + + elif quad_type == 'RADAU-RIGHT': + assert ( + len(t_interp) == num_nodes + 1 + ), f'Number of values on interpolation axis does not match. Expected {num_nodes + 1=}, got {len(t_interp)}' + + +@pytest.mark.base +@pytest.mark.parametrize('num_nodes', [3, 4, 5]) +def testDetectionBoundary(num_nodes): + """ + This test checks whether a restart is executed or not when the event exactly occurs at the boundary. In this case, + no restart should be done because occuring the event at the boundary means that the event is already resolved well, + i.e., the state function there should have a value close to zero. + + Parameters + ---------- + num_nodes : int + Number of collocation nodes. + """ + + from pySDC.projects.PinTSimE.battery_model import generateDescription, controllerRun + from pySDC.implementations.sweeper_classes.generic_implicit import generic_implicit + from pySDC.implementations.problem_classes.DiscontinuousTestODE import ExactDiscontinuousTestODE + from pySDC.implementations.hooks.log_solution import LogSolution + from pySDC.implementations.hooks.log_restarts import LogRestarts + from pySDC.helpers.stats_helper import get_sorted + + problem = ExactDiscontinuousTestODE + problem_params = dict() + t0 = 1.6 + Tend = problem(**problem_params).t_switch_exact + dt = Tend - t0 + + sweeper = generic_implicit + QI = 'IE' + quad_type = 'LOBATTO' + + tol = 1e-10 + + restol, alpha, maxiter, max_restarts, useA, useSE, exact_event_time_avail = getParamsRun() + + hook_class = [LogSolution, LogRestarts] + + description, controller_params, controller = generateDescription( + dt=dt, + problem=problem, + sweeper=sweeper, + num_nodes=num_nodes, + quad_type=quad_type, + QI=QI, + hook_class=hook_class, + use_adaptivity=useA, + use_switch_estimator=useSE, + problem_params=problem_params, + restol=restol, + 
maxiter=maxiter, + max_restarts=max_restarts, + tol_event=tol, + alpha=alpha, + ) + + stats, _ = controllerRun( + description=description, + controller_params=controller_params, + controller=controller, + t0=t0, + Tend=Tend, + exact_event_time_avail=exact_event_time_avail, + ) + + sum_restarts = np.sum(np.array(get_sorted(stats, type='restart', sortby='time', recomputed=None))[:, 1]) + assert sum_restarts == 0, 'Event occurs at boundary, but restart(s) are executed anyway!' + + +@pytest.mark.base +@pytest.mark.parametrize('tol', [10 ** (-m) for m in range(8, 13)]) +@pytest.mark.parametrize('num_nodes', [3, 4, 5]) +@pytest.mark.parametrize('quad_type', ['LOBATTO', 'RADAU-RIGHT']) +def testDetectionODE(tol, num_nodes, quad_type): + r""" + Here, the switch estimator is applied to a dummy problem of ``DiscontinuousTestODE``, + where the dynamics of the differential equation is replaced by its exact dynamics to see if + the switch estimator predicts the event correctly. The problem is tested for a combination + of different tolerances ``tol`` and different number of collocation nodes ``num_nodes``. + + Since the problem only uses the exact dynamics, the event should be predicted very accurately + by the switch estimator. + + Parameters + ---------- + tol : float + Tolerance for switch estimator. + num_nodes : int + Number of collocation nodes. + quad_type : str + Type of quadrature. 
+ """ + + from pySDC.implementations.sweeper_classes.generic_implicit import generic_implicit + from pySDC.implementations.problem_classes.DiscontinuousTestODE import ExactDiscontinuousTestODE + from pySDC.helpers.stats_helper import get_sorted + from pySDC.projects.PinTSimE.battery_model import generateDescription, controllerRun + from pySDC.implementations.hooks.log_solution import LogSolution + from pySDC.implementations.hooks.log_restarts import LogRestarts + + problem = ExactDiscontinuousTestODE + problem_params = dict() + t0 = 1.6 + Tend = 1.62 + dt = Tend - t0 + + sweeper = generic_implicit + QI = 'IE' + + restol, alpha, maxiter, max_restarts, useA, useSE, exact_event_time_avail = getParamsRun() + + hook_class = [LogSolution, LogRestarts] + + description, controller_params, controller = generateDescription( + dt=dt, + problem=problem, + sweeper=sweeper, + num_nodes=num_nodes, + quad_type=quad_type, + QI=QI, + hook_class=hook_class, + use_adaptivity=useA, + use_switch_estimator=useSE, + problem_params=problem_params, + restol=restol, + maxiter=maxiter, + max_restarts=max_restarts, + tol_event=tol, + alpha=alpha, + ) + + stats, t_switch_exact = controllerRun( + description=description, + controller_params=controller_params, + controller=controller, + t0=t0, + Tend=Tend, + exact_event_time_avail=exact_event_time_avail, + ) + + # in this specific example only one event has to be found + switches = [me[1] for me in get_sorted(stats, type='switch', sortby='time', recomputed=False)] + assert len(switches) >= 1, f'{problem.__name__}: No events found for tol={tol}!' + + t_switch = switches[-1] + event_err = abs(t_switch - t_switch_exact) + assert np.isclose(event_err, 0, atol=1.2e-11), f'Event time error {event_err=} is not small enough!' 
+ + +@pytest.mark.base +@pytest.mark.parametrize('num_nodes', [3, 4, 5]) +def testDetectionDAE(num_nodes): + r""" + In this test, the switch estimator is applied to a DAE dummy problem of ``DiscontinuousTestDAE``, + where the dynamics of the differential equation is replaced by its exact dynamics to see if + the switch estimator predicts the event correctly. The problem is tested for a combination + of different tolerances ``tol`` and different number of collocation nodes ``num_nodes``. + + Since the problem only uses the exact dynamics, the event should be predicted very accurately + by the switch estimator. + + Parameters + ---------- + tol : float + Tolerance for switch estimator. + num_nodes : int + Number of collocation nodes. + quad_type : str + Type of quadrature. + """ + + from pySDC.projects.DAE.sweepers.fully_implicit_DAE import fully_implicit_DAE + from pySDC.projects.DAE.problems.DiscontinuousTestDAE import DiscontinuousTestDAE + from pySDC.helpers.stats_helper import get_sorted + from pySDC.projects.PinTSimE.battery_model import generateDescription, controllerRun + from pySDC.implementations.hooks.log_solution import LogSolution + from pySDC.implementations.hooks.log_restarts import LogRestarts + from pySDC.projects.DAE.misc.HookClass_DAE import LogGlobalErrorPostStepDifferentialVariable + from pySDC.projects.PinTSimE.paper_PSCC2024.log_event import LogEventDiscontinuousTestDAE + + problem = DiscontinuousTestDAE + problem_params = {'newton_tol': 1e-6} + t0 = 4.6 + Tend = 4.62 + dt = Tend - t0 + tol = 1e-10 + + sweeper = fully_implicit_DAE + QI = 'LU' + quad_type = 'RADAU-RIGHT' + + _, _, _, _, useA, useSE, exact_event_time_avail = getParamsRun() + + restol = 1e-11 + maxiter = 60 + max_restarts = 20 + alpha = 0.97 + + hook_class = [LogSolution, LogRestarts, LogEventDiscontinuousTestDAE, LogGlobalErrorPostStepDifferentialVariable] + + description, controller_params, controller = generateDescription( + dt=dt, + problem=problem, + sweeper=sweeper, + 
num_nodes=num_nodes, + quad_type=quad_type, + QI=QI, + hook_class=hook_class, + use_adaptivity=useA, + use_switch_estimator=useSE, + problem_params=problem_params, + restol=restol, + maxiter=maxiter, + max_restarts=max_restarts, + tol_event=tol, + alpha=alpha, + ) + + stats, t_switch_exact = controllerRun( + description=description, + controller_params=controller_params, + controller=controller, + t0=t0, + Tend=Tend, + exact_event_time_avail=exact_event_time_avail, + ) + + # in this specific example only one event has to be found + switches = [me[1] for me in get_sorted(stats, type='switch', sortby='time', recomputed=False)] + assert len(switches) >= 1, f'{problem.__name__}: No events found for {tol=} and {num_nodes=}!' + + t_switch = switches[-1] + event_err = abs(t_switch - t_switch_exact) + assert np.isclose(event_err, 0, atol=2.2e-6), f'Event time error {event_err=} is not small enough!' + + h = np.array([val[1] for val in get_sorted(stats, type='state_function', sortby='time', recomputed=False)]) + assert np.isclose(abs(h[-1]), 0.0, atol=2e-9), f'State function is not close to zero; value is {h[-1]}' + + e_global = np.array(get_sorted(stats, type='e_global_differential_post_step', sortby='time', recomputed=False)) + assert np.isclose( + e_global[-1, 1], 0.0, atol=9.93e-10 + ), f"Error at end time is too large! 
Expected {1e-11}, got {e_global[-1, 1]}" diff --git a/pySDC/tests/test_projects/test_second_orderSDC/test_convergence.py b/pySDC/tests/test_projects/test_second_orderSDC/test_convergence.py index 3d16a049f4..a459cb269f 100644 --- a/pySDC/tests/test_projects/test_second_orderSDC/test_convergence.py +++ b/pySDC/tests/test_projects/test_second_orderSDC/test_convergence.py @@ -35,11 +35,11 @@ def test_global_convergence(axis): def BorisSDC_global_convergence(): from pySDC.projects.Second_orderSDC.penningtrap_params import penningtrap_params - from pySDC.projects.Second_orderSDC.penningtrap_Simulation import compute_error + from pySDC.projects.Second_orderSDC.penningtrap_Simulation import ComputeError controller_params, description = penningtrap_params() description['level_params']['dt'] = 0.015625 * 2 - conv = compute_error(controller_params, description, time_iter=3, K_iter=(1, 2, 3)) + conv = ComputeError(controller_params, description, time_iter=3, K_iter=(1, 2, 3)) conv.error_type = 'global' conv.compute_global_error_data() @@ -85,13 +85,13 @@ def sort_order(cwd='', filename='data/local_order_vs_approx_order.csv'): def BorisSDC_horizontal_axis(): - from pySDC.projects.Second_orderSDC.penningtrap_Simulation import compute_error + from pySDC.projects.Second_orderSDC.penningtrap_Simulation import ComputeError from pySDC.projects.Second_orderSDC.penningtrap_params import penningtrap_params controller_params, description = penningtrap_params() - description['level_params']['dt'] = 0.015625 / 8 + description['level_params']['dt'] = 0.015625 / 4 - conv = compute_error(controller_params, description, time_iter=3) + conv = ComputeError(controller_params, description, time_iter=3) conv.compute_local_error_data() conv.find_approximate_order() @@ -113,13 +113,13 @@ def test_horizontal_axis(value): def BorisSDC_vertical_axis(): - from pySDC.projects.Second_orderSDC.penningtrap_Simulation import compute_error + from pySDC.projects.Second_orderSDC.penningtrap_Simulation import 
ComputeError from pySDC.projects.Second_orderSDC.penningtrap_params import penningtrap_params controller_params, description = penningtrap_params() - description['level_params']['dt'] = 0.015625 * 8 + description['level_params']['dt'] = 0.015625 * 4 - conv = compute_error(controller_params, description, time_iter=3) + conv = ComputeError(controller_params, description, time_iter=3) conv.compute_local_error_data() conv.find_approximate_order() @@ -150,7 +150,7 @@ def numerical_order(time_data, error): @pytest.mark.parametrize('sweeper_name', METHODS) def test_RKN_VV(sweeper_name, cwd=''): import numpy as np - from pySDC.projects.Second_orderSDC.penningtrap_Simulation import compute_error + from pySDC.projects.Second_orderSDC.penningtrap_Simulation import ComputeError from pySDC.projects.Second_orderSDC.penningtrap_params import penningtrap_params controller_params, description = penningtrap_params() @@ -160,7 +160,7 @@ def test_RKN_VV(sweeper_name, cwd=''): time_iter = np.array([1, 1 / 2, 1 / 4]) time = description['level_params']['dt'] * time_iter - P = compute_error(controller_params, description, time_iter=3) + P = ComputeError(controller_params, description, time_iter=3) if sweeper_name == 'Velocity_Verlet': P.compute_global_error_data(VV=True, work_counter=True) diff --git a/pySDC/tests/test_projects/test_second_orderSDC/test_stability.py b/pySDC/tests/test_projects/test_second_orderSDC/test_stability.py index d0575367d6..54e6f55dcf 100644 --- a/pySDC/tests/test_projects/test_second_orderSDC/test_stability.py +++ b/pySDC/tests/test_projects/test_second_orderSDC/test_stability.py @@ -2,51 +2,56 @@ @pytest.mark.base -def test_stability(): +def test_stability_SDC(): """ Stability domain test only the values of mu=[6, 20] and kappa=[3, 20] It is stable at mu=6, kappa=3 otherwise it is instable """ import numpy as np - from pySDC.projects.Second_orderSDC.penningtrap_Simulation import Stability_implementation - from 
pySDC.projects.Second_orderSDC.dampedharmonic_oscillator_run_stability import dampedharmonic_oscillator_params - - description = dampedharmonic_oscillator_params() - Stability = Stability_implementation(description, kappa_max=14, mu_max=14, Num_iter=(2, 2)) - Stability.lambda_kappa = np.array([6, 20]) - Stability.lambda_mu = np.array([3, 20]) - SDC, KSDC, *_ = Stability.stability_data() - assert ( - SDC[0, 0] <= 1 - ), f'The SDC method is instable at mu={Stability.lambda_mu[0]} and kappa={Stability.lambda_kappa[0]}' - assert ( - SDC[-1, -1] > 1 - ), f'The SDC method is stable at mu={Stability.lambda_mu[-1]} and kappa={Stability.lambda_kappa[-1]}' + from pySDC.projects.Second_orderSDC.stability_simulation import check_points_and_interval + from pySDC.projects.Second_orderSDC.harmonic_oscillator_params import get_default_harmonic_oscillator_description + + description = get_default_harmonic_oscillator_description() + # Additonal params to compute stability points + helper_params = { + 'quad_type_list': ('GAUSS',), + 'Num_iter': (2, 2), + 'num_nodes_list': np.arange(3, 4, 1), + 'max_iter_list': np.arange(5, 6, 1), + } + + points = ((3, 6), (20, 20)) + # Iterate through points and perform stability check + point0 = check_points_and_interval(description, helper_params, points[0], check_stability_point=True) + point1 = check_points_and_interval(description, helper_params, points[1], check_stability_point=True) + + assert point0[-1][-1] <= 1, f'The SDC method is instable at mu={points[0][1]} and kappa={points[0][0]}' + assert point1[-1][-1] > 1, f'The SDC method is stable at mu={points[1][1]} and kappa={points[1][0]}' @pytest.mark.base def test_RKN_stability(): """ - Stability domain test - only the values of mu=[6, 20] and kappa=[3, 20] + Stability domain test for RKN + only the values of mu=[1, 20] and kappa=[1, 20] It is stable at mu=6, kappa=3 otherwise it is instable """ import numpy as np - from pySDC.projects.Second_orderSDC.penningtrap_Simulation import 
Stability_implementation - from pySDC.projects.Second_orderSDC.dampedharmonic_oscillator_run_stability import dampedharmonic_oscillator_params - - description = dampedharmonic_oscillator_params() - Stability = Stability_implementation(description, kappa_max=14, mu_max=14, Num_iter=(2, 2)) - Stability.lambda_kappa = np.array([1, 20]) - Stability.lambda_mu = np.array([1, 20]) - stab_RKN = Stability.stability_data_RKN() + from pySDC.projects.Second_orderSDC.stability_simulation import StabilityImplementation + from pySDC.projects.Second_orderSDC.harmonic_oscillator_params import get_default_harmonic_oscillator_description + + description = get_default_harmonic_oscillator_description() + stability = StabilityImplementation(description, kappa_max=14, mu_max=14, Num_iter=(2, 2)) + stability.lambda_kappa = np.array([1, 20]) + stability.lambda_mu = np.array([1, 20]) + stab_RKN = stability.stability_data_RKN() assert ( stab_RKN[0, 0] <= 1 - ), f'The SDC method is instable at mu={Stability.lambda_mu[0]} and kappa={Stability.lambda_kappa[0]}' + ), f'The RKN method is instable at mu={stability.lambda_mu[0]} and kappa={stability.lambda_kappa[0]}' assert ( stab_RKN[-1, -1] > 1 - ), f'The SDC method is stable at mu={Stability.lambda_mu[-1]} and kappa={Stability.lambda_kappa[-1]}' + ), f'The RKN method is stable at mu={stability.lambda_mu[-1]} and kappa={stability.lambda_kappa[-1]}' if __name__ == '__main__': diff --git a/pySDC/tests/test_sweepers/test_MPI_sweeper.py b/pySDC/tests/test_sweepers/test_MPI_sweeper.py index 1e46dd734e..4ad9791b7a 100644 --- a/pySDC/tests/test_sweepers/test_MPI_sweeper.py +++ b/pySDC/tests/test_sweepers/test_MPI_sweeper.py @@ -1,7 +1,7 @@ import pytest -def run(use_MPI, num_nodes, quad_type, residual_type, imex): +def run(use_MPI, num_nodes, quad_type, residual_type, imex, initGuess, useNCCL): """ Run a single sweep for a problem and compute the solution at the end point with a sweeper as specified. 
@@ -11,11 +11,12 @@ def run(use_MPI, num_nodes, quad_type, residual_type, imex): quad_type (str): Type of nodes residual_type (str): Type of residual computation imex (bool): Use IMEX sweeper or not + initGuess (str): which initial guess should be used + useNCCL (bool): ... Returns: pySDC.Level.level: The level containing relevant data """ - import numpy as np from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI if not imex: @@ -34,9 +35,25 @@ def run(use_MPI, num_nodes, quad_type, residual_type, imex): from pySDC.implementations.problem_classes.HeatEquation_ND_FD import heatNd_forced as problem_class dt = 1e-1 - sweeper_params = {'num_nodes': num_nodes, 'quad_type': quad_type, 'QI': 'IEpar', 'QE': 'PIC'} + sweeper_params = { + 'num_nodes': num_nodes, + 'quad_type': quad_type, + 'QI': 'IEpar', + 'QE': 'PIC', + "initial_guess": initGuess, + } + problem_params = {} + + if useNCCL: + from pySDC.helpers.NCCL_communicator import NCCLComm + from mpi4py import MPI + + sweeper_params['comm'] = NCCLComm(MPI.COMM_WORLD) + problem_params['useGPU'] = True + description = {} description['problem_class'] = problem_class + description['problem_params'] = problem_params description['sweeper_class'] = sweeper_class description['sweeper_params'] = sweeper_params description['level_params'] = {'dt': dt, 'residual_type': residual_type} @@ -47,18 +64,13 @@ def run(use_MPI, num_nodes, quad_type, residual_type, imex): if imex: u0 = controller.MS[0].levels[0].prob.u_exact(0) else: - u0 = np.ones_like(controller.MS[0].levels[0].prob.u_exact(0)) + u0 = controller.MS[0].levels[0].prob.u_exact(0) + 1.0 controller.run(u0, 0, dt) controller.MS[0].levels[0].sweep.compute_end_point() return controller.MS[0].levels[0] -@pytest.mark.mpi4py -@pytest.mark.parametrize("num_nodes", [2]) -@pytest.mark.parametrize("quad_type", ['GAUSS', 'RADAU-RIGHT']) -@pytest.mark.parametrize("residual_type", ['last_abs', 'full_rel']) -@pytest.mark.parametrize("imex", [True, False]) 
-def test_sweeper(num_nodes, quad_type, residual_type, imex, launch=True): +def individual_test(num_nodes, quad_type, residual_type, imex, initGuess, useNCCL, launch=True): """ Make a test if the result matches between the MPI and non-MPI versions of a sweeper. Tests solution at the right end point and the residual. @@ -68,6 +80,8 @@ def test_sweeper(num_nodes, quad_type, residual_type, imex, launch=True): quad_type (str): Type of nodes residual_type (str): Type of residual computation imex (bool): Use IMEX sweeper or not + initGuess (str): which initial guess should be used + useNCCL (bool): ... launch (bool): If yes, it will launch `mpirun` with the required number of processes """ if launch: @@ -79,7 +93,7 @@ def test_sweeper(num_nodes, quad_type, residual_type, imex, launch=True): my_env['PYTHONPATH'] = '../../..:.' my_env['COVERAGE_PROCESS_START'] = 'pyproject.toml' - cmd = f"mpirun -np {num_nodes} python {__file__} --test_sweeper {num_nodes} {quad_type} {residual_type} {imex}".split() + cmd = f"mpirun -np {num_nodes} python {__file__} --test_sweeper {num_nodes} {quad_type} {residual_type} {imex} {initGuess} {useNCCL}".split() p = subprocess.Popen(cmd, env=my_env, cwd=".") @@ -89,20 +103,82 @@ def test_sweeper(num_nodes, quad_type, residual_type, imex, launch=True): num_nodes, ) else: - import numpy as np - - imex = False if imex == 'False' else True - MPI = run(use_MPI=True, num_nodes=int(num_nodes), quad_type=quad_type, residual_type=residual_type, imex=imex) + if useNCCL: + import cupy as xp + else: + import numpy as xp + + MPI = run( + use_MPI=True, + num_nodes=int(num_nodes), + quad_type=quad_type, + residual_type=residual_type, + imex=imex, + initGuess=initGuess, + useNCCL=useNCCL, + ) nonMPI = run( - use_MPI=False, num_nodes=int(num_nodes), quad_type=quad_type, residual_type=residual_type, imex=imex + use_MPI=False, + num_nodes=int(num_nodes), + quad_type=quad_type, + residual_type=residual_type, + imex=imex, + initGuess=initGuess, + useNCCL=False, ) - 
assert np.allclose(MPI.uend, nonMPI.uend, atol=1e-14), 'Got different solutions at end point!' - assert np.allclose(MPI.status.residual, nonMPI.status.residual, atol=1e-14), 'Got different residuals!' + assert xp.allclose(MPI.uend, nonMPI.uend, atol=1e-14), 'Got different solutions at end point!' + assert xp.allclose(MPI.status.residual, nonMPI.status.residual, atol=1e-14), 'Got different residuals!' + + +@pytest.mark.mpi4py +@pytest.mark.parametrize("num_nodes", [2]) +@pytest.mark.parametrize("quad_type", ['GAUSS', 'RADAU-RIGHT']) +@pytest.mark.parametrize("residual_type", ['last_abs', 'full_rel']) +@pytest.mark.parametrize("imex", [True, False]) +@pytest.mark.parametrize("initGuess", ['spread', 'copy', 'zero']) +def test_sweeper(num_nodes, quad_type, residual_type, imex, initGuess, launch=True): + """ + Make a test if the result matches between the MPI and non-MPI versions of a sweeper. + Tests solution at the right end point and the residual. + + Args: + num_nodes (int): The number of nodes to use + quad_type (str): Type of nodes + residual_type (str): Type of residual computation + imex (bool): Use IMEX sweeper or not + launch (bool): If yes, it will launch `mpirun` with the required number of processes + """ + individual_test(num_nodes, quad_type, residual_type, imex, initGuess, useNCCL=False, launch=launch) + + +@pytest.mark.cupy +@pytest.mark.parametrize("num_nodes", [2]) +@pytest.mark.parametrize("quad_type", ['GAUSS', 'RADAU-RIGHT']) +@pytest.mark.parametrize("residual_type", ['last_abs', 'full_rel']) +@pytest.mark.parametrize("imex", [False]) +@pytest.mark.parametrize("initGuess", ['spread', 'copy', 'zero']) +def test_sweeper_NCCL(num_nodes, quad_type, residual_type, imex, initGuess, launch=True): + """ + Make a test if the result matches between the MPI and non-MPI versions of a sweeper. + Tests solution at the right end point and the residual. 
+ + Args: + num_nodes (int): The number of nodes to use + quad_type (str): Type of nodes + residual_type (str): Type of residual computation + imex (bool): Use IMEX sweeper or not + launch (bool): If yes, it will launch `mpirun` with the required number of processes + """ + individual_test(num_nodes, quad_type, residual_type, imex, initGuess, useNCCL=True, launch=launch) if __name__ == '__main__': import sys if '--test_sweeper' in sys.argv: - test_sweeper(sys.argv[-4], sys.argv[-3], sys.argv[-2], sys.argv[-1], launch=False) + imex = False if sys.argv[-3] == 'False' else True + useNCCL = False if sys.argv[-1] == 'False' else True + individual_test( + sys.argv[-6], sys.argv[-5], sys.argv[-4], imex=imex, initGuess=sys.argv[-2], useNCCL=useNCCL, launch=False + ) diff --git a/pySDC/tests/test_sweepers/test_Runge_Kutta_sweeper.py b/pySDC/tests/test_sweepers/test_Runge_Kutta_sweeper.py index bb4f65df40..9e2ca2144c 100644 --- a/pySDC/tests/test_sweepers/test_Runge_Kutta_sweeper.py +++ b/pySDC/tests/test_sweepers/test_Runge_Kutta_sweeper.py @@ -9,6 +9,7 @@ 'RK4', 'Cash_Karp', 'ESDIRK53', + 'ESDIRK43', 'DIRK43', 'Heun_Euler', 'ARK548L2SAESDIRK', @@ -68,13 +69,14 @@ def single_run(sweeper_name, dt, lambdas, use_RK_sweeper=True, Tend=None, useGPU problem_params = {} else: - from pySDC.implementations.problem_classes.TestEquation_0D import testequation0dXPU + from pySDC.implementations.problem_classes.TestEquation_0D import testequation0d - problem_class = testequation0dXPU.get_XPU_version(version='GPU' if useGPU else 'CPU') + problem_class = testequation0d problem_params = { 'lambdas': lambdas, 'u0': 1.0 + 0.0j, + 'useGPU': useGPU, } sweeper_params = { @@ -134,6 +136,7 @@ def test_order(sweeper_name, useGPU=False): 'CrankNicholson': 3, 'Cash_Karp': 6, 'ESDIRK53': 6, + 'ESDIRK43': 5, 'DIRK43': 5, 'Heun_Euler': 3, 'ARK548L2SAERK': 6, @@ -217,6 +220,7 @@ def test_stability(sweeper_name, useGPU=False): 'CrankNicholson': True, 'Cash_Karp': False, 'ESDIRK53': True, + 'ESDIRK43': 
True, 'DIRK43': True, 'Heun_Euler': False, 'ARK548L2SAESDIRK': True, diff --git a/pyproject.toml b/pyproject.toml index e7e5c78ef8..37f2be3aad 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "flit_core.buildapi" [project] name = 'pySDC' -version = '5.3.0' +version = '5.4.3' description = 'A Python implementation of spectral deferred correction methods and the likes' license = {text = "BSD-2-Clause"} readme = 'README.md' @@ -58,6 +58,7 @@ markers = [ 'benchmark: tests for benchmarking', 'cupy: tests for cupy on GPUs', 'libpressio: tests using the libpressio library', + 'monodomain: tests the monodomain project, which requires previous compilation of c++ code', ] timeout = 300