From a22818f0fe61abe3fab99df11cbd50f6ce52e11e Mon Sep 17 00:00:00 2001 From: Ikrom Akramov <96234984+ikrom96git@users.noreply.github.com> Date: Fri, 12 Jan 2024 14:35:23 +0100 Subject: [PATCH] Review (#388) * Bug is fixed and added new code * new code for the table * Edits in markdown file * some edits in test * Bugs fix * Codecov * I cleaned up my code and separated classes to make it easier to work with. It is not ready yet; if Codecov fails, I will include more tests. * forgot black * flake8 * bug fix * Edits codes according to the comments * Edited codes according to the comments in the GitHub * Defined new function in stability_simulation.py to check stability for given points and excluded codecov function that generates a table. * small edits for codecov * removed no cover --- .../problem_classes/HarmonicOscillator.py | 1 + pySDC/projects/Second_orderSDC/README.md | 37 +- .../Second_orderSDC/check_data_folder.py | 2 +- ...dampedharmonic_oscillator_run_stability.py | 67 -- .../harmonic_oscillator_params.py | 36 + .../harmonic_oscillator_run_points.py | 27 + .../harmonic_oscillator_run_stab_interval.py | 29 + .../harmonic_oscillator_run_stability.py | 26 + .../Second_orderSDC/penningtrap_Simulation.py | 922 +++--------------- .../penningtrap_run_Hamiltonian_error.py | 4 +- .../Second_orderSDC/penningtrap_run_error.py | 4 +- .../penningtrap_run_work_precision.py | 6 +- pySDC/projects/Second_orderSDC/plot_helper.py | 406 ++++++++ .../Second_orderSDC/stability_simulation.py | 324 ++++++ .../test_second_orderSDC/test_convergence.py | 20 +- .../test_second_orderSDC/test_stability.py | 59 +- 16 files changed, 1043 insertions(+), 927 deletions(-) delete mode 100644 pySDC/projects/Second_orderSDC/dampedharmonic_oscillator_run_stability.py create mode 100644 pySDC/projects/Second_orderSDC/harmonic_oscillator_params.py create mode 100644 pySDC/projects/Second_orderSDC/harmonic_oscillator_run_points.py create mode 100644 pySDC/projects/Second_orderSDC/harmonic_oscillator_run_stab_interval.py create mode 100644 pySDC/projects/Second_orderSDC/harmonic_oscillator_run_stability.py create mode 100644 pySDC/projects/Second_orderSDC/plot_helper.py create mode 100644 pySDC/projects/Second_orderSDC/stability_simulation.py diff --git a/pySDC/implementations/problem_classes/HarmonicOscillator.py b/pySDC/implementations/problem_classes/HarmonicOscillator.py index ae62d1d650..d38ab4a096 100644 --- a/pySDC/implementations/problem_classes/HarmonicOscillator.py +++ b/pySDC/implementations/problem_classes/HarmonicOscillator.py @@ -28,6 +28,7 @@ class harmonic_oscillator(ptype): Phase of the oscillation. amp : float, optional Amplitude of the oscillation. + Source: https://beltoforion.de/en/harmonic_oscillator/ """ dtype_u = particles diff --git a/pySDC/projects/Second_orderSDC/README.md b/pySDC/projects/Second_orderSDC/README.md index 510161576d..ffdad1c2b3 100644 --- a/pySDC/projects/Second_orderSDC/README.md +++ b/pySDC/projects/Second_orderSDC/README.md @@ -1,29 +1,52 @@ # Spectral Deferred Correction Methods for Second-Order Problems +**Title:** Spectral Deferred Correction Methods for Second-order Problems + +**Authors:** Ikrom Akramov, Sebastian Götschel, Michael Minion, Daniel Ruprecht, and Robert Speck. + + Python code for implementing the paper's plots on Second-order SDC methods. ## Attribution You are welcome to use and adapt this code under the terms of the BSD license. 
If you use it, either in whole or in part, for a publication, please provide a proper citation:

-**Title:** Spectral Deferred Correction Methods for Second-order Problems
-
-**Authors:** Ikrom Akramov, Sebastian Götschel, Michael Minion, Daniel Ruprecht, and Robert Speck.
-[![DOI](http://example.com)](http://example.com)
+
+**BibTeX formatted citation:**
+
+    @misc{akramov2023spectral,
+      title={Spectral deferred correction methods for second-order problems},
+      author={Ikrom Akramov and Sebastian Götschel and Michael Minion and Daniel Ruprecht and Robert Speck},
+      year={2023},
+      eprint={2310.08352},
+      archivePrefix={arXiv},
+      primaryClass={math.NA}
+    }
+
+[![arXiv](https://img.shields.io/badge/arXiv-2310.08352-b31b1b.svg)](https://arxiv.org/abs/2310.08352)

## Reproducing Figures from the Publication

-- **Fig. 1:** Execute `dampedharmonic_oscillator_run_stability.py` while setting `kappa_max=18` and `mu_max=18`.
-- **Fig. 2:** Run `dampedharmonic_oscillator_run_stability.py` with the following configurations:
+- **Fig. 1:** Execute `harmonic_oscillator_run_stability.py` while setting `kappa_max=18` and `mu_max=18`.
+- **Fig. 2:** Run `harmonic_oscillator_run_stability.py` with the following configurations:
    - Set `kappa_max=30` and `mu_max=30`.
    - Adjust `maxiter` to 1, 2, 3, or 4 and execute each individually.
+- **Table 1:** Execute `harmonic_oscillator_run_stab_interval.py`:
+    - To save the results, set `save_interval_file=True`.
+
+- Use the script `harmonic_oscillator_run_points.py` to create a table for given $(\kappa, \mu)$ points. The table helps to determine suitable values of `M`, `K`, and the quadrature nodes that keep the SDC iteration stable.
+    - To save the results, set `save_points_table=True`.
+
- **Fig. 3:** Run `penningtrap_run_error.py` (Run local convergence: `conv.run_local_error()`) with `dt=0.015625/4` and `axes=(0,)`.
- **Fig. 4:** Run `penningtrap_run_error.py` (Run local convergence: `conv.run_local_error()`) using `dt=0.015625*4` and `axes=(2,)`.
- **Fig. 5:** Run `penningtrap_run_error.py` (Run global convergence: `conv.run_global_error()`) with `dt=0.015625*2`:
    - Note: Perform each run individually: first with `axes=(0,)`, then with `axes=(2,)`.
    - Manually set y-axis limits in `penningtrap_run_error.py`, specifically in lines 147-148.
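+
+The Fig. 3-5 runs above (and the Table 2 run below) boil down to a short driver. The sketch below is assembled from `penningtrap_run_error.py` in this patch; it assumes `penningtrap_params()` returns the `controller_params` and `description` dictionaries used by that script:
+
+```python
+from pySDC.projects.Second_orderSDC.penningtrap_params import penningtrap_params
+from pySDC.projects.Second_orderSDC.penningtrap_Simulation import ComputeError
+
+# Default Penning trap setup (assumed to return the controller and description dicts)
+controller_params, description = penningtrap_params()
+description['level_params']['dt'] = 0.015625 / 4  # Fig. 3 setting; adjust per figure
+
+# Expects a ./data folder (see check_data_folder.py) for the generated CSV and PDF files
+conv = ComputeError(controller_params, description, time_iter=3, K_iter=(1, 2, 3, 10), axes=(0,))
+conv.run_local_error()  # Figs. 3-4; use conv.run_global_error() for Fig. 5 and Table 2
+```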
-- **Table 1:** Execute `penningtrap_run_error.py` (Run global convergence: `conv.run_global_error()`) with the following settings: +- **Table 2:** Execute `penningtrap_run_error.py` (Run global convergence: `conv.run_global_error()`) with the following settings: - Expected order and approximate order are saved in the file: `data/global_order_vs_approx_order.csv` - Set: `K_iter=(2, 3, 4, 10)` - For `M=2`: diff --git a/pySDC/projects/Second_orderSDC/check_data_folder.py b/pySDC/projects/Second_orderSDC/check_data_folder.py index f9088d4063..cbd6098bbd 100644 --- a/pySDC/projects/Second_orderSDC/check_data_folder.py +++ b/pySDC/projects/Second_orderSDC/check_data_folder.py @@ -3,7 +3,7 @@ folder_name = "./data" # Check if the folder already exists -if not os.path.isdir(folder_name): # pragma: no cover +if not os.path.isdir(folder_name): # Create the folder os.makedirs(folder_name) else: diff --git a/pySDC/projects/Second_orderSDC/dampedharmonic_oscillator_run_stability.py b/pySDC/projects/Second_orderSDC/dampedharmonic_oscillator_run_stability.py deleted file mode 100644 index 65173d9d1e..0000000000 --- a/pySDC/projects/Second_orderSDC/dampedharmonic_oscillator_run_stability.py +++ /dev/null @@ -1,67 +0,0 @@ -import numpy as np - -from pySDC.implementations.problem_classes.HarmonicOscillator import harmonic_oscillator -from pySDC.implementations.sweeper_classes.boris_2nd_order import boris_2nd_order -from pySDC.projects.Second_orderSDC.penningtrap_Simulation import Stability_implementation - - -def dampedharmonic_oscillator_params(): - """ - Runtine to compute modulues of the stability function - - Returns: - description - """ - - # initialize level parameters - level_params = dict() - level_params['restol'] = 1e-16 - level_params["dt"] = 1.0 - - # initialize problem parameters for the Damped harmonic oscillator problem - problem_params = dict() - problem_params["k"] = 0 - problem_params["mu"] = 0 - problem_params["u0"] = np.array([1, 1]) - - # initialize sweeper parameters - sweeper_params = dict() - sweeper_params['quad_type'] = 'GAUSS' - sweeper_params["num_nodes"] = 3 - sweeper_params["do_coll_update"] = True - sweeper_params["picard_mats_sweep"] = True - - # initialize step parameters - step_params = dict() - step_params['maxiter'] = 50 - - # fill description dictionary for easy step instantiation - description = dict() - description["problem_class"] = harmonic_oscillator - description["problem_params"] = problem_params - description["sweeper_class"] = boris_2nd_order - description["sweeper_params"] = sweeper_params - description["level_params"] = level_params - description["step_params"] = step_params - - return description - - -if __name__ == '__main__': - """ - Damped harmonic oscillatro as test problem for the stability plot: - x'=v - v'=-kappa*x-mu*v - kappa: spring constant - mu: friction - - https://beltoforion.de/en/harmonic_oscillator/ - """ - # exec(open("check_data_folder.py").read()) - description = dampedharmonic_oscillator_params() - Stability = Stability_implementation(description, kappa_max=18, mu_max=18, Num_iter=(200, 200)) - Stability.run_SDC_stability() - Stability.run_Picard_stability() - Stability.run_RKN_stability() - Stability.run_Ksdc() - # Stability.run_Kpicard diff --git a/pySDC/projects/Second_orderSDC/harmonic_oscillator_params.py b/pySDC/projects/Second_orderSDC/harmonic_oscillator_params.py new file mode 100644 index 0000000000..daf6f40d15 --- /dev/null +++ b/pySDC/projects/Second_orderSDC/harmonic_oscillator_params.py @@ -0,0 +1,36 @@ +import numpy as np 
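+# This module only bundles the default level, problem, sweeper and step parameters for the
+# damped harmonic oscillator test problem; the stability scripts in this project import
+# get_default_harmonic_oscillator_description() from here.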
+from pySDC.implementations.problem_classes.HarmonicOscillator import harmonic_oscillator
+from pySDC.implementations.sweeper_classes.boris_2nd_order import boris_2nd_order
+
+
+def get_default_harmonic_oscillator_description():
+    """
+    Routine to build the default description (parameter dictionary) for the damped harmonic oscillator problem
+
+    Returns:
+        description (dict): A dictionary containing parameters for the damped harmonic oscillator problem
+    """
+
+    # Initialize level parameters
+    level_params = {'restol': 1e-16, 'dt': 1.0}
+
+    # Initialize problem parameters for the damped harmonic oscillator problem
+    problem_params = {'k': 0, 'mu': 0, 'u0': np.array([1, 1])}
+
+    # Initialize sweeper parameters
+    sweeper_params = {'quad_type': 'GAUSS', 'num_nodes': 3, 'do_coll_update': True, 'picard_mats_sweep': True}
+
+    # Initialize step parameters
+    step_params = {'maxiter': 5}
+
+    # Fill description dictionary for easy step instantiation
+    description = {
+        'problem_class': harmonic_oscillator,
+        'problem_params': problem_params,
+        'sweeper_class': boris_2nd_order,
+        'sweeper_params': sweeper_params,
+        'level_params': level_params,
+        'step_params': step_params,
+    }
+
+    return description
diff --git a/pySDC/projects/Second_orderSDC/harmonic_oscillator_run_points.py b/pySDC/projects/Second_orderSDC/harmonic_oscillator_run_points.py
new file mode 100644
index 0000000000..25741c8a02
--- /dev/null
+++ b/pySDC/projects/Second_orderSDC/harmonic_oscillator_run_points.py
@@ -0,0 +1,27 @@
+import numpy as np
+from pySDC.projects.Second_orderSDC.harmonic_oscillator_params import get_default_harmonic_oscillator_description
+from pySDC.projects.Second_orderSDC.stability_simulation import compute_and_generate_table
+
+
+if __name__ == '__main__':
+    '''
+    This script generates a table for the given (kappa, mu) points, comparing for which quadrature type,
+    number of nodes and number of iterations the SDC iteration is stable.
+    Additional parameters:
+        To save the table, set: save_points_table=True
+        To change the filename, set: points_table_filename='FILENAME' (default: './data/point_table.txt')
+    '''
+    # Get default parameters for the harmonic oscillator problem
+    description = get_default_harmonic_oscillator_description()
+    # Additional parameters to compute stability points
+    helper_params = {
+        'quad_type_list': ('GAUSS', 'LOBATTO'),
+        'Num_iter': (2, 2),
+        'num_nodes_list': np.arange(3, 6, 1),
+        'max_iter_list': np.arange(2, 10, 1),
+    }
+
+    points = ((1, 100), (3, 100), (10, 100))
+    # Iterate through points and perform the stability check
+    for ii in points:
+        compute_and_generate_table(description, helper_params, ii, check_stability_point=True)
diff --git a/pySDC/projects/Second_orderSDC/harmonic_oscillator_run_stab_interval.py b/pySDC/projects/Second_orderSDC/harmonic_oscillator_run_stab_interval.py
new file mode 100644
index 0000000000..6e7e1c2bca
--- /dev/null
+++ b/pySDC/projects/Second_orderSDC/harmonic_oscillator_run_stab_interval.py
@@ -0,0 +1,29 @@
+import numpy as np
+from pySDC.projects.Second_orderSDC.harmonic_oscillator_params import get_default_harmonic_oscillator_description
+from pySDC.projects.Second_orderSDC.stability_simulation import compute_and_generate_table
+
+
+if __name__ == '__main__':
+    '''
+    Computes the maximum stable values for SDC and Picard iterations in the purely oscillatory case
+    with no damping (mu=0).
+    Additional parameters:
+        To save the table, set: save_interval_file=True
+        To change the filename, set: interval_filename='FILENAME' (default: './data/stab_interval.txt')
+    Output:
+        a table comparing the stability interval for different values of M (number of nodes)
+        and K (maximal number of iterations)
+    '''
+    # Get default parameters
+    description = get_default_harmonic_oscillator_description()
+    # Additional parameters to compute the stability interval in kappa
+    helper_params = {
+        'quad_type_list': ('GAUSS',),
+        'Num_iter': (2000, 1),
+        'num_nodes_list': np.arange(2, 7, 1),
+        'max_iter_list': np.arange(1, 11, 1),
+    }
+
+    points = ((100, 1e-10),)
+    # Iterate through points and perform the stability check
+    for ii in points:
+        compute_and_generate_table(description, helper_params, ii, compute_interval=True)
diff --git a/pySDC/projects/Second_orderSDC/harmonic_oscillator_run_stability.py b/pySDC/projects/Second_orderSDC/harmonic_oscillator_run_stability.py
new file mode 100644
index 0000000000..a89f740e2f
--- /dev/null
+++ b/pySDC/projects/Second_orderSDC/harmonic_oscillator_run_stability.py
@@ -0,0 +1,26 @@
+from pySDC.projects.Second_orderSDC.harmonic_oscillator_params import get_default_harmonic_oscillator_description
+from pySDC.projects.Second_orderSDC.stability_simulation import StabilityImplementation
+
+
+if __name__ == '__main__':
+    """
+    Plots the stability regions for the harmonic oscillator problem.
+    Run:
+        SDC stability region: model_stab.run_SDC_stability()
+        Picard stability region: model_stab.run_Picard_stability()
+        Runge-Kutta-Nyström stability region: model_stab.run_RKN_stability()
+    To plot the spectral radius of the iteration matrix, run:
+        Iteration matrix of the SDC method: model_stab.run_Ksdc()
+        Iteration matrix of the Picard method: model_stab.run_Kpicard()
+
+    """
+    # Execute the stability analysis for the damped harmonic oscillator
+    description = get_default_harmonic_oscillator_description()
+    model_stab = StabilityImplementation(description, kappa_max=30, mu_max=30, Num_iter=(200, 200))
+
+    model_stab.run_SDC_stability()
+    model_stab.run_Picard_stability()
+    model_stab.run_RKN_stability()
+ 
model_stab.run_Ksdc() + # model_stab.run_Kpicard() diff --git a/pySDC/projects/Second_orderSDC/penningtrap_Simulation.py b/pySDC/projects/Second_orderSDC/penningtrap_Simulation.py index 3c2e08d426..480cfe064b 100644 --- a/pySDC/projects/Second_orderSDC/penningtrap_Simulation.py +++ b/pySDC/projects/Second_orderSDC/penningtrap_Simulation.py @@ -1,419 +1,15 @@ -# import matplotlib - -# matplotlib.use('Agg') -# import os - -import matplotlib.pyplot as plt import numpy as np from pySDC.helpers.stats_helper import get_sorted from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI from pySDC.projects.Second_orderSDC.penningtrap_HookClass import particles_output from pySDC.implementations.sweeper_classes.Runge_Kutta_Nystrom import RKN, Velocity_Verlet -from pySDC.core.Errors import ProblemError -from pySDC.core.Step import step - - -def fixed_plot_params(): # pragma: no cover - """ - Setting fixed parameters for the all of the plots - """ - fs = 16 - plt.rcParams['figure.figsize'] = 7.44, 6.74 - plt.rcParams['pgf.rcfonts'] = False - - plt.rcParams['lines.linewidth'] = 2.5 - plt.rcParams['axes.titlesize'] = fs + 5 - plt.rcParams['axes.labelsize'] = fs + 5 - plt.rcParams['xtick.labelsize'] = fs - plt.rcParams['ytick.labelsize'] = fs - plt.rcParams['xtick.major.pad'] = 5 - plt.rcParams['ytick.major.pad'] = 5 - plt.rcParams['axes.labelpad'] = 6 - plt.rcParams['lines.markersize'] = fs - 2 - plt.rcParams['lines.markeredgewidth'] = 1 - plt.rcParams['mathtext.fontset'] = 'cm' - plt.rcParams['mathtext.rm'] = 'serif' - plt.rc('font', size=fs) - - -class plotmanager(object): # pragma: no cover - """ - This class generates all of the plots of the Second-order SDC plots. - """ - - def __init__(self, controller_params, description, time_iter=3, K_iter=(1, 2, 3), Tend=2, axes=(1,), cwd=''): - self.controller_params = controller_params - self.description = description - self.time_iter = time_iter - self.K_iter = K_iter - self.Tend = Tend - self.axes = axes - self.cwd = cwd - self.quad_type = self.description['sweeper_params']['quad_type'] - self.num_nodes = self.description['sweeper_params']['num_nodes'] - self.error_type = 'local' - - def plot_convergence(self): # pragma: no cover - """ - Plot convergence order plots for the position and velocity - If you change parameters of the values you need set y_lim values need to set manually - """ - fixed_plot_params() - [N, time_data, error_data, order_data, convline] = self.organize_data( - filename='data/dt_vs_{}_errorSDC.csv'.format(self.error_type) - ) - - color = ['r', 'brown', 'g', 'blue'] - shape = ['o', 'd', 's', 'x'] - - fig1, ax1 = plt.subplots() - fig2, ax2 = plt.subplots() - value = self.axes[0] - for ii in range(0, N): - ax1.loglog(time_data[ii, :], convline['pos'][value, ii, :], color='black') - ax1.loglog( - time_data[ii, :], - error_data['pos'][value, ii, :], - ' ', - color=color[ii], - marker=shape[ii], - label='k={}'.format(int(self.K_iter[ii])), - ) - if value == 2: - ax1.text( - time_data[ii, 1], - 0.25 * convline['pos'][value, ii, 1], - r"$\mathcal{O}(\Delta t^{%d})$" % (order_data['pos'][ii, 0, 1]), - size=18, - ) - else: - ax1.text( - time_data[ii, 1], - 0.25 * convline['pos'][value, ii, 1], - r"$\mathcal{O}(\Delta t^{%d})$" % (order_data['pos'][ii, 0, 0]), - size=18, - ) - - if self.error_type == 'Local': - ax1.set_ylabel(r'$\Delta x^{\mathrm{(abs)}}_{%d}$' % (value + 1)) - else: - ax1.set_ylabel(r'$\Delta x^{\mathrm{(rel)}}_{%d}$' % (value + 1)) - ax1.set_title('{} order of convergence, 
$M={}$'.format(self.error_type, self.num_nodes)) - ax1.set_xlabel(r'$\omega_{B} \cdot \Delta t$') - - ax1.legend(loc='best') - fig1.tight_layout() - fig1.savefig(self.cwd + 'data/{}_conv_plot_pos{}.pdf'.format(self.error_type, value + 1)) - - for ii in range(0, N): - ax2.loglog(time_data[ii, :], convline['vel'][value, ii, :], color='black') - ax2.loglog( - time_data[ii, :], - error_data['vel'][value, ii, :], - ' ', - color=color[ii], - marker=shape[ii], - label='k={}'.format(int(self.K_iter[ii])), - ) - - if value == 2: - ax2.text( - time_data[ii, 1], - 0.25 * convline['vel'][value, ii, 1], - r"$\mathcal{O}(\Delta t^{%d})$" % (order_data['vel'][ii, 0, 1]), - size=18, - ) - else: - ax2.text( - time_data[ii, 1], - 0.25 * convline['vel'][value, ii, 1], - r"$\mathcal{O}(\Delta t^{%d})$" % (order_data['vel'][ii, 0, 0]), - size=18, - ) - - if self.error_type == 'Local': - ax2.set_ylabel(r'$\Delta v^{\mathrm{(abs)}}_{%d}$' % (value + 1)) - else: - ax2.set_ylabel(r'$\Delta v^{\mathrm{(rel)}}_{%d}$' % (value + 1)) - ax2.set_title(r'{} order of convergence, $M={}$'.format(self.error_type, self.num_nodes)) - ax2.set_xlabel(r'$\omega_{B} \cdot \Delta t$') - # ============================================================================= - # Setting y axis min and max values - # ============================================================================= - if self.error_type == 'global': - ax2.set_ylim(1e-14, 1e1) - ax1.set_ylim(1e-14, 1e1) - else: - ax2.set_ylim(np.min(ax1.get_ylim()), np.max(ax2.get_ylim())) - ax1.set_ylim(np.min(ax1.get_ylim()), np.max(ax2.get_ylim())) - ax2.legend(loc='best') - fig2.tight_layout() - plt.show() - fig2.savefig(self.cwd + 'data/{}_conv_plot_vel{}.pdf'.format(self.error_type, value + 1)) - - def format_number(self, data_value, indx): # pragma: no cover - """ - Change format of the x axis for the work precision plots - """ - if data_value >= 1_000_000: - formatter = "{:1.1f}M".format(data_value * 0.000_001) - else: - formatter = "{:1.0f}K".format(data_value * 0.001) - return formatter - - def plot_work_precision(self): # pragma: no cover - """ - Generate work precision plots - """ - fixed_plot_params() - [N, func_eval_SDC, error_SDC, *_] = self.organize_data( - filename=self.cwd + 'data/rhs_eval_vs_global_errorSDC.csv', - time_iter=self.time_iter, - ) - - [N, func_eval_picard, error_picard, *_] = self.organize_data( - filename=self.cwd + 'data/rhs_eval_vs_global_errorPicard.csv', - time_iter=self.time_iter, - ) +from pySDC.projects.Second_orderSDC.plot_helper import PlotManager - color = ['r', 'brown', 'g', 'blue'] - shape = ['o', 'd', 's', 'x'] - fig1, ax1 = plt.subplots() - fig2, ax2 = plt.subplots() - value = self.axes[0] - if self.RK: - [N, func_eval_RKN, error_RKN, *_] = self.organize_data( - filename=self.cwd + 'data/rhs_eval_vs_global_errorRKN.csv', - time_iter=self.time_iter, - ) - - ax1.loglog( - func_eval_RKN[0], - error_RKN['pos'][value,][0][:], - ls='dashdot', - color='purple', - marker='p', - label='RKN-4', - ) - ax2.loglog( - func_eval_RKN[0], - error_RKN['vel'][value,][0][:], - ls='dashdot', - color='purple', - marker='p', - label='RKN-4', - ) - if self.VV: - [N, func_eval_VV, error_VV, *_] = self.organize_data( - filename=self.cwd + 'data/rhs_eval_vs_global_errorVV.csv', - time_iter=self.time_iter, - ) - - ax1.loglog( - func_eval_VV[0], - error_VV['pos'][value,][0][:], - ls='dashdot', - color='blue', - marker='H', - label='Velocity-Verlet', - ) - ax2.loglog( - func_eval_VV[0], - error_VV['vel'][value,][0][:], - ls='dashdot', - color='blue', - marker='H', 
- label='Velocity-Verlet', - ) - - for ii, jj in enumerate(self.K_iter): - # ============================================================================= - # # If you want to get exactly the same picture like in paper uncomment this only for vertical axis - # if ii==0 or ii==1: - # ax1.loglog(func_eval_SDC[ii, :][1:], error_SDC['pos'][value, ii, :][1:], ls='solid', color=color[ii], marker=shape[ii], label='k={}'.format(jj)) - # ax1.loglog(func_eval_picard[ii,:][1:], error_picard['pos'][value, ii, :][1:], ls='--', color=color[ii], marker=shape[ii]) - - # ax2.loglog(func_eval_SDC[ii, :][1:], error_SDC['vel'][value, ii, :][1:], ls='solid', color=color[ii], marker=shape[ii], label='k={}'.format(jj)) - # ax2.loglog(func_eval_picard[ii,:][1:], error_picard['vel'][value, ii, :][1:], ls='--', color=color[ii], marker=shape[ii]) - # else: - - # ax1.loglog(func_eval_SDC[ii, :][:-1], error_SDC['pos'][value, ii, :][:-1], ls='solid', color=color[ii], marker=shape[ii], label='k={}'.format(jj)) - # ax1.loglog(func_eval_picard[ii,:][:-1], error_picard['pos'][value, ii, :][:-1], ls='--', color=color[ii], marker=shape[ii]) - - # ax2.loglog(func_eval_SDC[ii, :][:-1], error_SDC['vel'][value, ii, :][:-1], ls='solid', color=color[ii], marker=shape[ii], label='k={}'.format(jj)) - # ax2.loglog(func_eval_picard[ii,:][:-1], error_picard['vel'][value, ii, :][:-1], ls='--', color=color[ii], marker=shape[ii]) - # - # ============================================================================= - ax1.loglog( - func_eval_SDC[ii, :], - error_SDC['pos'][value, ii, :], - ls='solid', - color=color[ii], - marker=shape[ii], - label='k={}'.format(jj), - ) - ax1.loglog( - func_eval_picard[ii, :], error_picard['pos'][value, ii, :], ls='--', color=color[ii], marker=shape[ii] - ) - - ax2.loglog( - func_eval_SDC[ii, :], - error_SDC['vel'][value, ii, :], - ls='solid', - color=color[ii], - marker=shape[ii], - label='k={}'.format(jj), - ) - ax2.loglog( - func_eval_picard[ii, :], error_picard['vel'][value, ii, :], ls='--', color=color[ii], marker=shape[ii] - ) - - xmin = np.min(ax1.get_xlim()) - xmax = np.max(ax2.get_xlim()) - xmin = round(xmin, -3) - xmax = round(xmax, -3) - - xx = np.linspace(np.log(xmin), np.log(xmax), 5) - xx = 3**xx - xx = xx[np.where(xx < xmax)] - # xx=[2*1e+3,4*1e+3, 8*1e+3] - ax1.grid(True) - - ax1.set_title("$M={}$".format(self.num_nodes)) - ax1.set_xlabel("Number of RHS evaluations") - ax1.set_ylabel(r'$\Delta x^{\mathrm{(rel)}}_{%d}$' % (value + 1)) - ax1.loglog([], [], color="black", ls="--", label="Picard iteration") - ax1.loglog([], [], color="black", ls="solid", label="Boris-SDC iteration") - - ax1.set_xticks(xx) - ax1.xaxis.set_major_formatter(self.format_number) - ax1.set_ylim(np.min(ax1.get_ylim()), np.max(ax2.get_ylim())) - # ax1.set_ylim(1e-14, 1e+0) - - ax1.legend(loc="best", fontsize=12) - fig1.tight_layout() - fig1.savefig(self.cwd + "data/f_eval_pos_{}_M={}.pdf".format(value, self.num_nodes)) - - ax2.grid(True) - ax2.xaxis.set_major_formatter(self.format_number) - ax2.set_title("$M={}$".format(self.num_nodes)) - ax2.set_xlabel("Number of RHS evaluations") - ax2.set_ylabel(r'$\Delta v^{\mathrm{(rel)}}_{%d}$' % (value + 1)) - ax2.loglog([], [], color="black", ls="--", label="Picard iteration") - ax2.loglog([], [], color="black", ls="solid", label="Boris-SDC iteration") - ax2.set_xticks(xx) - ax2.xaxis.set_major_formatter(self.format_number) - ax2.set_ylim(np.min(ax1.get_ylim()), np.max(ax2.get_ylim())) - # ax2.set_ylim(1e-14, 1e+0) - ax2.legend(loc="best", fontsize=12) - fig2.tight_layout() - 
fig2.savefig(self.cwd + "data/f_eval_vel_{}_M={}.pdf".format(value, self.num_nodes)) - plt.show() - - def organize_data(self, filename='data/dt_vs_local_errorSDC.csv', time_iter=None): # pragma: no cover - """ - Organize data according to plot - Args: - filename (string): data to find approximate order - time_iter : in case it you used different time iterations - """ - if time_iter == None: - time_iter = self.time_iter - - items = np.genfromtxt(filename, delimiter=',', skip_header=1) - time = items[:, 0] - N = int(np.size(time) / time_iter) - - error_data = {'pos': np.zeros([3, N, time_iter]), 'vel': np.zeros([3, N, time_iter])} - order_data = {'pos': np.zeros([N, time_iter, 2]), 'vel': np.zeros([N, time_iter, 2])} - time_data = np.zeros([N, time_iter]) - convline = {'pos': np.zeros([3, N, time_iter]), 'vel': np.zeros([3, N, time_iter])} - - time_data = time.reshape([N, time_iter]) - - order_data['pos'][:, :, 0] = items[:, 1].reshape([N, time_iter]) - order_data['pos'][:, :, 1] = items[:, 2].reshape([N, time_iter]) - order_data['vel'][:, :, 0] = items[:, 6].reshape([N, time_iter]) - order_data['vel'][:, :, 1] = items[:, 7].reshape([N, time_iter]) - - for ii in range(0, 3): - error_data['pos'][ii, :, :] = items[:, ii + 3].reshape([N, time_iter]) - error_data['vel'][ii, :, :] = items[:, ii + 8].reshape([N, time_iter]) - - for jj in range(0, 3): - if jj == 2: - convline['pos'][jj, :, :] = ( - (time_data / time_data[0, 0]).T ** order_data['pos'][:, jj, 1] - ).T * error_data['pos'][jj, :, 0][:, None] - convline['vel'][jj, :, :] = ( - (time_data / time_data[0, 0]).T ** order_data['vel'][:, jj, 1] - ).T * error_data['vel'][jj, :, 0][:, None] - else: - convline['pos'][jj, :, :] = ( - (time_data / time_data[0, 0]).T ** order_data['pos'][:, jj, 0] - ).T * error_data['pos'][jj, :, 0][:, None] - convline['vel'][jj, :, :] = ( - (time_data / time_data[0, 0]).T ** order_data['vel'][:, jj, 0] - ).T * error_data['vel'][jj, :, 0][:, None] - - return [N, time_data, error_data, order_data, convline] - - # find approximate order - def find_approximate_order(self, filename='data/dt_vs_local_errorSDC.csv'): - """ - This function finds approximate convergence rate and saves in the data folder - Args: - filename: given data - return: - None - """ - [N, time_data, error_data, order_data, convline] = self.organize_data(self.cwd + filename) - approx_order = {'pos': np.zeros([1, N]), 'vel': np.zeros([1, N])} - - for jj in range(0, 3): - if jj == 0: - file = open(self.cwd + 'data/{}_order_vs_approx_order.csv'.format(self.error_type), 'w') - - else: - file = open(self.cwd + 'data/{}_order_vs_approx_order.csv'.format(self.error_type), 'a') - - for ii in range(0, N): - approx_order['pos'][0, ii] = np.polyfit( - np.log(time_data[ii, :]), np.log(error_data['pos'][jj, ii, :]), 1 - )[0].real - approx_order['vel'][0, ii] = np.polyfit( - np.log(time_data[ii, :]), np.log(error_data['vel'][jj, ii, :]), 1 - )[0].real - if jj == 2: - file.write( - str(order_data['pos'][:, jj, 1]) - + ' | ' - + str(approx_order['pos'][0]) - + ' | ' - + str(order_data['vel'][:, jj, 1]) - + ' | ' - + str(approx_order['vel'][0]) - + '\n' - ) - else: - file.write( - str(order_data['pos'][:, jj, 0]) - + ' | ' - + str(approx_order['pos'][0]) - + ' | ' - + str(order_data['vel'][:, jj, 0]) - + ' | ' - + str(approx_order['vel'][0]) - + '\n' - ) - file.close() - - -class compute_error(plotmanager): +class ComputeError(PlotManager): """ - This class generates the data for the plots and computations for Second-order SDC + This class generates data for plots and 
computations for Second-order SDC """ def __init__(self, controller_params, description, time_iter=3, K_iter=(1, 2, 3), Tend=2, axes=(1,), cwd=''): @@ -421,30 +17,29 @@ def __init__(self, controller_params, description, time_iter=3, K_iter=(1, 2, 3) controller_params, description, time_iter=time_iter, K_iter=K_iter, Tend=Tend, axes=axes, cwd='' ) - def run_local_error(self): # pragma: no cover + def run_local_error(self): """ - This function controlls everything to generate local convergence rate + Controls everything to generate local convergence rate """ self.compute_local_error_data() - # self.find_approximate_order() self.plot_convergence() - def run_global_error(self): # pragma: no cover + def run_global_error(self): """ - This function for the global convergence order together it finds approximate order + Computes global convergence order and finds approximate order """ self.error_type = 'global' self.compute_global_error_data() self.find_approximate_order(filename='data/dt_vs_global_errorSDC.csv') self.plot_convergence() - def run_work_precision(self, RK=True, VV=False, dt_cont=1): # pragma: no cover + def run_work_precision(self, RK=True, VV=False, dt_cont=1): """ - To implement work-precision of Second-order SDC + Implements work-precision of Second-order SDC Args: - RK: True or False to include in the picture RKN method - VV: True or False to include in the picture Velocity-Verlet Scheme - dt_cont: moves RK and VV left to right (I could't find the best way instead of this) + RK: True or False to include RKN method + VV: True or False to include Velocity-Verlet Scheme + dt_cont: moves RK and VV left to right """ self.RK = RK self.VV = VV @@ -458,9 +53,8 @@ def run_work_precision(self, RK=True, VV=False, dt_cont=1): # pragma: no cover def compute_local_error_data(self): """ - Compute local convergece rate and save this data + Computes local convergence rate and saves the data """ - step_params = dict() dt_val = self.description['level_params']['dt'] @@ -468,208 +62,126 @@ def compute_local_error_data(self): step_params['maxiter'] = order self.description['step_params'] = step_params - if order == self.K_iter[0]: - file = open(self.cwd + 'data/dt_vs_local_errorSDC.csv', 'w') - file.write( - str('Time_steps') - + " | " - + str('Order_pos') - + " | " - + str('Abs_error_position') - + " | " - + str('Order_vel') - + " | " - + str('Abs_error_velocity') - + '\n' - ) - else: - file = open(self.cwd + 'data/dt_vs_local_errorSDC.csv', 'a') - - for ii in range(0, self.time_iter): - dt = dt_val / 2**ii - - self.description['level_params']['dt'] = dt - self.description['level_params'] = self.description['level_params'] - - # instantiate the controller (no controller parameters used here) - controller = controller_nonMPI( - num_procs=1, controller_params=self.controller_params, description=self.description - ) - - # set time parameters - t0 = 0.0 - Tend = dt - - # get initial values on finest level - P = controller.MS[0].levels[0].prob - uinit = P.u_init() - - # call main function to get things done... 
- uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend) - - # compute exact solution and compare - uex = P.u_exact(Tend) - - # find order of quadrature rule - coll_order = controller.MS[0].levels[0].sweep.coll.order - - # find order of convergence for the postion and velocity - order_pos = list(self.local_order_pos(order, coll_order)) - order_vel = list(self.local_order_vel(order, coll_order)) - # evaluate error - error_pos = list(np.abs((uex - uend).pos).T[0]) - error_vel = list(np.abs((uex - uend).vel).T[0]) - - dt_omega = dt * self.description['problem_params']['omega_B'] - file.write( - str(dt_omega) - + ", " - + str(', '.join(map(str, order_pos))) - + ", " - + str(', '.join(map(str, error_pos))) - + ", " - + str(', '.join(map(str, order_vel))) - + ", " - + str(', '.join(map(str, error_vel))) - + '\n' - ) - - file.close() + file_path = self.cwd + 'data/dt_vs_local_errorSDC.csv' + mode = 'w' if order == self.K_iter[0] else 'a' + + with open(file_path, mode) as file: + if order == self.K_iter[0]: + file.write("Time_steps | Order_pos | Abs_error_position | Order_vel | Abs_error_velocity\n") + + for ii in range(0, self.time_iter): + dt = dt_val / 2**ii + + self.description['level_params']['dt'] = dt + self.description['level_params'] = self.description['level_params'] + + controller = controller_nonMPI( + num_procs=1, controller_params=self.controller_params, description=self.description + ) + + t0, Tend = 0.0, dt + P = controller.MS[0].levels[0].prob + uinit = P.u_init() + + uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend) + + uex = P.u_exact(Tend) + coll_order = controller.MS[0].levels[0].sweep.coll.order + order_pos = list(self.local_order_pos(order, coll_order)) + order_vel = list(self.local_order_vel(order, coll_order)) + error_pos = list(np.abs((uex - uend).pos).T[0]) + error_vel = list(np.abs((uex - uend).vel).T[0]) + + dt_omega = dt * self.description['problem_params']['omega_B'] + file.write( + f"{dt_omega}, {', '.join(map(str, order_pos))}, {', '.join(map(str, error_pos))}," + f" {', '.join(map(str, order_vel))}, {', '.join(map(str, error_vel))}\n" + ) def compute_global_error_data(self, Picard=False, RK=False, VV=False, work_counter=False, dt_cont=1): """ - Compute global convergence data and save it into the data folder + Computes global convergence data and saves it into the data folder Args: Picard: bool, Picard iteration computation RK: bool, RKN method VV: bool, Velocity-Verlet scheme - work_counter: bool, compute rhs for the work precision + work_counter: bool, compute rhs for work precision dt_cont: moves the data left to right for RK and VV method """ - K_iter = self.K_iter + name, description = '', self.description + if Picard: name = 'Picard' - description = self.description description['sweeper_params']['QI'] = 'PIC' description['sweeper_params']['QE'] = 'PIC' - elif RK: - K_iter = (1,) - name = 'RKN' - description = self.description - description['sweeper_class'] = RKN + K_iter, name, description['sweeper_class'] = (1,), 'RKN', RKN elif VV: - K_iter = (1,) - name = 'VV' - description = self.description - description['sweeper_class'] = Velocity_Verlet + K_iter, name, description['sweeper_class'] = (1,), 'VV', Velocity_Verlet else: name = 'SDC' - description = self.description + self.controller_params['hook_class'] = particles_output - step_params = dict() - dt_val = self.description['level_params']['dt'] + step_params, dt_val = dict(), self.description['level_params']['dt'] + values, error = ['position', 'velocity'], dict() - values = ['position', 
'velocity'] + filename = f"data/{'rhs_eval_vs_global_error' if work_counter else 'dt_vs_global_error'}{name}.csv" - error = dict() + for order in K_iter: + u_val, uex_val = dict(), dict() + step_params['maxiter'], description['step_params'] = order, step_params - if work_counter: - filename = 'data/rhs_eval_vs_global_error{}.csv'.format(name) - else: - filename = 'data/dt_vs_global_error{}.csv'.format(name) + file_path = self.cwd + filename + mode = 'w' if order == K_iter[0] else 'a' - for order in K_iter: - u_val = dict() - uex_val = dict() + with open(file_path, mode) as file: + if order == K_iter[0]: + file.write( + "Time_steps/Work_counter | Order_pos | Abs_error_position | Order_vel | Abs_error_velocity\n" + ) + + cont = 2 if self.time_iter == 3 else 2 ** abs(3 - self.time_iter) + cont = cont if not Picard else dt_cont + + for ii in range(0, self.time_iter): + dt = (dt_val * cont) / 2**ii + + description['level_params']['dt'] = dt + description['level_params'] = self.description['level_params'] + + controller = controller_nonMPI( + num_procs=1, controller_params=self.controller_params, description=description + ) + + t0, Tend = 0.0, self.Tend + P = controller.MS[0].levels[0].prob + uinit = P.u_init() + uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend) + + func_eval = P.work_counters['Boris_solver'].niter + P.work_counters['rhs'].niter + + for nn in values: + u_val[nn] = get_sorted(stats, type=nn, sortby='time') + uex_val[nn] = get_sorted(stats, type=nn + '_exact', sortby='time') + error[nn] = self.relative_error(uex_val[nn], u_val[nn]) + error[nn] = list(error[nn].T[0]) + + if RK or VV: + global_order = np.array([4, 4]) + else: + coll_order = controller.MS[0].levels[0].sweep.coll.order + global_order = list(self.global_order(order, coll_order)) + global_order = np.array([4, 4]) if RK or VV else list(self.global_order(order, coll_order)) + + dt_omega = dt * self.description['problem_params']['omega_B'] + save = func_eval if work_counter else dt_omega + + file.write( + f"{save}, {', '.join(map(str, global_order))}, {', '.join(map(str, error['position']))}," + f" {', '.join(map(str, global_order))}, {', '.join(map(str, error['velocity']))}\n" + ) - step_params['maxiter'] = order - description['step_params'] = step_params - - if order == K_iter[0]: - file = open(self.cwd + filename, 'w') - file.write( - str('Time_steps/Work_counter') - + " | " - + str('Order_pos') - + " | " - + str('Abs_error_position') - + " | " - + str('Order_vel') - + " | " - + str('Abs_error_velocity') - + '\n' - ) - else: - file = open(self.cwd + filename, 'a') - - # Controller for plot - if Picard: - if self.time_iter == 3: - cont = 2 - else: - tt = np.abs(3 - self.time_iter) - cont = 2**tt + 2 - else: - cont = dt_cont - - for ii in range(0, self.time_iter): - dt = (dt_val * cont) / 2**ii - - description['level_params']['dt'] = dt - description['level_params'] = self.description['level_params'] - - # instantiate the controller (no controller parameters used here) - controller = controller_nonMPI( - num_procs=1, controller_params=self.controller_params, description=description - ) - - # set time parameters - t0 = 0.0 - # Tend = dt - Tend = self.Tend - - # get initial values on finest level - P = controller.MS[0].levels[0].prob - uinit = P.u_init() - - # call main function to get things done... 
- uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend) - # rhs function evaluation - func_eval = P.work_counters['Boris_solver'].niter + P.work_counters['rhs'].niter - # extract values from stats - for _, nn in enumerate(values): - u_val[nn] = get_sorted(stats, type=nn, sortby='time') - uex_val[nn] = get_sorted(stats, type=nn + '_exact', sortby='time') - error[nn] = self.relative_error(uex_val[nn], u_val[nn]) - error[nn] = list(error[nn].T[0]) - - if RK or VV: - global_order = np.array([4, 4]) - else: - coll_order = controller.MS[0].levels[0].sweep.coll.order - global_order = list(self.global_order(order, coll_order)) - dt_omega = dt * self.description['problem_params']['omega_B'] - if work_counter: - save = func_eval - else: - save = dt_omega - file.write( - str(save) - + ", " - + str(', '.join(map(str, global_order))) - + ", " - + str(', '.join(map(str, error['position']))) - + ", " - + str(', '.join(map(str, global_order))) - + ", " - + str(', '.join(map(str, error['velocity']))) - + '\n' - ) - file.close() - - # find expected local convergence order for position def local_order_pos(self, order_K, order_quad): if self.description['sweeper_params']['initial_guess'] == 'spread': if self.quad_type == 'GAUSS' or self.quad_type == 'RADAU-RIGHT': @@ -677,16 +189,15 @@ def local_order_pos(self, order_K, order_quad): elif self.quad_type == 'LOBATTO' or self.quad_type == 'RADAU-LEFT': return np.array([np.min([order_K + 2 + 2, order_quad]), np.min([2 * order_K + 3, order_quad])]) else: - raise NotImplementedError('order of convergence explicitly not implemented ') + raise NotImplementedError('Order of convergence explicitly not implemented') else: if self.quad_type == 'GAUSS' or self.quad_type == 'RADAU-RIGHT': return np.array([np.min([order_K + 2, order_quad]), np.min([2 * order_K + 3, order_quad])]) elif self.quad_type == 'LOBATTO' or self.quad_type == 'RADAU-LEFT': return np.array([np.min([order_K + 2, order_quad]), np.min([2 * order_K + 3, order_quad])]) else: - raise NotImplementedError('order of convergence explicitly not implemented ') + raise NotImplementedError('Order of convergence explicitly not implemented') - # find expected local convergence order for velocity def local_order_vel(self, order_K, order_quad): if self.description['sweeper_params']['initial_guess'] == 'spread': if self.quad_type == 'GAUSS' or self.quad_type == 'RADAU-RIGHT': @@ -694,229 +205,24 @@ def local_order_vel(self, order_K, order_quad): elif self.quad_type == 'LOBATTO' or self.quad_type == 'RADAU-LEFT': return np.array([np.min([order_K + 1 + 2, order_quad]), np.min([2 * order_K + 2, order_quad])]) else: - raise NotImplementedError('order of convergence explicitly not implemented ') + raise NotImplementedError('Order of convergence explicitly not implemented') else: if self.quad_type == 'GAUSS' or self.quad_type == 'RADAU-RIGHT': return np.array([np.min([order_K + 1, order_quad]), np.min([2 * order_K + 2, order_quad])]) elif self.quad_type == 'LOBATTO' or self.quad_type == 'RADAU-LEFT': return np.array([np.min([order_K + 1, order_quad]), np.min([2 * order_K + 2, order_quad])]) else: - raise NotImplementedError('order of convergence explicitly not implemented ') + raise NotImplementedError('Order of convergence explicitly not implemented') - # find expected global convergence order def global_order(self, order_K, order_quad): if self.quad_type == 'GAUSS' or self.quad_type == 'RADAU-RIGHT': return np.array([np.min([order_K, order_quad]), np.min([2 * order_K, order_quad])]) elif self.quad_type == 'LOBATTO' or 
self.quad_type == 'RADAU-LEFT': return np.array([np.min([order_K, order_quad]), np.min([2 * order_K, order_quad])]) + 2 else: - raise NotImplementedError('order of convergence explicitly not implemented ') + raise NotImplementedError('Order of convergence explicitly not implemented') - # compute relative error def relative_error(self, uex_data, u_data): u_ex = np.array([entry[1] for entry in uex_data]) u = np.array([entry[1] for entry in u_data]) return np.linalg.norm(np.abs((u_ex - u)), np.inf, 0) / np.linalg.norm(u_ex, np.inf, 0) - - -class Stability_implementation(object): - """ - Routine to compute the stability domains of different configurations of SDC - """ - - def __init__(self, description, kappa_max=20, mu_max=20, Num_iter=(400, 400), cwd=''): - self.description = description - self.kappa_max = kappa_max - self.mu_max = mu_max - self.kappa_iter = Num_iter[0] - self.mu_iter = Num_iter[1] - self.lambda_kappa = np.linspace(0.0, self.kappa_max, self.kappa_iter) - self.lambda_mu = np.linspace(0.0, self.mu_max, self.mu_iter) - self.K_iter = description['step_params']['maxiter'] - self.num_nodes = description['sweeper_params']['num_nodes'] - self.dt = description['level_params']['dt'] - self.SDC, self.Ksdc, self.picard, self.Kpicard = self.stability_data() - self.cwd = cwd - - def stability_data(self): - """ - Computes stability domain matrix for the Harmonic oscillator problem - Returns: - numpy.ndarray: domain_SDC - numpy.ndarray: domain_Ksdc - numpy.ndarray: domain_picard - numpy.ndarray: domain_Kpicard - """ - S = step(description=self.description) - - L = S.levels[0] - - Q = L.sweep.coll.Qmat[1:, 1:] - QQ = np.dot(Q, Q) - num_nodes = L.sweep.coll.num_nodes - dt = L.params.dt - Q_coll = np.block([[QQ, np.zeros([num_nodes, num_nodes])], [np.zeros([num_nodes, num_nodes]), Q]]) - qQ = np.dot(L.sweep.coll.weights, Q) - - ones = np.block([[np.ones(num_nodes), np.zeros(num_nodes)], [np.zeros(num_nodes), np.ones(num_nodes)]]) - - q_mat = np.block( - [ - [dt**2 * qQ, np.zeros(num_nodes)], - [np.zeros(num_nodes), dt * L.sweep.coll.weights], - ] - ) - - domain_SDC = np.zeros((self.kappa_iter, self.mu_iter), dtype="complex") - domain_picard = np.zeros((self.kappa_iter, self.mu_iter)) - domain_Ksdc = np.zeros((self.kappa_iter, self.mu_iter)) - domain_Kpicard = np.zeros((self.kappa_iter, self.mu_iter)) - for i in range(0, self.kappa_iter): - for j in range(0, self.mu_iter): - k = self.lambda_kappa[i] - mu = self.lambda_mu[j] - F = np.block( - [ - [-k * np.eye(num_nodes), -mu * np.eye(num_nodes)], - [-k * np.eye(num_nodes), -mu * np.eye(num_nodes)], - ] - ) - if self.K_iter != 0: - lambdas = [k, mu] - SDC_mat_sweep, Ksdc_eigval = L.sweep.get_scalar_problems_manysweep_mats( - nsweeps=self.K_iter, lambdas=lambdas - ) - if L.sweep.params.picard_mats_sweep: - ( - Picard_mat_sweep, - Kpicard_eigval, - ) = L.sweep.get_scalar_problems_picardsweep_mats(nsweeps=self.K_iter, lambdas=lambdas) - else: - ProblemError("Picard interation is False") - domain_Ksdc[i, j] = Ksdc_eigval - if L.sweep.params.picard_mats_sweep: - domain_Kpicard[i, j] = Kpicard_eigval - - else: - SDC_mat_sweep = np.linalg.inv(np.eye(2 * num_nodes) - dt * np.dot(Q_coll, F)) - - if L.sweep.params.do_coll_update: - FSDC = np.dot(F, SDC_mat_sweep) - Rsdc_mat = np.array([[1.0, dt], [0, 1.0]]) + np.dot(q_mat, FSDC) @ ones.T - stab_func, v = np.linalg.eig(Rsdc_mat) - - if L.sweep.params.picard_mats_sweep: - FPicard = np.dot(F, Picard_mat_sweep) - Rpicard_mat = np.array([[1.0, dt], [0, 1.0]]) + np.dot(q_mat, FPicard) @ ones.T - 
stab_func_picard, v = np.linalg.eig(Rpicard_mat) - else: - pass - raise ProblemError("Collocation update step is only works for True") - - domain_SDC[i, j] = np.max(np.abs(stab_func)) - if L.sweep.params.picard_mats_sweep: - domain_picard[i, j] = np.max(np.abs(stab_func_picard)) - - return ( - dt * domain_SDC.real, - dt * domain_Ksdc.real, - dt * domain_picard.real, - dt * domain_Kpicard.real, - ) - - def stability_function_RKN(self, k, mu, dt): - """ - Stability function of RKN method - - Returns: - float: maximum absolute values of eigvales - """ - A = np.array([[0, 0, 0, 0], [0.5, 0, 0, 0], [0, 0.5, 0, 0], [0, 0, 1, 0]]) - B = np.array([[0, 0, 0, 0], [0.125, 0, 0, 0], [0.125, 0, 0, 0], [0, 0, 0.5, 0]]) - c = np.array([0, 0.5, 0.5, 1]) - b = np.array([1 / 6, 2 / 6, 2 / 6, 1 / 6]) - bA = np.array([1 / 6, 1 / 6, 1 / 6, 0]) - L = np.eye(4) + k * (dt**2) * B + mu * dt * A - R = np.block([[-k * np.ones(4)], [-(k * c + mu * np.ones(4))]]) - - K = np.linalg.inv(L) @ R.T - C = np.block([[dt**2 * bA], [dt * b]]) - Y = np.array([[1, dt], [0, 1]]) + C @ K - eigval = np.linalg.eigvals(Y) - - return np.max(np.abs(eigval)) - - def stability_data_RKN(self): - """ - Compute and store values into a matrix - - Returns: - numpy.ndarray: stab_RKN - """ - stab_RKN = np.zeros([self.kappa_iter, self.mu_iter]) - for ii, kk in enumerate(self.lambda_kappa): - for jj, mm in enumerate(self.lambda_mu): - stab_RKN[jj, ii] = self.stability_function_RKN(kk, mm, self.dt) - - return stab_RKN - - def plot_stability(self, region, title=""): # pragma: no cover - """ - Plotting runtine for moduli - - Args: - stabval (numpy.ndarray): moduli - title: title for the plot - """ - fixed_plot_params() - lam_k_max = np.amax(self.lambda_kappa) - lam_mu_max = np.amax(self.lambda_mu) - - plt.figure() - levels = np.array([0.25, 0.5, 0.75, 0.9, 1.0, 1.1]) - - CS1 = plt.contour(self.lambda_kappa, self.lambda_mu, region.T, levels, colors='k', linestyles="dashed") - # CS2 = plt.contour(self.lambda_k, self.lambda_mu, np.absolute(region.T), [1.0], colors='r') - - plt.clabel(CS1, inline=True, fmt="%3.2f") - - plt.gca().set_xticks(np.arange(0, int(lam_k_max) + 3, 3)) - plt.gca().set_yticks(np.arange(0, int(lam_mu_max) + 3, 3)) - plt.gca().tick_params(axis="both", which="both") - plt.xlim([0.0, lam_k_max]) - plt.ylim([0.0, lam_mu_max]) - - plt.xlabel(r"$\Delta t\cdot \kappa }$", labelpad=0.0) - plt.ylabel(r"$\Delta t\cdot \mu }$", labelpad=0.0) - if self.RKN: - plt.title(f"{title}") - if self.radius: - plt.title("{} $M={}$".format(title, self.num_nodes)) - else: - plt.title(r"{} $M={},\ K={}$".format(title, self.num_nodes, self.K_iter)) - plt.tight_layout() - plt.savefig(self.cwd + "data/M={}_K={}_redion_{}.pdf".format(self.num_nodes, self.K_iter, title)) - - def run_SDC_stability(self): # pragma: no cover - self.RKN = False - self.radius = False - self.plot_stability(self.SDC, title="SDC stability region") - - def run_Picard_stability(self): # pragma: no cover - self.RKN = False - self.radius = False - self.plot_stability(self.picard, title="Picard stability region") - - def run_Ksdc(self): # pragma: no cover - self.radius = True - self.plot_stability(self.Ksdc, title="$K_{sdc}$ spectral radius") - - def run_Kpicard(self): # pragma: no cover - self.radius = True - self.plot_stability(self.Kpicard, title="$K_{picard}$ spectral radius") - - def run_RKN_stability(self): # pragma: no cover - self.RKN = True - self.radius = False - region_RKN = self.stability_data_RKN() - self.plot_stability(region_RKN.T, title='RKN-4 stability region') diff --git 
a/pySDC/projects/Second_orderSDC/penningtrap_run_Hamiltonian_error.py b/pySDC/projects/Second_orderSDC/penningtrap_run_Hamiltonian_error.py index 7fae6b6f30..cab9299836 100644 --- a/pySDC/projects/Second_orderSDC/penningtrap_run_Hamiltonian_error.py +++ b/pySDC/projects/Second_orderSDC/penningtrap_run_Hamiltonian_error.py @@ -11,7 +11,7 @@ from pySDC.implementations.sweeper_classes.boris_2nd_order import boris_2nd_order from pySDC.projects.Second_orderSDC.penningtrap_HookClass import particles_output from pySDC.implementations.sweeper_classes.Runge_Kutta_Nystrom import RKN -from pySDC.projects.Second_orderSDC.penningtrap_Simulation import fixed_plot_params +from pySDC.projects.Second_orderSDC.plot_helper import set_fixed_plot_params def main(dt, tend, maxiter, M, sweeper): # pragma: no cover @@ -97,7 +97,7 @@ def plot_Hamiltonian_error(K, M, dt): # pragma: no cover M: number of quadrature nodes dt: time step """ - fixed_plot_params() + set_fixed_plot_params() # Define final time time = 1e6 tn = dt diff --git a/pySDC/projects/Second_orderSDC/penningtrap_run_error.py b/pySDC/projects/Second_orderSDC/penningtrap_run_error.py index 1f74d42193..77156b8355 100644 --- a/pySDC/projects/Second_orderSDC/penningtrap_run_error.py +++ b/pySDC/projects/Second_orderSDC/penningtrap_run_error.py @@ -1,5 +1,5 @@ from pySDC.projects.Second_orderSDC.penningtrap_params import penningtrap_params -from pySDC.projects.Second_orderSDC.penningtrap_Simulation import compute_error +from pySDC.projects.Second_orderSDC.penningtrap_Simulation import ComputeError if __name__ == '__main__': """ @@ -25,7 +25,7 @@ description['sweeper_params']['num_nodes'] = 4 ## ============================================================================= # Give the parameters to the class - conv = compute_error(controller_params, description, time_iter=3, K_iter=(1, 2, 3, 10), axes=(2,)) + conv = ComputeError(controller_params, description, time_iter=3, K_iter=(1, 2, 3, 10), axes=(2,)) # Run local convergence order # conv.run_local_error() # Run global convergence order diff --git a/pySDC/projects/Second_orderSDC/penningtrap_run_work_precision.py b/pySDC/projects/Second_orderSDC/penningtrap_run_work_precision.py index eb47975285..eead34df2c 100644 --- a/pySDC/projects/Second_orderSDC/penningtrap_run_work_precision.py +++ b/pySDC/projects/Second_orderSDC/penningtrap_run_work_precision.py @@ -1,7 +1,7 @@ # It checks whether data folder exicits or not exec(open("check_data_folder.py").read()) -from pySDC.projects.Second_orderSDC.penningtrap_Simulation import compute_error +from pySDC.projects.Second_orderSDC.penningtrap_Simulation import ComputeError from pySDC.projects.Second_orderSDC.penningtrap_params import penningtrap_params if __name__ == '__main__': @@ -10,7 +10,7 @@ All parameters are given in penningtrap_params Note: * time needs to be changed according to choosen axis - * Tend fixed but it can be changed by defining Tend and include in it in compute_error + * Tend fixed but it can be changed by defining Tend and include in it in ComputeError * To implement Velocity-Verlet scheme set VV=True like run_work_precision(VV=True) * RKN method can be removed by setting RKN=False like run_work_precision(RKN=False) * dt timestep can be changed here as well @@ -24,5 +24,5 @@ description['level_params']['dt'] = 0.015625 * 4 description['sweeper_params']['initial_guess'] = 'spread' # 'zero', 'spread' ## ============================================================================= - work_pre = compute_error(controller_params, description, 
time_iter=3, Tend=Tend, K_iter=(1, 2, 3), axes=(2,)) + work_pre = ComputeError(controller_params, description, time_iter=3, Tend=Tend, K_iter=(1, 2, 3), axes=(2,)) work_pre.run_work_precision(RK=True) diff --git a/pySDC/projects/Second_orderSDC/plot_helper.py b/pySDC/projects/Second_orderSDC/plot_helper.py new file mode 100644 index 0000000000..61514d6bfe --- /dev/null +++ b/pySDC/projects/Second_orderSDC/plot_helper.py @@ -0,0 +1,406 @@ +# import matplotlib + +# matplotlib.use('Agg') +# import os + +import numpy as np +import matplotlib.pyplot as plt + +FONT_SIZE = 16 +FIG_SIZE = (7.44, 6.74) + + +def set_fixed_plot_params(): # pragma: no cover + """ + Set fixed parameters for all plots + """ + plt.rcParams['figure.figsize'] = FIG_SIZE + plt.rcParams['pgf.rcfonts'] = False + + plt.rcParams['lines.linewidth'] = 2.5 + plt.rcParams['axes.titlesize'] = FONT_SIZE + 5 + plt.rcParams['axes.labelsize'] = FONT_SIZE + 5 + plt.rcParams['xtick.labelsize'] = FONT_SIZE + plt.rcParams['ytick.labelsize'] = FONT_SIZE + plt.rcParams['xtick.major.pad'] = 5 + plt.rcParams['ytick.major.pad'] = 5 + plt.rcParams['axes.labelpad'] = 6 + plt.rcParams['lines.markersize'] = FONT_SIZE - 2 + plt.rcParams['lines.markeredgewidth'] = 1 + plt.rcParams['mathtext.fontset'] = 'cm' + plt.rcParams['mathtext.rm'] = 'serif' + plt.rc('font', size=FONT_SIZE) + + +class PlotManager(object): # pragma: no cover + """ + This class generates all of the plots of the Second-order SDC plots. + """ + + def __init__(self, controller_params, description, time_iter=3, K_iter=(1, 2, 3), Tend=2, axes=(1,), cwd=''): + self.controller_params = controller_params + self.description = description + self.time_iter = time_iter + self.K_iter = K_iter + self.Tend = Tend + self.axes = axes + self.cwd = cwd + self.quad_type = self.description['sweeper_params']['quad_type'] + self.num_nodes = self.description['sweeper_params']['num_nodes'] + self.error_type = 'local' + + def plot_convergence(self): + """ + Plot convergence order plots for the position and velocity + If you change parameters of the values you need set y_lim values need to set manually + """ + set_fixed_plot_params() + [N, time_data, error_data, order_data, convline] = self.organize_data( + filename='data/dt_vs_{}_errorSDC.csv'.format(self.error_type) + ) + + color = ['r', 'brown', 'g', 'blue'] + shape = ['o', 'd', 's', 'x'] + + fig1, ax1 = plt.subplots() + fig2, ax2 = plt.subplots() + value = self.axes[0] + for ii in range(0, N): + ax1.loglog(time_data[ii, :], convline['pos'][value, ii, :], color='black') + ax1.loglog( + time_data[ii, :], + error_data['pos'][value, ii, :], + ' ', + color=color[ii], + marker=shape[ii], + label='k={}'.format(int(self.K_iter[ii])), + ) + if value == 2: + ax1.text( + time_data[ii, 1], + 0.25 * convline['pos'][value, ii, 1], + r"$\mathcal{O}(\Delta t^{%d})$" % (order_data['pos'][ii, 0, 1]), + size=18, + ) + else: + ax1.text( + time_data[ii, 1], + 0.25 * convline['pos'][value, ii, 1], + r"$\mathcal{O}(\Delta t^{%d})$" % (order_data['pos'][ii, 0, 0]), + size=18, + ) + + if self.error_type == 'Local': + ax1.set_ylabel(r'$\Delta x^{\mathrm{(abs)}}_{%d}$' % (value + 1)) + else: + ax1.set_ylabel(r'$\Delta x^{\mathrm{(rel)}}_{%d}$' % (value + 1)) + ax1.set_title('{} order of convergence, $M={}$'.format(self.error_type, self.num_nodes)) + ax1.set_xlabel(r'$\omega_{B} \cdot \Delta t$') + + ax1.legend(loc='best') + fig1.tight_layout() + fig1.savefig(self.cwd + 'data/{}_conv_plot_pos{}.pdf'.format(self.error_type, value + 1)) + + for ii in range(0, N): + 
ax2.loglog(time_data[ii, :], convline['vel'][value, ii, :], color='black') + ax2.loglog( + time_data[ii, :], + error_data['vel'][value, ii, :], + ' ', + color=color[ii], + marker=shape[ii], + label='k={}'.format(int(self.K_iter[ii])), + ) + + if value == 2: + ax2.text( + time_data[ii, 1], + 0.25 * convline['vel'][value, ii, 1], + r"$\mathcal{O}(\Delta t^{%d})$" % (order_data['vel'][ii, 0, 1]), + size=18, + ) + else: + ax2.text( + time_data[ii, 1], + 0.25 * convline['vel'][value, ii, 1], + r"$\mathcal{O}(\Delta t^{%d})$" % (order_data['vel'][ii, 0, 0]), + size=18, + ) + + if self.error_type == 'Local': + ax2.set_ylabel(r'$\Delta v^{\mathrm{(abs)}}_{%d}$' % (value + 1)) + else: + ax2.set_ylabel(r'$\Delta v^{\mathrm{(rel)}}_{%d}$' % (value + 1)) + ax2.set_title(r'{} order of convergence, $M={}$'.format(self.error_type, self.num_nodes)) + ax2.set_xlabel(r'$\omega_{B} \cdot \Delta t$') + # ============================================================================= + # Setting y axis min and max values + # ============================================================================= + if self.error_type == 'global': + ax2.set_ylim(1e-14, 1e1) + ax1.set_ylim(1e-14, 1e1) + else: + ax2.set_ylim(np.min(ax1.get_ylim()), np.max(ax2.get_ylim())) + ax1.set_ylim(np.min(ax1.get_ylim()), np.max(ax2.get_ylim())) + ax2.legend(loc='best') + fig2.tight_layout() + plt.show() + fig2.savefig(self.cwd + 'data/{}_conv_plot_vel{}.pdf'.format(self.error_type, value + 1)) + + def format_number(self, data_value, indx): + """ + Change format of the x axis for the work precision plots + """ + if data_value >= 1_000_000: + formatter = "{:1.1f}M".format(data_value * 0.000_001) + else: + formatter = "{:1.0f}K".format(data_value * 0.001) + return formatter + + def plot_work_precision(self): + """ + Generate work precision plots + """ + set_fixed_plot_params() + [N, func_eval_SDC, error_SDC, *_] = self.organize_data( + filename=self.cwd + 'data/rhs_eval_vs_global_errorSDC.csv', + time_iter=self.time_iter, + ) + + [N, func_eval_picard, error_picard, *_] = self.organize_data( + filename=self.cwd + 'data/rhs_eval_vs_global_errorPicard.csv', + time_iter=self.time_iter, + ) + + color = ['r', 'brown', 'g', 'blue'] + shape = ['o', 'd', 's', 'x'] + fig1, ax1 = plt.subplots() + fig2, ax2 = plt.subplots() + value = self.axes[0] + + if self.RK: + [N, func_eval_RKN, error_RKN, *_] = self.organize_data( + filename=self.cwd + 'data/rhs_eval_vs_global_errorRKN.csv', + time_iter=self.time_iter, + ) + + ax1.loglog( + func_eval_RKN[0], + error_RKN['pos'][value,][0][:], + ls='dashdot', + color='purple', + marker='p', + label='RKN-4', + ) + ax2.loglog( + func_eval_RKN[0], + error_RKN['vel'][value,][0][:], + ls='dashdot', + color='purple', + marker='p', + label='RKN-4', + ) + if self.VV: + [N, func_eval_VV, error_VV, *_] = self.organize_data( + filename=self.cwd + 'data/rhs_eval_vs_global_errorVV.csv', + time_iter=self.time_iter, + ) + + ax1.loglog( + func_eval_VV[0], + error_VV['pos'][value,][0][:], + ls='dashdot', + color='blue', + marker='H', + label='Velocity-Verlet', + ) + ax2.loglog( + func_eval_VV[0], + error_VV['vel'][value,][0][:], + ls='dashdot', + color='blue', + marker='H', + label='Velocity-Verlet', + ) + + for ii, jj in enumerate(self.K_iter): + # ============================================================================= + # # If you want to get exactly the same picture like in paper uncomment this only for vertical axis + # if ii==0 or ii==1: + # ax1.loglog(func_eval_SDC[ii, :][1:], error_SDC['pos'][value, ii, :][1:], 
ls='solid', color=color[ii], marker=shape[ii], label='k={}'.format(jj)) + # ax1.loglog(func_eval_picard[ii,:][1:], error_picard['pos'][value, ii, :][1:], ls='--', color=color[ii], marker=shape[ii]) + + # ax2.loglog(func_eval_SDC[ii, :][1:], error_SDC['vel'][value, ii, :][1:], ls='solid', color=color[ii], marker=shape[ii], label='k={}'.format(jj)) + # ax2.loglog(func_eval_picard[ii,:][1:], error_picard['vel'][value, ii, :][1:], ls='--', color=color[ii], marker=shape[ii]) + # else: + + # ax1.loglog(func_eval_SDC[ii, :][:-1], error_SDC['pos'][value, ii, :][:-1], ls='solid', color=color[ii], marker=shape[ii], label='k={}'.format(jj)) + # ax1.loglog(func_eval_picard[ii,:][:-1], error_picard['pos'][value, ii, :][:-1], ls='--', color=color[ii], marker=shape[ii]) + + # ax2.loglog(func_eval_SDC[ii, :][:-1], error_SDC['vel'][value, ii, :][:-1], ls='solid', color=color[ii], marker=shape[ii], label='k={}'.format(jj)) + # ax2.loglog(func_eval_picard[ii,:][:-1], error_picard['vel'][value, ii, :][:-1], ls='--', color=color[ii], marker=shape[ii]) + # + # ============================================================================= + ax1.loglog( + func_eval_SDC[ii, :], + error_SDC['pos'][value, ii, :], + ls='solid', + color=color[ii], + marker=shape[ii], + label='k={}'.format(jj), + ) + ax1.loglog( + func_eval_picard[ii, :], error_picard['pos'][value, ii, :], ls='--', color=color[ii], marker=shape[ii] + ) + + ax2.loglog( + func_eval_SDC[ii, :], + error_SDC['vel'][value, ii, :], + ls='solid', + color=color[ii], + marker=shape[ii], + label='k={}'.format(jj), + ) + ax2.loglog( + func_eval_picard[ii, :], error_picard['vel'][value, ii, :], ls='--', color=color[ii], marker=shape[ii] + ) + + xmin = np.min(ax1.get_xlim()) + xmax = np.max(ax2.get_xlim()) + xmin = round(xmin, -3) + xmax = round(xmax, -3) + + xx = np.linspace(np.log(xmin), np.log(xmax), 5) + xx = 3**xx + xx = xx[np.where(xx < xmax)] + # xx=[2*1e+3,4*1e+3, 8*1e+3] + ax1.grid(True) + + ax1.set_title("$M={}$".format(self.num_nodes)) + ax1.set_xlabel("Number of RHS evaluations") + ax1.set_ylabel(r'$\Delta x^{\mathrm{(rel)}}_{%d}$' % (value + 1)) + ax1.loglog([], [], color="black", ls="--", label="Picard iteration") + ax1.loglog([], [], color="black", ls="solid", label="Boris-SDC iteration") + + ax1.set_xticks(xx) + ax1.xaxis.set_major_formatter(self.format_number) + ax1.set_ylim(np.min(ax1.get_ylim()), np.max(ax2.get_ylim())) + # ax1.set_ylim(1e-14, 1e+0) + + ax1.legend(loc="best", fontsize=12) + fig1.tight_layout() + fig1.savefig(self.cwd + "data/f_eval_pos_{}_M={}.pdf".format(value, self.num_nodes)) + + ax2.grid(True) + ax2.xaxis.set_major_formatter(self.format_number) + ax2.set_title("$M={}$".format(self.num_nodes)) + ax2.set_xlabel("Number of RHS evaluations") + ax2.set_ylabel(r'$\Delta v^{\mathrm{(rel)}}_{%d}$' % (value + 1)) + ax2.loglog([], [], color="black", ls="--", label="Picard iteration") + ax2.loglog([], [], color="black", ls="solid", label="Boris-SDC iteration") + ax2.set_xticks(xx) + ax2.xaxis.set_major_formatter(self.format_number) + ax2.set_ylim(np.min(ax1.get_ylim()), np.max(ax2.get_ylim())) + # ax2.set_ylim(1e-14, 1e+0) + ax2.legend(loc="best", fontsize=12) + fig2.tight_layout() + fig2.savefig(self.cwd + "data/f_eval_vel_{}_M={}.pdf".format(value, self.num_nodes)) + plt.show() + + def organize_data(self, filename='data/dt_vs_local_errorSDC.csv', time_iter=None): + """ + Organize data according to plot + Args: + filename (string): data to find approximate order + time_iter : in case it you used different time iterations + """ + if 
time_iter == None: + time_iter = self.time_iter + + items = np.genfromtxt(filename, delimiter=',', skip_header=1) + time = items[:, 0] + N = int(np.size(time) / time_iter) + + error_data = {'pos': np.zeros([3, N, time_iter]), 'vel': np.zeros([3, N, time_iter])} + order_data = {'pos': np.zeros([N, time_iter, 2]), 'vel': np.zeros([N, time_iter, 2])} + time_data = np.zeros([N, time_iter]) + convline = {'pos': np.zeros([3, N, time_iter]), 'vel': np.zeros([3, N, time_iter])} + + time_data = time.reshape([N, time_iter]) + + order_data['pos'][:, :, 0] = items[:, 1].reshape([N, time_iter]) + order_data['pos'][:, :, 1] = items[:, 2].reshape([N, time_iter]) + order_data['vel'][:, :, 0] = items[:, 6].reshape([N, time_iter]) + order_data['vel'][:, :, 1] = items[:, 7].reshape([N, time_iter]) + + for ii in range(0, 3): + error_data['pos'][ii, :, :] = items[:, ii + 3].reshape([N, time_iter]) + error_data['vel'][ii, :, :] = items[:, ii + 8].reshape([N, time_iter]) + + for jj in range(0, 3): + if jj == 2: + convline['pos'][jj, :, :] = ( + (time_data / time_data[0, 0]).T ** order_data['pos'][:, jj, 1] + ).T * error_data['pos'][jj, :, 0][:, None] + convline['vel'][jj, :, :] = ( + (time_data / time_data[0, 0]).T ** order_data['vel'][:, jj, 1] + ).T * error_data['vel'][jj, :, 0][:, None] + else: + convline['pos'][jj, :, :] = ( + (time_data / time_data[0, 0]).T ** order_data['pos'][:, jj, 0] + ).T * error_data['pos'][jj, :, 0][:, None] + convline['vel'][jj, :, :] = ( + (time_data / time_data[0, 0]).T ** order_data['vel'][:, jj, 0] + ).T * error_data['vel'][jj, :, 0][:, None] + + return [N, time_data, error_data, order_data, convline] + + # find approximate order + def find_approximate_order(self, filename='data/dt_vs_local_errorSDC.csv'): + """ + This function finds approximate convergence rate and saves in the data folder + Args: + filename: given data + return: + None + """ + [N, time_data, error_data, order_data, convline] = self.organize_data(self.cwd + filename) + approx_order = {'pos': np.zeros([1, N]), 'vel': np.zeros([1, N])} + + for jj in range(0, 3): + if jj == 0: + file = open(self.cwd + 'data/{}_order_vs_approx_order.csv'.format(self.error_type), 'w') + + else: + file = open(self.cwd + 'data/{}_order_vs_approx_order.csv'.format(self.error_type), 'a') + + for ii in range(0, N): + approx_order['pos'][0, ii] = np.polyfit( + np.log(time_data[ii, :]), np.log(error_data['pos'][jj, ii, :]), 1 + )[0].real + approx_order['vel'][0, ii] = np.polyfit( + np.log(time_data[ii, :]), np.log(error_data['vel'][jj, ii, :]), 1 + )[0].real + if jj == 2: + file.write( + str(order_data['pos'][:, jj, 1]) + + ' | ' + + str(approx_order['pos'][0]) + + ' | ' + + str(order_data['vel'][:, jj, 1]) + + ' | ' + + str(approx_order['vel'][0]) + + '\n' + ) + else: + file.write( + str(order_data['pos'][:, jj, 0]) + + ' | ' + + str(approx_order['pos'][0]) + + ' | ' + + str(order_data['vel'][:, jj, 0]) + + ' | ' + + str(approx_order['vel'][0]) + + '\n' + ) + file.close() diff --git a/pySDC/projects/Second_orderSDC/stability_simulation.py b/pySDC/projects/Second_orderSDC/stability_simulation.py new file mode 100644 index 0000000000..f8889d1e40 --- /dev/null +++ b/pySDC/projects/Second_orderSDC/stability_simulation.py @@ -0,0 +1,324 @@ +import numpy as np +import matplotlib.pyplot as plt +from pySDC.core.Errors import ProblemError +from pySDC.core.Step import step + +from pySDC.projects.Second_orderSDC.plot_helper import set_fixed_plot_params + + +class StabilityImplementation: + """ + This class computes and implements stability region of 
the harmonic oscillator problem
+    for different numerical methods (SDC, Picard, RKN).
+
+    Parameters
+    ----------
+    description: gets the default parameters for the problem class
+    kappa_max: maximum value kappa can reach
+    mu_max: maximum value mu can reach
+    Num_iter: number of grid points for kappa and mu on the x and y axes
+    cwd: current working directory
+
+    """
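+
+    # Minimal usage sketch (illustrative only; it assumes the default problem setup
+    # provided by harmonic_oscillator_params.py in this project):
+    #
+    #     from pySDC.projects.Second_orderSDC.harmonic_oscillator_params import (
+    #         get_default_harmonic_oscillator_description,
+    #     )
+    #
+    #     description = get_default_harmonic_oscillator_description()
+    #     stab = StabilityImplementation(description, kappa_max=20, mu_max=20, Num_iter=(400, 400))
+    #     stab.run_SDC_stability()  # contour plot of the SDC stability region
+    #     stab.run_RKN_stability()  # same plot for the RKN-4 reference method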
+
+    def __init__(self, description, kappa_max=20, mu_max=20, Num_iter=(400, 400), cwd=''):
+        self.description = description
+        self.kappa_max = kappa_max
+        self.mu_max = mu_max
+        self.kappa_iter = Num_iter[0]
+        self.mu_iter = Num_iter[1]
+        self.lambda_kappa = np.linspace(0.0, self.kappa_max, self.kappa_iter)
+        self.lambda_mu = np.linspace(1e-10, self.mu_max, self.mu_iter)
+
+        self.K_iter = description['step_params']['maxiter']
+        self.num_nodes = description['sweeper_params']['num_nodes']
+        self.dt = description['level_params']['dt']
+        self.SDC, self.Ksdc, self.picard, self.Kpicard = self.stability_data()
+        self.cwd = cwd
+
+    def stability_data(self):
+        """
+        Computes the stability domain matrices for the harmonic oscillator problem
+
+        Returns:
+            numpy.ndarray: domain_SDC
+            numpy.ndarray: domain_Ksdc
+            numpy.ndarray: domain_picard
+            numpy.ndarray: domain_Kpicard
+        """
+        S = step(description=self.description)
+        # Define L to get access to level params and functions
+        L = S.levels[0]
+        # Number of nodes
+        num_nodes = L.sweep.coll.num_nodes
+        # Time step
+        dt = L.params.dt
+
+        # Define collocation matrices used to build the stability function
+        Q = L.sweep.coll.Qmat[1:, 1:]
+        QQ = np.dot(Q, Q)
+        Q_coll = np.block([[QQ, np.zeros([num_nodes, num_nodes])], [np.zeros([num_nodes, num_nodes]), Q]])
+        qQ = np.dot(L.sweep.coll.weights, Q)
+        # Block matrix of ones for the position and velocity parts
+        ones = np.block([[np.ones(num_nodes), np.zeros(num_nodes)], [np.zeros(num_nodes), np.ones(num_nodes)]])
+        # Combine all of the weights into a single matrix
+        q_mat = np.block(
+            [
+                [dt**2 * qQ, np.zeros(num_nodes)],
+                [np.zeros(num_nodes), dt * L.sweep.coll.weights],
+            ]
+        )
+        # Zero matrices to store the stability region values
+        domain_SDC = np.zeros((self.kappa_iter, self.mu_iter), dtype="complex")
+        domain_picard = np.zeros((self.kappa_iter, self.mu_iter))
+        domain_Ksdc = np.zeros((self.kappa_iter, self.mu_iter))
+        domain_Kpicard = np.zeros((self.kappa_iter, self.mu_iter))
+        # Loop over the different values of kappa and mu
+        for i in range(0, self.kappa_iter):
+            for j in range(0, self.mu_iter):
+                k = self.lambda_kappa[i]
+                mu = self.lambda_mu[j]
+                # Build right-hand side matrix for the harmonic oscillator problem
+                F = np.block(
+                    [
+                        [-k * np.eye(num_nodes), -mu * np.eye(num_nodes)],
+                        [-k * np.eye(num_nodes), -mu * np.eye(num_nodes)],
+                    ]
+                )
+
+                if self.K_iter != 0:
+                    # If the number of iterations is nonzero, do SDC and Picard iterations
+                    lambdas = [k, mu]
+                    SDC_mat_sweep, Ksdc_eigval = L.sweep.get_scalar_problems_manysweep_mats(
+                        nsweeps=self.K_iter, lambdas=lambdas
+                    )
+                    # If picard_mats_sweep=True, also do the Picard iteration
+                    if L.sweep.params.picard_mats_sweep:
+                        (
+                            Picard_mat_sweep,
+                            Kpicard_eigval,
+                        ) = L.sweep.get_scalar_problems_picardsweep_mats(nsweeps=self.K_iter, lambdas=lambdas)
+                    else:
+                        raise ProblemError("Picard iteration is not enabled. Set 'picard_mats_sweep' to True to enable.")
+                    domain_Ksdc[i, j] = Ksdc_eigval
+                    if L.sweep.params.picard_mats_sweep:
+                        domain_Kpicard[i, j] = Kpicard_eigval
+
+                else:
+                    # Otherwise solve the collocation problem directly
+                    SDC_mat_sweep = np.linalg.inv(np.eye(2 * num_nodes) - dt * np.dot(Q_coll, F))
+                # Collocation update for both Picard and SDC iterations
+                if L.sweep.params.do_coll_update:
+                    FSDC = np.dot(F, SDC_mat_sweep)
+                    Rsdc_mat = np.array([[1.0, dt], [0, 1.0]]) + np.dot(q_mat, FSDC) @ ones.T
+                    stab_func, v = np.linalg.eig(Rsdc_mat)
+
+                    if L.sweep.params.picard_mats_sweep:
+                        FPicard = np.dot(F, Picard_mat_sweep)
+                        Rpicard_mat = np.array([[1.0, dt], [0, 1.0]]) + np.dot(q_mat, FPicard) @ ones.T
+                        stab_func_picard, v = np.linalg.eig(Rpicard_mat)
+                else:
+                    raise ProblemError("Collocation update step works only when 'do_coll_update' is set to True.")
+                # Find and store the spectral radius
+                domain_SDC[i, j] = np.max(np.abs(stab_func))
+                if L.sweep.params.picard_mats_sweep:
+                    domain_picard[i, j] = np.max(np.abs(stab_func_picard))
+
+        return (
+            dt * domain_SDC.real,
+            dt * domain_Ksdc.real,
+            dt * domain_picard.real,
+            dt * domain_Kpicard.real,
+        )
+
+    def stability_function_RKN(self, k, mu, dt):
+        """
+        Stability function of the RKN method
+
+        Returns:
+            float: maximum absolute value of the eigenvalues
+        """
+        A = np.array([[0, 0, 0, 0], [0.5, 0, 0, 0], [0, 0.5, 0, 0], [0, 0, 1, 0]])
+        B = np.array([[0, 0, 0, 0], [0.125, 0, 0, 0], [0.125, 0, 0, 0], [0, 0, 0.5, 0]])
+        c = np.array([0, 0.5, 0.5, 1])
+        b = np.array([1 / 6, 2 / 6, 2 / 6, 1 / 6])
+        bA = np.array([1 / 6, 1 / 6, 1 / 6, 0])
+        L = np.eye(4) + k * (dt**2) * B + mu * dt * A
+        R = np.block([[-k * np.ones(4)], [-(k * c + mu * np.ones(4))]])
+
+        K = np.linalg.inv(L) @ R.T
+        C = np.block([[dt**2 * bA], [dt * b]])
+        Y = np.array([[1, dt], [0, 1]]) + C @ K
+        eigval = np.linalg.eigvals(Y)
+
+        return np.max(np.abs(eigval))
+
+    def stability_data_RKN(self):
+        """
+        Computes the RKN stability values and stores them in a matrix
+
+        Returns:
+            numpy.ndarray: stab_RKN
+        """
+        stab_RKN = np.zeros([self.kappa_iter, self.mu_iter])
+        for ii, kk in enumerate(self.lambda_kappa):
+            for jj, mm in enumerate(self.lambda_mu):
+                stab_RKN[jj, ii] = self.stability_function_RKN(kk, mm, self.dt)
+
+        return stab_RKN
+
+    def plot_stability(self, region, title=""):  # pragma: no cover
+        """
+        Plotting routine for the moduli
+
+        Args:
+            region (numpy.ndarray): moduli
+            title: title for the plot
+        """
+        set_fixed_plot_params()
+        lam_k_max = np.amax(self.lambda_kappa)
+        lam_mu_max = np.amax(self.lambda_mu)
+
+        plt.figure()
+        levels = np.array([0.25, 0.5, 0.75, 0.9, 1.0, 1.1])
+
+        CS1 = plt.contour(self.lambda_kappa, self.lambda_mu, region.T, levels, colors='k', linestyles="dashed")
+        # CS2 = plt.contour(self.lambda_k, self.lambda_mu, np.absolute(region.T), [1.0], colors='r')
+
+        plt.clabel(CS1, inline=True, fmt="%3.2f")
+
+        plt.gca().set_xticks(np.arange(0, int(lam_k_max) + 3, 3))
+        plt.gca().set_yticks(np.arange(0, int(lam_mu_max) + 3, 3))
+        plt.gca().tick_params(axis="both", which="both")
+        plt.xlim([0.0, lam_k_max])
+        plt.ylim([0.0, lam_mu_max])
+
+        plt.xlabel(r"$\Delta t\cdot \kappa$", labelpad=0.0)
+        plt.ylabel(r"$\Delta t\cdot \mu$", labelpad=0.0)
+        if self.RKN:
+            plt.title(f"{title}")
+        if self.radius:
+            plt.title("{} $M={}$".format(title, self.num_nodes))
+        else:
+            plt.title(r"{} $M={},\ K={}$".format(title, self.num_nodes, self.K_iter))
+        plt.tight_layout()
+        plt.savefig(self.cwd + "data/M={}_K={}_region_{}.pdf".format(self.num_nodes, self.K_iter, title))
+
+    def run_SDC_stability(self):  # pragma: no cover
+        self.RKN = False
+        self.radius = False
+        self.plot_stability(self.SDC, title="SDC stability region")
+
+    def run_Picard_stability(self):  # pragma: no cover
+        self.RKN = False
+        self.radius = False
+        self.plot_stability(self.picard, title="Picard stability region")
+
+    def run_Ksdc(self):  # pragma: no cover
+        self.RKN = False
+        self.radius = True
+        self.plot_stability(self.Ksdc, title="$K_{sdc}$ spectral radius")
+
+    def run_Kpicard(self):  # pragma: no cover
+        self.RKN = False
+        self.radius = True
+        self.plot_stability(self.Kpicard, title="$K_{picard}$ spectral radius")
+
+    def run_RKN_stability(self):  # pragma: no cover
+        self.RKN = True
+        self.radius = False
+        region_RKN = self.stability_data_RKN()
+        self.plot_stability(region_RKN.T, title='RKN-4 stability region')
+
+
+def check_points_and_interval(description, helper_params, point, compute_interval=False, check_stability_point=False):
+    """
+    Checks the stability of the given (kappa, mu) point for several (quad_type, num_nodes, max_iter)
+    combinations and, optionally, computes the largest stable kappa value.
+    """
+    # Storage for stability interval and stability check
+    interval_data = []
+    points_data = []
+
+    # Loop through different numbers of nodes and maximum iterations
+    for quad_type in helper_params['quad_type_list']:
+        for num_nodes in helper_params['num_nodes_list']:
+            for max_iter in helper_params['max_iter_list']:
+                # Update simulation parameters
+                description['sweeper_params']['num_nodes'] = num_nodes
+                description['sweeper_params']['quad_type'] = quad_type
+                description['step_params']['maxiter'] = max_iter
+
+                # Create a StabilityImplementation instance for the stability check
+                stab_model = StabilityImplementation(
+                    description, kappa_max=point[0], mu_max=point[1], Num_iter=helper_params['Num_iter']
+                )
+                if compute_interval:
+                    # Extract the kappa values for which the Picard iteration stays stable (value <= 1)
+                    kappa_max_interval = 0.0
+                    mask = stab_model.picard <= 1 + 1e-14
+                    for ii in range(len(mask)):
+                        if mask[ii]:
+                            kappa_max_interval = stab_model.lambda_kappa[ii]
+                        else:
+                            break
+
+                    # Add row to the interval data
+                    interval_data.append([quad_type, num_nodes, max_iter, kappa_max_interval])
+
+                if check_stability_point:
+                    # Check stability and print results
+                    if stab_model.SDC[-1, -1] <= 1:
+                        stability_result = "Stable"
+                    else:
+                        stability_result = "Unstable. 
Increase M or K" + + # Add row to the results data + points_data.append( + [quad_type, num_nodes, max_iter, point, stability_result, stab_model.SDC[-1, -1]] + ) + if compute_interval: + return interval_data + else: + return points_data + + +def compute_and_generate_table( + description, + helper_params, + point, + compute_interval=False, + save_interval_file=False, + interval_filename='./data/stab_interval.txt', + check_stability_point=False, + save_points_table=False, + points_table_filename='./data/point_table.txt', + quadrature_list=('GAUSS', 'LOBATTO'), +): # pragma: no cover + from tabulate import tabulate + + if compute_interval: + interval_data = check_points_and_interval(description, helper_params, point, compute_interval=compute_interval) + else: + points_data = check_points_and_interval( + description, helper_params, point, check_stability_point=check_stability_point + ) + + # Define column names for interval data + interval_headers = ["Quad Type", "Num Nodes", "Max Iter", 'kappa_max'] + + # Define column names for results data + points_headers = ["Quad Type", "Num Nodes", "Max Iter", "(kappa, mu)", "Stability", "Spectral Radius"] + # Print or save the tables using tabulate + if save_interval_file and compute_interval: + interval_table_str = tabulate(interval_data, headers=interval_headers, tablefmt="grid") + with open(interval_filename, 'w') as file: + file.write(interval_table_str) + print(f"Stability Interval Table saved to {interval_filename}") + + if save_points_table and check_stability_point: + points_table_str = tabulate(points_data, headers=points_headers, tablefmt="grid") + with open(points_table_filename, 'w') as file: + file.write(points_table_str) + print(f"Stability Results Table saved to {points_table_filename}") + + if compute_interval: + print("Stability Interval Table:") + print(tabulate(interval_data, headers=interval_headers, tablefmt="grid")) + + if check_stability_point: + print("\nStability Results Table:") + print(tabulate(points_data, headers=points_headers, tablefmt="grid")) diff --git a/pySDC/tests/test_projects/test_second_orderSDC/test_convergence.py b/pySDC/tests/test_projects/test_second_orderSDC/test_convergence.py index 3d16a049f4..a459cb269f 100644 --- a/pySDC/tests/test_projects/test_second_orderSDC/test_convergence.py +++ b/pySDC/tests/test_projects/test_second_orderSDC/test_convergence.py @@ -35,11 +35,11 @@ def test_global_convergence(axis): def BorisSDC_global_convergence(): from pySDC.projects.Second_orderSDC.penningtrap_params import penningtrap_params - from pySDC.projects.Second_orderSDC.penningtrap_Simulation import compute_error + from pySDC.projects.Second_orderSDC.penningtrap_Simulation import ComputeError controller_params, description = penningtrap_params() description['level_params']['dt'] = 0.015625 * 2 - conv = compute_error(controller_params, description, time_iter=3, K_iter=(1, 2, 3)) + conv = ComputeError(controller_params, description, time_iter=3, K_iter=(1, 2, 3)) conv.error_type = 'global' conv.compute_global_error_data() @@ -85,13 +85,13 @@ def sort_order(cwd='', filename='data/local_order_vs_approx_order.csv'): def BorisSDC_horizontal_axis(): - from pySDC.projects.Second_orderSDC.penningtrap_Simulation import compute_error + from pySDC.projects.Second_orderSDC.penningtrap_Simulation import ComputeError from pySDC.projects.Second_orderSDC.penningtrap_params import penningtrap_params controller_params, description = penningtrap_params() - description['level_params']['dt'] = 0.015625 / 8 + description['level_params']['dt'] 
= 0.015625 / 4 - conv = compute_error(controller_params, description, time_iter=3) + conv = ComputeError(controller_params, description, time_iter=3) conv.compute_local_error_data() conv.find_approximate_order() @@ -113,13 +113,13 @@ def test_horizontal_axis(value): def BorisSDC_vertical_axis(): - from pySDC.projects.Second_orderSDC.penningtrap_Simulation import compute_error + from pySDC.projects.Second_orderSDC.penningtrap_Simulation import ComputeError from pySDC.projects.Second_orderSDC.penningtrap_params import penningtrap_params controller_params, description = penningtrap_params() - description['level_params']['dt'] = 0.015625 * 8 + description['level_params']['dt'] = 0.015625 * 4 - conv = compute_error(controller_params, description, time_iter=3) + conv = ComputeError(controller_params, description, time_iter=3) conv.compute_local_error_data() conv.find_approximate_order() @@ -150,7 +150,7 @@ def numerical_order(time_data, error): @pytest.mark.parametrize('sweeper_name', METHODS) def test_RKN_VV(sweeper_name, cwd=''): import numpy as np - from pySDC.projects.Second_orderSDC.penningtrap_Simulation import compute_error + from pySDC.projects.Second_orderSDC.penningtrap_Simulation import ComputeError from pySDC.projects.Second_orderSDC.penningtrap_params import penningtrap_params controller_params, description = penningtrap_params() @@ -160,7 +160,7 @@ def test_RKN_VV(sweeper_name, cwd=''): time_iter = np.array([1, 1 / 2, 1 / 4]) time = description['level_params']['dt'] * time_iter - P = compute_error(controller_params, description, time_iter=3) + P = ComputeError(controller_params, description, time_iter=3) if sweeper_name == 'Velocity_Verlet': P.compute_global_error_data(VV=True, work_counter=True) diff --git a/pySDC/tests/test_projects/test_second_orderSDC/test_stability.py b/pySDC/tests/test_projects/test_second_orderSDC/test_stability.py index d0575367d6..54e6f55dcf 100644 --- a/pySDC/tests/test_projects/test_second_orderSDC/test_stability.py +++ b/pySDC/tests/test_projects/test_second_orderSDC/test_stability.py @@ -2,51 +2,56 @@ @pytest.mark.base -def test_stability(): +def test_stability_SDC(): """ Stability domain test only the values of mu=[6, 20] and kappa=[3, 20] It is stable at mu=6, kappa=3 otherwise it is instable """ import numpy as np - from pySDC.projects.Second_orderSDC.penningtrap_Simulation import Stability_implementation - from pySDC.projects.Second_orderSDC.dampedharmonic_oscillator_run_stability import dampedharmonic_oscillator_params - - description = dampedharmonic_oscillator_params() - Stability = Stability_implementation(description, kappa_max=14, mu_max=14, Num_iter=(2, 2)) - Stability.lambda_kappa = np.array([6, 20]) - Stability.lambda_mu = np.array([3, 20]) - SDC, KSDC, *_ = Stability.stability_data() - assert ( - SDC[0, 0] <= 1 - ), f'The SDC method is instable at mu={Stability.lambda_mu[0]} and kappa={Stability.lambda_kappa[0]}' - assert ( - SDC[-1, -1] > 1 - ), f'The SDC method is stable at mu={Stability.lambda_mu[-1]} and kappa={Stability.lambda_kappa[-1]}' + from pySDC.projects.Second_orderSDC.stability_simulation import check_points_and_interval + from pySDC.projects.Second_orderSDC.harmonic_oscillator_params import get_default_harmonic_oscillator_description + + description = get_default_harmonic_oscillator_description() + # Additonal params to compute stability points + helper_params = { + 'quad_type_list': ('GAUSS',), + 'Num_iter': (2, 2), + 'num_nodes_list': np.arange(3, 4, 1), + 'max_iter_list': np.arange(5, 6, 1), + } + + points = ((3, 6), (20, 
20)) + # Iterate through points and perform stability check + point0 = check_points_and_interval(description, helper_params, points[0], check_stability_point=True) + point1 = check_points_and_interval(description, helper_params, points[1], check_stability_point=True) + + assert point0[-1][-1] <= 1, f'The SDC method is instable at mu={points[0][1]} and kappa={points[0][0]}' + assert point1[-1][-1] > 1, f'The SDC method is stable at mu={points[1][1]} and kappa={points[1][0]}' @pytest.mark.base def test_RKN_stability(): """ - Stability domain test - only the values of mu=[6, 20] and kappa=[3, 20] + Stability domain test for RKN + only the values of mu=[1, 20] and kappa=[1, 20] It is stable at mu=6, kappa=3 otherwise it is instable """ import numpy as np - from pySDC.projects.Second_orderSDC.penningtrap_Simulation import Stability_implementation - from pySDC.projects.Second_orderSDC.dampedharmonic_oscillator_run_stability import dampedharmonic_oscillator_params - - description = dampedharmonic_oscillator_params() - Stability = Stability_implementation(description, kappa_max=14, mu_max=14, Num_iter=(2, 2)) - Stability.lambda_kappa = np.array([1, 20]) - Stability.lambda_mu = np.array([1, 20]) - stab_RKN = Stability.stability_data_RKN() + from pySDC.projects.Second_orderSDC.stability_simulation import StabilityImplementation + from pySDC.projects.Second_orderSDC.harmonic_oscillator_params import get_default_harmonic_oscillator_description + + description = get_default_harmonic_oscillator_description() + stability = StabilityImplementation(description, kappa_max=14, mu_max=14, Num_iter=(2, 2)) + stability.lambda_kappa = np.array([1, 20]) + stability.lambda_mu = np.array([1, 20]) + stab_RKN = stability.stability_data_RKN() assert ( stab_RKN[0, 0] <= 1 - ), f'The SDC method is instable at mu={Stability.lambda_mu[0]} and kappa={Stability.lambda_kappa[0]}' + ), f'The RKN method is instable at mu={stability.lambda_mu[0]} and kappa={stability.lambda_kappa[0]}' assert ( stab_RKN[-1, -1] > 1 - ), f'The SDC method is stable at mu={Stability.lambda_mu[-1]} and kappa={Stability.lambda_kappa[-1]}' + ), f'The RKN method is stable at mu={stability.lambda_mu[-1]} and kappa={stability.lambda_kappa[-1]}' if __name__ == '__main__':