diff --git a/.github/workflows/SpellCheck.yml b/.github/workflows/SpellCheck.yml index fb71fe45af9..366cb1183a0 100644 --- a/.github/workflows/SpellCheck.yml +++ b/.github/workflows/SpellCheck.yml @@ -10,4 +10,4 @@ jobs: - name: Checkout Actions Repository uses: actions/checkout@v4 - name: Check spelling - uses: crate-ci/typos@v1.16.21 + uses: crate-ci/typos@v1.16.23 diff --git a/NEWS.md b/NEWS.md index 5dd911391a6..265979c3508 100644 --- a/NEWS.md +++ b/NEWS.md @@ -7,6 +7,7 @@ for human readability. ## Changes when updating to v0.6 from v0.5.x #### Added +- AMR for hyperbolic-parabolic equations on 2D `P4estMesh` #### Changed diff --git a/Project.toml b/Project.toml index da618dc78c1..ace71a50d8b 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "Trixi" uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb" authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "] -version = "0.6.1-pre" +version = "0.6.4-pre" [deps] CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2" @@ -66,7 +66,7 @@ Makie = "0.19" MuladdMacro = "0.2.2" Octavian = "0.3.5" OffsetArrays = "1.3" -P4est = "0.4" +P4est = "0.4.9" Polyester = "0.7.5" PrecompileTools = "1.1" Printf = "1" @@ -84,7 +84,7 @@ StaticArrays = "1" StrideArrays = "0.1.18" StructArrays = "0.6" SummationByPartsOperators = "0.5.41" -T8code = "0.4.1" +T8code = "0.4.3" TimerOutputs = "0.5" Triangulate = "2.0" TriplotBase = "0.1" diff --git a/docs/src/meshes/dgmulti_mesh.md b/docs/src/meshes/dgmulti_mesh.md index fc086bba146..efa71334bf8 100644 --- a/docs/src/meshes/dgmulti_mesh.md +++ b/docs/src/meshes/dgmulti_mesh.md @@ -21,7 +21,7 @@ around Jonathan Shewchuk's [Triangle](https://www.cs.cmu.edu/~quake/triangle.htm ## The `DGMulti` solver type -Trixi.jl solvers on simplicial meshes use the `[DGMulti](@ref)` solver type, which allows users to specify +Trixi.jl solvers on simplicial meshes use the [`DGMulti`](@ref) solver type, which allows users to specify `element_type` and `approximation_type` in addition to `polydeg`, `surface_flux`, `surface_integral`, and `volume_integral`. diff --git a/docs/src/parallelization.md b/docs/src/parallelization.md index fa6fc1a5d32..f599eb5fafe 100644 --- a/docs/src/parallelization.md +++ b/docs/src/parallelization.md @@ -53,30 +53,43 @@ a system-provided MPI installation with Trixi.jl can be found in the following s ### [Using a system-provided MPI installation](@id parallel_system_MPI) -When using Trixi.jl with a system-provided MPI backend the underlying -[`p4est`](https://github.com/cburstedde/p4est) and [`t8code`](https://github.com/DLR-AMR/t8code) -libraries need to be compiled with the same MPI installation. Therefore, you also need to -use system-provided `p4est` and `t8code` installations (for notes on how to install `p4est` -and `t8code` see e.g. [here](https://github.com/cburstedde/p4est/blob/master/README) and -[here](https://github.com/DLR-AMR/t8code/wiki/Installation), use the configure option -`--enable-mpi`). Note that `t8code` already comes with a `p4est` installation, so it suffices -to install `t8code`. In addition, [P4est.jl](https://github.com/trixi-framework/P4est.jl) and -[T8code.jl](https://github.com/DLR-AMR/T8code.jl) need to be configured to use the custom -installations. Follow the steps described -[here](https://github.com/DLR-AMR/T8code.jl/blob/main/README.md#installation) and -[here](https://github.com/trixi-framework/P4est.jl/blob/main/README.md#installation) for the -configuration. 
The paths that point to `libp4est.so` (and potentially to `libsc.so`) need to be -the same for P4est.jl and T8code.jl. This could e.g. be `libp4est.so` that usually can be found -in `lib/` or `local/lib/` in the installation directory of `t8code`. -In total, in your active Julia project you should have a LocalPreferences.toml file with sections -`[MPIPreferences]`, `[T8code]` and `[P4est]` as well as an entry `MPIPreferences` in your -Project.toml to use a custom MPI installation. A `LocalPreferences.toml` file +When using Trixi.jl with a system-provided MPI backend, the underlying +[`p4est`](https://github.com/cburstedde/p4est), [`t8code`](https://github.com/DLR-AMR/t8code) +and [`HDF5`](https://github.com/HDFGroup/hdf5) libraries need to be compiled with the same MPI +installation. If you want to use `p4est` (via the `P4estMesh`) or `t8code` (via the `T8codeMesh`) +from Trixi.jl, you also need to use system-provided `p4est` or `t8code` installations +(for notes on how to install `p4est` and `t8code` see, e.g., [here](https://github.com/cburstedde/p4est/blob/master/README) +and [here](https://github.com/DLR-AMR/t8code/wiki/Installation); use the configure option +`--enable-mpi`). Otherwise, warnings will be emitted stating that no preference is set for P4est.jl and +T8code.jl; these warnings can be ignored if you do not use these libraries from Trixi.jl. Note that +`t8code` already comes with a `p4est` installation, so it suffices to install `t8code`. +In order to use system-provided `p4est` and `t8code` installations, [P4est.jl](https://github.com/trixi-framework/P4est.jl) +and [T8code.jl](https://github.com/DLR-AMR/T8code.jl) need to be configured to use the custom +installations. Follow the steps described [here](https://github.com/DLR-AMR/T8code.jl/blob/main/README.md#installation) and +[here](https://github.com/trixi-framework/P4est.jl/blob/main/README.md#installation) for the configuration. +The paths that point to `libp4est.so` (and potentially to `libsc.so`) need to be +the same for P4est.jl and T8code.jl. This could, e.g., be the `libp4est.so` that can usually be found +in `lib/` or `local/lib/` in the installation directory of `t8code`. Note that the `T8codeMesh` +does not support MPI yet. +The preferences for [HDF5.jl](https://github.com/JuliaIO/HDF5.jl) always need to be set, even if you +do not want to use `HDF5` from Trixi.jl; see also [issue #1079 in HDF5.jl](https://github.com/JuliaIO/HDF5.jl/issues/1079). +To set the preferences for HDF5.jl, follow the instructions described +[here](https://trixi-framework.github.io/Trixi.jl/stable/parallelization/#Using-parallel-input-and-output). + +In total, in your active Julia project you should have a `LocalPreferences.toml` file with sections +`[MPIPreferences]`, `[T8code]` (only needed if `T8codeMesh` is used), `[P4est]` (only needed if +`P4estMesh` is used), and `[HDF5]` as well as an entry `MPIPreferences` in your +`Project.toml` to use a custom MPI installation. 
A `LocalPreferences.toml` file created as described above might look something like the following: ```toml [HDF5] libhdf5 = "/usr/lib/x86_64-linux-gnu/hdf5/openmpi/libhdf5.so" libhdf5_hl = "/usr/lib/x86_64-linux-gnu/hdf5/openmpi/libhdf5_hl.so" +[HDF5_jll] +libhdf5_hl_path = "/usr/lib/x86_64-linux-gnu/hdf5/openmpi/libhdf5_hl.so" +libhdf5_path = "/usr/lib/x86_64-linux-gnu/hdf5/openmpi/libhdf5.so" + [MPIPreferences] __clear__ = ["preloads_env_switch"] _format = "1.0" @@ -97,6 +110,22 @@ libsc = "/home/mschlott/hackathon/libtrixi/t8code/install/lib/libsc.so" libt8 = "/home/mschlott/hackathon/libtrixi/t8code/install/lib/libt8.so" ``` +This file is created with the following sequence of commands: +```julia +julia> using MPIPreferences +julia> MPIPreferences.use_system_binary() +``` +Restart the Julia REPL, then set the remaining preferences: +```julia +julia> using P4est +julia> P4est.set_library_p4est!("/home/mschlott/hackathon/libtrixi/t8code/install/lib/libp4est.so") +julia> P4est.set_library_sc!("/home/mschlott/hackathon/libtrixi/t8code/install/lib/libsc.so") +julia> using T8code +julia> T8code.set_libraries_path!("/home/mschlott/hackathon/libtrixi/t8code/install/lib/") +julia> using HDF5 +julia> HDF5.API.set_libraries!("/usr/lib/x86_64-linux-gnu/hdf5/openmpi/libhdf5.so", "/usr/lib/x86_64-linux-gnu/hdf5/openmpi/libhdf5_hl.so") +``` +After the preferences are set, restart the Julia REPL again. ### [Usage](@id parallel_usage) @@ -218,7 +247,7 @@ julia> HDF5.API.set_libraries!("/path/to/your/libhdf5.so", "/path/to/your/libhdf5_hl.so") ``` For more information see also the [documentation of HDF5.jl](https://juliaio.github.io/HDF5.jl/stable/mpi/). In total, you should -have a file called LocalPreferences.toml in the project directory that contains a section +have a file called `LocalPreferences.toml` in the project directory that contains a section `[MPIPreferences]`, a section `[HDF5]` with entries `libhdf5` and `libhdf5_hl`, a section `[P4est]` with the entry `libp4est` as well as a section `[T8code]` with the entries `libt8`, `libp4est` and `libsc`. diff --git a/docs/src/performance.md b/docs/src/performance.md index bbe3a3390b7..df66f451b79 100644 --- a/docs/src/performance.md +++ b/docs/src/performance.md @@ -34,7 +34,7 @@ Hence, you should at least investigate the performance roughly by comparing the timings of several elixirs. Deeper investigations and micro-benchmarks should usually use [BenchmarkTools.jl](https://github.com/JuliaCI/BenchmarkTools.jl). For example, the following steps were used to benchmark the changes introduced in -https://github.com/trixi-framework/Trixi.jl/pull/256. +[PR #256](https://github.com/trixi-framework/Trixi.jl/pull/256). 1. `git checkout e7ebf3846b3fd62ee1d0042e130afb50d7fe8e48` (new version) 2. Start `julia --threads=1 --check-bounds=no`. 
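As a minimal sketch of such a micro-benchmark (an editorial illustration, not the exact setup used for PR #256; it assumes the chosen elixir defines `semi`, `tspan`, and `sol`):

```julia
# Minimal sketch: micro-benchmark the right-hand side evaluation of a Trixi.jl
# semidiscretization with BenchmarkTools.jl. The elixir path is illustrative.
using BenchmarkTools
using Trixi

trixi_include(joinpath(examples_dir(), "tree_2d_dgsem", "elixir_advection_extended.jl"))

u_ode = copy(sol.u[end])  # current solution as a plain vector
du_ode = similar(u_ode)   # storage for the RHS evaluation

# Interpolate globals with `$` so global-variable access does not distort the timing
@benchmark Trixi.rhs!($du_ode, $u_ode, $semi, $(first(tspan)))
```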
diff --git a/examples/p4est_2d_dgsem/elixir_advection_diffusion_nonperiodic_amr.jl b/examples/p4est_2d_dgsem/elixir_advection_diffusion_nonperiodic_amr.jl new file mode 100644 index 00000000000..f87c0e056ca --- /dev/null +++ b/examples/p4est_2d_dgsem/elixir_advection_diffusion_nonperiodic_amr.jl @@ -0,0 +1,98 @@ +using OrdinaryDiffEq +using Trixi + +############################################################################### +# semidiscretization of the linear advection-diffusion equation + +diffusivity() = 5.0e-2 +advection_velocity = (1.0, 0.0) +equations = LinearScalarAdvectionEquation2D(advection_velocity) +equations_parabolic = LaplaceDiffusion2D(diffusivity(), equations) + +# Create DG solver with polynomial degree = 3 and (local) Lax-Friedrichs/Rusanov flux as surface flux +solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) + +coordinates_min = (-1.0, -0.5) # minimum coordinates (min(x), min(y)) +coordinates_max = (0.0, 0.5) # maximum coordinates (max(x), max(y)) + +trees_per_dimension = (4, 4) +mesh = P4estMesh(trees_per_dimension, + polydeg = 3, initial_refinement_level = 2, + coordinates_min = coordinates_min, coordinates_max = coordinates_max, + periodicity = false) + +# Example setup taken from +# - Truman Ellis, Jesse Chan, and Leszek Demkowicz (2016). +# Robust DPG methods for transient convection-diffusion. +# In: Building bridges: connections and challenges in modern approaches +# to numerical partial differential equations. +# [DOI](https://doi.org/10.1007/978-3-319-41640-3_6). +function initial_condition_eriksson_johnson(x, t, equations) + l = 4 + epsilon = diffusivity() # TODO: this requires epsilon <= 1 / (4 * l) = 0.0625 for the sqrt to be real + lambda_1 = (-1 + sqrt(1 - 4 * epsilon * l)) / (-2 * epsilon) + lambda_2 = (-1 - sqrt(1 - 4 * epsilon * l)) / (-2 * epsilon) + r1 = (1 + sqrt(1 + 4 * pi^2 * epsilon^2)) / (2 * epsilon) + s1 = (1 - sqrt(1 + 4 * pi^2 * epsilon^2)) / (2 * epsilon) + u = exp(-l * t) * (exp(lambda_1 * x[1]) - exp(lambda_2 * x[1])) + + cos(pi * x[2]) * (exp(s1 * x[1]) - exp(r1 * x[1])) / (exp(-s1) - exp(-r1)) + return SVector{1}(u) +end +initial_condition = initial_condition_eriksson_johnson + +boundary_conditions = Dict(:x_neg => BoundaryConditionDirichlet(initial_condition), + :y_neg => BoundaryConditionDirichlet(initial_condition), + :y_pos => BoundaryConditionDirichlet(initial_condition), + :x_pos => boundary_condition_do_nothing) + +boundary_conditions_parabolic = Dict(:x_neg => BoundaryConditionDirichlet(initial_condition), + :x_pos => BoundaryConditionDirichlet(initial_condition), + :y_neg => BoundaryConditionDirichlet(initial_condition), + :y_pos => BoundaryConditionDirichlet(initial_condition)) + +# A semidiscretization collects data structures and functions for the spatial discretization +semi = SemidiscretizationHyperbolicParabolic(mesh, + (equations, equations_parabolic), + initial_condition, solver; + boundary_conditions = (boundary_conditions, + boundary_conditions_parabolic)) + +############################################################################### +# ODE solvers, callbacks etc. 
+ +# Create ODE problem with time span `tspan` +tspan = (0.0, 0.5) +ode = semidiscretize(semi, tspan) + +# At the beginning of the main loop, the SummaryCallback prints a summary of the simulation setup +# and resets the timers +summary_callback = SummaryCallback() + +# The AnalysisCallback allows to analyse the solution in regular intervals and prints the results +analysis_interval = 1000 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval) + +# The AliveCallback prints short status information in regular intervals +alive_callback = AliveCallback(analysis_interval = analysis_interval) + +amr_controller = ControllerThreeLevel(semi, IndicatorMax(semi, variable = first), + base_level = 1, + med_level = 2, med_threshold = 0.9, + max_level = 3, max_threshold = 1.0) + +amr_callback = AMRCallback(semi, amr_controller, + interval = 50) + +# Create a CallbackSet to collect all callbacks such that they can be passed to the ODE solver +callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, amr_callback) + +############################################################################### +# run the simulation + +# OrdinaryDiffEq's `solve` method evolves the solution in time and executes the passed callbacks +time_int_tol = 1.0e-11 +sol = solve(ode, dt = 1e-7, RDPK3SpFSAL49(); abstol = time_int_tol, reltol = time_int_tol, + ode_default_options()..., callback = callbacks) + +# Print the timer summary +summary_callback() diff --git a/examples/p4est_2d_dgsem/elixir_advection_diffusion_periodic_amr.jl b/examples/p4est_2d_dgsem/elixir_advection_diffusion_periodic_amr.jl new file mode 100644 index 00000000000..fdecb05f7bd --- /dev/null +++ b/examples/p4est_2d_dgsem/elixir_advection_diffusion_periodic_amr.jl @@ -0,0 +1,83 @@ +using OrdinaryDiffEq +using Trixi + +############################################################################### +# semidiscretization of the linear advection-diffusion equation + +advection_velocity = (1.5, 1.0) +equations = LinearScalarAdvectionEquation2D(advection_velocity) +diffusivity() = 5.0e-2 +equations_parabolic = LaplaceDiffusion2D(diffusivity(), equations) + +# Create DG solver with polynomial degree = 3 and (local) Lax-Friedrichs/Rusanov flux as surface flux +solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) + +coordinates_min = (-1.0, -1.0) # minimum coordinates (min(x), min(y)) +coordinates_max = (1.0, 1.0) # maximum coordinates (max(x), max(y)) + +trees_per_dimension = (4, 4) +mesh = P4estMesh(trees_per_dimension, + polydeg = 3, initial_refinement_level = 1, + coordinates_min = coordinates_min, coordinates_max = coordinates_max) + +# Define initial condition +function initial_condition_diffusive_convergence_test(x, t, + equation::LinearScalarAdvectionEquation2D) + # Store translated coordinate for easy use of exact solution + x_trans = x - equation.advection_velocity * t + + nu = diffusivity() + c = 1.0 + A = 0.5 + L = 2 + f = 1 / L + omega = 2 * pi * f + scalar = c + A * sin(omega * sum(x_trans)) * exp(-2 * nu * omega^2 * t) + return SVector(scalar) +end +initial_condition = initial_condition_diffusive_convergence_test + +# A semidiscretization collects data structures and functions for the spatial discretization +semi = SemidiscretizationHyperbolicParabolic(mesh, + (equations, equations_parabolic), + initial_condition, solver) + +############################################################################### +# ODE solvers, callbacks etc. 
+ +# Create ODE problem with time span `tspan` +tspan = (0.0, 0.5) +ode = semidiscretize(semi, tspan); + +# At the beginning of the main loop, the SummaryCallback prints a summary of the simulation setup +# and resets the timers +summary_callback = SummaryCallback() + +# The AnalysisCallback allows to analyse the solution in regular intervals and prints the results +analysis_interval = 100 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval) + +# The AliveCallback prints short status information in regular intervals +alive_callback = AliveCallback(analysis_interval = analysis_interval) + +amr_controller = ControllerThreeLevel(semi, IndicatorMax(semi, variable = first), + base_level = 1, + med_level = 2, med_threshold = 1.25, + max_level = 3, max_threshold = 1.45) + +amr_callback = AMRCallback(semi, amr_controller, + interval = 20) + +# Create a CallbackSet to collect all callbacks such that they can be passed to the ODE solver +callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, amr_callback) + +############################################################################### +# run the simulation + +# OrdinaryDiffEq's `solve` method evolves the solution in time and executes the passed callbacks +time_int_tol = 1.0e-11 +sol = solve(ode, RDPK3SpFSAL49(); abstol = time_int_tol, reltol = time_int_tol, + ode_default_options()..., callback = callbacks) + +# Print the timer summary +summary_callback() diff --git a/examples/p4est_2d_dgsem/elixir_advection_restart.jl b/examples/p4est_2d_dgsem/elixir_advection_restart.jl index 4f43e122ab3..0f573714c1f 100644 --- a/examples/p4est_2d_dgsem/elixir_advection_restart.jl +++ b/examples/p4est_2d_dgsem/elixir_advection_restart.jl @@ -31,7 +31,7 @@ save_solution.condition.save_initial_solution = false integrator = init(ode, CarpenterKennedy2N54(williamson_condition = false), dt = dt, # solve needs some value here but it will be overwritten by the stepsize_callback - save_everystep = false, callback = callbacks); + save_everystep = false, callback = callbacks, maxiters = 100_000); # Get the last time index and work with that. 
load_timestep!(integrator, restart_filename) diff --git a/examples/p4est_2d_dgsem/elixir_navierstokes_lid_driven_cavity.jl b/examples/p4est_2d_dgsem/elixir_navierstokes_lid_driven_cavity.jl index e6566edb18a..bc28ae6ffb3 100644 --- a/examples/p4est_2d_dgsem/elixir_navierstokes_lid_driven_cavity.jl +++ b/examples/p4est_2d_dgsem/elixir_navierstokes_lid_driven_cavity.jl @@ -41,7 +41,6 @@ heat_bc = Adiabatic((x, t, equations) -> 0.0) boundary_condition_lid = BoundaryConditionNavierStokesWall(velocity_bc_lid, heat_bc) boundary_condition_cavity = BoundaryConditionNavierStokesWall(velocity_bc_cavity, heat_bc) -# define periodic boundary conditions everywhere boundary_conditions = Dict(:x_neg => boundary_condition_slip_wall, :y_neg => boundary_condition_slip_wall, :y_pos => boundary_condition_slip_wall, diff --git a/examples/p4est_2d_dgsem/elixir_navierstokes_lid_driven_cavity_amr.jl b/examples/p4est_2d_dgsem/elixir_navierstokes_lid_driven_cavity_amr.jl new file mode 100644 index 00000000000..898366969a4 --- /dev/null +++ b/examples/p4est_2d_dgsem/elixir_navierstokes_lid_driven_cavity_amr.jl @@ -0,0 +1,94 @@ +using OrdinaryDiffEq +using Trixi + +############################################################################### +# semidiscretization of the ideal compressible Navier-Stokes equations + +# TODO: parabolic; unify names of these accessor functions +prandtl_number() = 0.72 +mu() = 0.001 + +equations = CompressibleEulerEquations2D(1.4) +equations_parabolic = CompressibleNavierStokesDiffusion2D(equations, mu = mu(), + Prandtl = prandtl_number()) + +# Create DG solver with polynomial degree = 3 and (local) Lax-Friedrichs/Rusanov flux as surface flux +solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) + +coordinates_min = (-1.0, -1.0) # minimum coordinates (min(x), min(y)) +coordinates_max = (1.0, 1.0) # maximum coordinates (max(x), max(y)) + +# Create a uniformly refined mesh +trees_per_dimension = (6, 6) +mesh = P4estMesh(trees_per_dimension, + polydeg = 3, initial_refinement_level = 2, + coordinates_min = coordinates_min, coordinates_max = coordinates_max, + periodicity = (false, false)) + +function initial_condition_cavity(x, t, equations::CompressibleEulerEquations2D) + Ma = 0.1 + rho = 1.0 + u, v = 0.0, 0.0 + p = 1.0 / (Ma^2 * equations.gamma) + return prim2cons(SVector(rho, u, v, p), equations) +end +initial_condition = initial_condition_cavity + +# BC types +velocity_bc_lid = NoSlip((x, t, equations) -> SVector(1.0, 0.0)) +velocity_bc_cavity = NoSlip((x, t, equations) -> SVector(0.0, 0.0)) +heat_bc = Adiabatic((x, t, equations) -> 0.0) +boundary_condition_lid = BoundaryConditionNavierStokesWall(velocity_bc_lid, heat_bc) +boundary_condition_cavity = BoundaryConditionNavierStokesWall(velocity_bc_cavity, heat_bc) + +boundary_conditions = Dict(:x_neg => boundary_condition_slip_wall, + :y_neg => boundary_condition_slip_wall, + :y_pos => boundary_condition_slip_wall, + :x_pos => boundary_condition_slip_wall) + +boundary_conditions_parabolic = Dict(:x_neg => boundary_condition_cavity, + :y_neg => boundary_condition_cavity, + :y_pos => boundary_condition_lid, + :x_pos => boundary_condition_cavity) + +# A semidiscretization collects data structures and functions for the spatial discretization +semi = SemidiscretizationHyperbolicParabolic(mesh, (equations, equations_parabolic), + initial_condition, solver; + boundary_conditions = (boundary_conditions, + boundary_conditions_parabolic)) + +############################################################################### +# ODE solvers, 
callbacks etc. + +# Create ODE problem with time span `tspan` +tspan = (0.0, 25.0) +ode = semidiscretize(semi, tspan); + +summary_callback = SummaryCallback() +alive_callback = AliveCallback(alive_interval = 2000) +analysis_interval = 2000 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval) + +amr_indicator = IndicatorLöhner(semi, variable = Trixi.density) + +amr_controller = ControllerThreeLevel(semi, amr_indicator, + base_level = 0, + med_level = 1, med_threshold = 0.005, + max_level = 2, max_threshold = 0.01) + +amr_callback = AMRCallback(semi, amr_controller, + interval = 50, + adapt_initial_condition = true, + adapt_initial_condition_only_refine = true) + +callbacks = CallbackSet(summary_callback, alive_callback, analysis_callback, amr_callback) +# callbacks = CallbackSet(summary_callback, alive_callback) + +############################################################################### +# run the simulation + +time_int_tol = 1e-8 +sol = solve(ode, RDPK3SpFSAL49(); abstol = time_int_tol, reltol = time_int_tol, + ode_default_options()..., callback = callbacks) + +summary_callback() # print the timer summary diff --git a/examples/p4est_3d_dgsem/elixir_advection_restart.jl b/examples/p4est_3d_dgsem/elixir_advection_restart.jl index cd97d69d692..b3dead42399 100644 --- a/examples/p4est_3d_dgsem/elixir_advection_restart.jl +++ b/examples/p4est_3d_dgsem/elixir_advection_restart.jl @@ -29,7 +29,7 @@ save_solution.condition.save_initial_solution = false integrator = init(ode, CarpenterKennedy2N54(williamson_condition = false), dt = dt, # solve needs some value here but it will be overwritten by the stepsize_callback - save_everystep = false, callback = callbacks); + save_everystep = false, callback = callbacks, maxiters = 100_000); # Get the last time index and work with that. load_timestep!(integrator, restart_filename) diff --git a/examples/structured_2d_dgsem/elixir_advection_restart.jl b/examples/structured_2d_dgsem/elixir_advection_restart.jl index 19863faae8d..0accbdba702 100644 --- a/examples/structured_2d_dgsem/elixir_advection_restart.jl +++ b/examples/structured_2d_dgsem/elixir_advection_restart.jl @@ -30,7 +30,7 @@ save_solution.condition.save_initial_solution = false integrator = init(ode, CarpenterKennedy2N54(williamson_condition = false), dt = dt, # solve needs some value here but it will be overwritten by the stepsize_callback - save_everystep = false, callback = callbacks); + save_everystep = false, callback = callbacks, maxiters = 100_000); # Get the last time index and work with that. 
load_timestep!(integrator, restart_filename) diff --git a/examples/structured_2d_dgsem/elixir_eulermulti_convergence_ec.jl b/examples/structured_2d_dgsem/elixir_eulermulti_convergence_ec.jl new file mode 100644 index 00000000000..95f71f38130 --- /dev/null +++ b/examples/structured_2d_dgsem/elixir_eulermulti_convergence_ec.jl @@ -0,0 +1,55 @@ + +using OrdinaryDiffEq +using Trixi + +############################################################################### +# semidiscretization of the compressible Euler multicomponent equations +equations = CompressibleEulerMulticomponentEquations2D(gammas = (1.4, 1.4), + gas_constants = (0.4, 0.4)) + +initial_condition = initial_condition_convergence_test + +volume_flux = flux_ranocha +solver = DGSEM(polydeg = 3, surface_flux = flux_ranocha, + volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) + +cells_per_dimension = (16, 16) +coordinates_min = (-1.0, -1.0) +coordinates_max = (1.0, 1.0) +mesh = StructuredMesh(cells_per_dimension, coordinates_min, coordinates_max) + +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + source_terms = source_terms_convergence_test) + +############################################################################### +# ODE solvers, callbacks etc. + +tspan = (0.0, 0.4) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 100 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval) + +alive_callback = AliveCallback(analysis_interval = analysis_interval) + +save_solution = SaveSolutionCallback(interval = 100, + save_initial_solution = true, + save_final_solution = true, + solution_variables = cons2prim) + +stepsize_callback = StepsizeCallback(cfl = 0.5) + +callbacks = CallbackSet(summary_callback, + analysis_callback, alive_callback, + save_solution, + stepsize_callback) + +############################################################################### +# run the simulation + +sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), + dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep = false, callback = callbacks); +summary_callback() # print the timer summary diff --git a/examples/structured_3d_dgsem/elixir_advection_restart.jl b/examples/structured_3d_dgsem/elixir_advection_restart.jl index e81ad5d6430..e516d794df8 100644 --- a/examples/structured_3d_dgsem/elixir_advection_restart.jl +++ b/examples/structured_3d_dgsem/elixir_advection_restart.jl @@ -29,7 +29,7 @@ save_solution.condition.save_initial_solution = false integrator = init(ode, CarpenterKennedy2N54(williamson_condition = false), dt = dt, # solve needs some value here but it will be overwritten by the stepsize_callback - save_everystep = false, callback = callbacks); + save_everystep = false, callback = callbacks, maxiters = 100_000); # Get the last time index and work with that. load_timestep!(integrator, restart_filename) diff --git a/examples/tree_2d_dgsem/elixir_advection_restart.jl b/examples/tree_2d_dgsem/elixir_advection_restart.jl index 770629bb15e..e0d1003f524 100644 --- a/examples/tree_2d_dgsem/elixir_advection_restart.jl +++ b/examples/tree_2d_dgsem/elixir_advection_restart.jl @@ -29,7 +29,7 @@ save_solution.condition.save_initial_solution = false integrator = init(ode, alg, dt = dt, # solve needs some value here but it will be overwritten by the stepsize_callback - save_everystep = false, callback = callbacks; ode_default_options()...) 
+ callback = callbacks, maxiters = 100_000; ode_default_options()...) # Load saved context for adaptive time integrator if integrator.opts.adaptive diff --git a/examples/tree_2d_dgsem/elixir_eulermulti_shock_bubble.jl b/examples/tree_2d_dgsem/elixir_eulermulti_shock_bubble.jl index f5ef51c108a..c6ed07dcda1 100644 --- a/examples/tree_2d_dgsem/elixir_eulermulti_shock_bubble.jl +++ b/examples/tree_2d_dgsem/elixir_eulermulti_shock_bubble.jl @@ -17,10 +17,8 @@ A shock-bubble testcase for multicomponent Euler equations [arXiv: 1904.00972](https://arxiv.org/abs/1904.00972) """ function initial_condition_shock_bubble(x, t, - equations::CompressibleEulerMulticomponentEquations2D{ - 5, - 2 - }) + equations::CompressibleEulerMulticomponentEquations2D{5, + 2}) # bubble test case, see Gouasmi et al. https://arxiv.org/pdf/1904.00972 # other reference: https://www.researchgate.net/profile/Pep_Mulet/publication/222675930_A_flux-split_algorithm_applied_to_conservative_models_for_multicomponent_compressible_flows/links/568da54508aeaa1481ae7af0.pdf # typical domain is rectangular, we change it to a square, as Trixi can only do squares diff --git a/examples/tree_2d_dgsem/elixir_eulermulti_shock_bubble_shockcapturing_subcell_minmax.jl b/examples/tree_2d_dgsem/elixir_eulermulti_shock_bubble_shockcapturing_subcell_minmax.jl index 3159a2066ad..4b606502ebe 100644 --- a/examples/tree_2d_dgsem/elixir_eulermulti_shock_bubble_shockcapturing_subcell_minmax.jl +++ b/examples/tree_2d_dgsem/elixir_eulermulti_shock_bubble_shockcapturing_subcell_minmax.jl @@ -17,10 +17,8 @@ A shock-bubble testcase for multicomponent Euler equations [arXiv: 1904.00972](https://arxiv.org/abs/1904.00972) """ function initial_condition_shock_bubble(x, t, - equations::CompressibleEulerMulticomponentEquations2D{ - 5, - 2 - }) + equations::CompressibleEulerMulticomponentEquations2D{5, + 2}) # bubble test case, see Gouasmi et al. https://arxiv.org/pdf/1904.00972 # other reference: https://www.researchgate.net/profile/Pep_Mulet/publication/222675930_A_flux-split_algorithm_applied_to_conservative_models_for_multicomponent_compressible_flows/links/568da54508aeaa1481ae7af0.pdf # typical domain is rectangular, we change it to a square, as Trixi can only do squares diff --git a/examples/tree_2d_dgsem/elixir_eulermulti_shock_bubble_shockcapturing_subcell_positivity.jl b/examples/tree_2d_dgsem/elixir_eulermulti_shock_bubble_shockcapturing_subcell_positivity.jl index 7856c9bafbd..78ff47e255f 100644 --- a/examples/tree_2d_dgsem/elixir_eulermulti_shock_bubble_shockcapturing_subcell_positivity.jl +++ b/examples/tree_2d_dgsem/elixir_eulermulti_shock_bubble_shockcapturing_subcell_positivity.jl @@ -17,10 +17,8 @@ A shock-bubble testcase for multicomponent Euler equations [arXiv: 1904.00972](https://arxiv.org/abs/1904.00972) """ function initial_condition_shock_bubble(x, t, - equations::CompressibleEulerMulticomponentEquations2D{ - 5, - 2 - }) + equations::CompressibleEulerMulticomponentEquations2D{5, + 2}) # bubble test case, see Gouasmi et al. 
https://arxiv.org/pdf/1904.00972 # other reference: https://www.researchgate.net/profile/Pep_Mulet/publication/222675930_A_flux-split_algorithm_applied_to_conservative_models_for_multicomponent_compressible_flows/links/568da54508aeaa1481ae7af0.pdf # typical domain is rectangular, we change it to a square, as Trixi can only do squares diff --git a/examples/tree_2d_dgsem/elixir_eulerpolytropic_convergence.jl b/examples/tree_2d_dgsem/elixir_eulerpolytropic_convergence.jl new file mode 100644 index 00000000000..95ef38eb701 --- /dev/null +++ b/examples/tree_2d_dgsem/elixir_eulerpolytropic_convergence.jl @@ -0,0 +1,63 @@ + +using OrdinaryDiffEq +using Trixi + +############################################################################### +# semidiscretization of the polytropic Euler equations + +gamma = 1.4 +kappa = 0.5 # Scaling factor for the pressure. +equations = PolytropicEulerEquations2D(gamma, kappa) + +initial_condition = initial_condition_convergence_test + +volume_flux = flux_winters_etal +solver = DGSEM(polydeg = 3, surface_flux = FluxHLL(min_max_speed_davis), + volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) + +coordinates_min = (0.0, 0.0) +coordinates_max = (1.0, 1.0) + +# Create a uniformly refined mesh with periodic boundaries +mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 2, + periodicity = true, + n_cells_max = 30_000) + +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + source_terms = source_terms_convergence_test) + +############################################################################### +# ODE solvers, callbacks etc. + +tspan = (0.0, 0.1) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 100 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval, + extra_analysis_errors = (:l2_error_primitive, + :linf_error_primitive)) + +alive_callback = AliveCallback(analysis_interval = analysis_interval) + +save_solution = SaveSolutionCallback(interval = 100, + save_initial_solution = true, + save_final_solution = true, + solution_variables = cons2prim) + +stepsize_callback = StepsizeCallback(cfl = 0.1) + +callbacks = CallbackSet(summary_callback, + analysis_callback, alive_callback, + save_solution, + stepsize_callback) + +############################################################################### +# run the simulation + +sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), + dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep = false, callback = callbacks); +summary_callback() # print the timer summary diff --git a/examples/tree_2d_dgsem/elixir_navierstokes_lid_driven_cavity.jl b/examples/tree_2d_dgsem/elixir_navierstokes_lid_driven_cavity.jl index 7bd1ec0c647..70d76fc9075 100644 --- a/examples/tree_2d_dgsem/elixir_navierstokes_lid_driven_cavity.jl +++ b/examples/tree_2d_dgsem/elixir_navierstokes_lid_driven_cavity.jl @@ -40,7 +40,6 @@ heat_bc = Adiabatic((x, t, equations) -> 0.0) boundary_condition_lid = BoundaryConditionNavierStokesWall(velocity_bc_lid, heat_bc) boundary_condition_cavity = BoundaryConditionNavierStokesWall(velocity_bc_cavity, heat_bc) -# define periodic boundary conditions everywhere boundary_conditions = boundary_condition_slip_wall boundary_conditions_parabolic = (; x_neg = boundary_condition_cavity, diff --git a/examples/tree_3d_dgsem/elixir_advection_restart.jl b/examples/tree_3d_dgsem/elixir_advection_restart.jl index 
f81e013fc0a..a53f4ebf5b1 100644 --- a/examples/tree_3d_dgsem/elixir_advection_restart.jl +++ b/examples/tree_3d_dgsem/elixir_advection_restart.jl @@ -27,7 +27,7 @@ save_solution.condition.save_initial_solution = false integrator = init(ode, CarpenterKennedy2N54(williamson_condition = false), dt = dt, # solve needs some value here but it will be overwritten by the stepsize_callback - save_everystep = false, callback = callbacks); + save_everystep = false, callback = callbacks, maxiters = 100_000); # Get the last time index and work with that. load_timestep!(integrator, restart_filename) diff --git a/examples/unstructured_2d_dgsem/elixir_euler_restart.jl b/examples/unstructured_2d_dgsem/elixir_euler_restart.jl index 4d6af65b8a4..c1908691902 100644 --- a/examples/unstructured_2d_dgsem/elixir_euler_restart.jl +++ b/examples/unstructured_2d_dgsem/elixir_euler_restart.jl @@ -29,7 +29,7 @@ save_solution.condition.save_initial_solution = false integrator = init(ode, CarpenterKennedy2N54(williamson_condition = false), dt = dt, # solve needs some value here but it will be overwritten by the stepsize_callback - save_everystep = false, callback = callbacks); + save_everystep = false, callback = callbacks, maxiters = 100_000); # Get the last time index and work with that. load_timestep!(integrator, restart_filename) diff --git a/ext/TrixiMakieExt.jl b/ext/TrixiMakieExt.jl index 8cd7576a6e5..301a7656da9 100644 --- a/ext/TrixiMakieExt.jl +++ b/ext/TrixiMakieExt.jl @@ -29,9 +29,7 @@ import Trixi: iplot, iplot! # First some utilities # Given a reference plotting triangulation, this function generates a plotting triangulation for # the entire global mesh. The output can be plotted using `Makie.mesh`. -function global_plotting_triangulation_makie(pds::PlotDataSeries{ - <:PlotData2DTriangulated - }; +function global_plotting_triangulation_makie(pds::PlotDataSeries{<:PlotData2DTriangulated}; set_z_coordinate_zero = false) @unpack variable_id = pds pd = pds.plot_data @@ -61,8 +59,7 @@ end # Returns a list of `Makie.Point`s which can be used to plot the mesh, or a solution "wireframe" # (e.g., a plot of the mesh lines but with the z-coordinate equal to the value of the solution). -function convert_PlotData2D_to_mesh_Points(pds::PlotDataSeries{<:PlotData2DTriangulated - }; +function convert_PlotData2D_to_mesh_Points(pds::PlotDataSeries{<:PlotData2DTriangulated}; set_z_coordinate_zero = false) @unpack variable_id = pds pd = pds.plot_data diff --git a/src/auxiliary/p4est.jl b/src/auxiliary/p4est.jl index 968af339cbd..0b826254129 100644 --- a/src/auxiliary/p4est.jl +++ b/src/auxiliary/p4est.jl @@ -13,14 +13,20 @@ This function will check if `p4est` is already initialized and if yes, do nothing, thus it is safe to call it multiple times. """ function init_p4est() - p4est_package_id = P4est.package_id() - if p4est_package_id >= 0 - return nothing + # Only initialize p4est if P4est.jl can be used + if P4est.preferences_set_correctly() + p4est_package_id = P4est.package_id() + if p4est_package_id >= 0 + return nothing + end + + # Initialize `p4est` with log level ERROR to prevent a lot of output in AMR simulations + p4est_init(C_NULL, SC_LP_ERROR) + else + @warn "Preferences for P4est.jl are not set correctly. Until fixed, using `P4estMesh` will result in a crash. 
" * + "See also https://trixi-framework.github.io/Trixi.jl/stable/parallelization/#parallel_system_MPI" end - # Initialize `p4est` with log level ERROR to prevent a lot of output in AMR simulations - p4est_init(C_NULL, SC_LP_ERROR) - return nothing end diff --git a/src/auxiliary/precompile.jl b/src/auxiliary/precompile.jl index 7ed0e26b5ef..9cec502f6cb 100644 --- a/src/auxiliary/precompile.jl +++ b/src/auxiliary/precompile.jl @@ -186,8 +186,7 @@ function _precompile_manual_() Matrix{RealT}, # DerivativeMatrix #StaticArrays.SArray{Tuple{nnodes_,nnodes_},RealT,2,nnodes_^2}, - Matrix{RealT} - } + Matrix{RealT}} end function mortar_type_dgsem(RealT, nnodes_) @@ -197,8 +196,7 @@ function _precompile_manual_() Matrix{RealT}, # ReverseMatrix # StaticArrays.SArray{Tuple{nnodes_,nnodes_},RealT,2,nnodes_^2}, - Matrix{RealT} - } + Matrix{RealT}} end function analyzer_type_dgsem(RealT, nnodes_) @@ -208,8 +206,7 @@ function _precompile_manual_() # VectorT StaticArrays.SVector{nnodes_analysis, RealT}, # Vandermonde - Array{RealT, 2} - } + Array{RealT, 2}} end function adaptor_type_dgsem(RealT, nnodes_) @@ -242,8 +239,8 @@ function _precompile_manual_() @assert Base.precompile(Tuple{Core.kwftype(typeof(Trixi.Type)), NamedTuple{(:initial_refinement_level, :n_cells_max), Tuple{Int, Int}}, Type{TreeMesh}, - Tuple{RealT, RealT, RealT}, Tuple{RealT, RealT, RealT - }}) + Tuple{RealT, RealT, RealT}, + Tuple{RealT, RealT, RealT}}) end for TreeType in (SerialTree, ParallelTree), NDIMS in 1:3 @assert Base.precompile(Tuple{typeof(Trixi.initialize!), @@ -308,8 +305,8 @@ function _precompile_manual_() Base.precompile(Tuple{Type{LobattoLegendreBasis}, Int}) for RealT in (Float64,) Base.precompile(Tuple{Type{LobattoLegendreBasis}, RealT, Int}) - @assert Base.precompile(Tuple{typeof(Trixi.calc_dhat), Vector{RealT}, Vector{RealT} - }) + @assert Base.precompile(Tuple{typeof(Trixi.calc_dhat), Vector{RealT}, + Vector{RealT}}) @assert Base.precompile(Tuple{typeof(Trixi.calc_dsplit), Vector{RealT}, Vector{RealT}}) @assert Base.precompile(Tuple{typeof(Trixi.polynomial_derivative_matrix), @@ -332,10 +329,10 @@ function _precompile_manual_() @assert Base.precompile(Tuple{typeof(Trixi.calc_forward_lower), Int}) @assert Base.precompile(Tuple{typeof(Trixi.calc_reverse_upper), Int, Val{:gauss}}) @assert Base.precompile(Tuple{typeof(Trixi.calc_reverse_lower), Int, Val{:gauss}}) - @assert Base.precompile(Tuple{typeof(Trixi.calc_reverse_upper), Int, Val{:gauss_lobatto - }}) - @assert Base.precompile(Tuple{typeof(Trixi.calc_reverse_lower), Int, Val{:gauss_lobatto - }}) + @assert Base.precompile(Tuple{typeof(Trixi.calc_reverse_upper), Int, + Val{:gauss_lobatto}}) + @assert Base.precompile(Tuple{typeof(Trixi.calc_reverse_lower), Int, + Val{:gauss_lobatto}}) # Constructors: mortars, analyzers, adaptors for RealT in (Float64,), polydeg in 1:7 @@ -362,14 +359,12 @@ function _precompile_manual_() NamedTuple{(:interval, :save_final_restart), Tuple{Int, Bool}}, Type{SaveRestartCallback}}) @assert Base.precompile(Tuple{Core.kwftype(typeof(Trixi.Type)), - NamedTuple{ - (:interval, :save_initial_solution, + NamedTuple{(:interval, :save_initial_solution, :save_final_solution, :solution_variables), Tuple{Int, Bool, Bool, typeof(cons2cons)}}, Type{SaveSolutionCallback}}) @assert Base.precompile(Tuple{Core.kwftype(typeof(Trixi.Type)), - NamedTuple{ - (:interval, :save_initial_solution, + NamedTuple{(:interval, :save_initial_solution, :save_final_solution, :solution_variables), Tuple{Int, Bool, Bool, typeof(cons2prim)}}, Type{SaveSolutionCallback}}) @@ 
-385,8 +380,7 @@ function _precompile_manual_() # end # end @assert Base.precompile(Tuple{typeof(SummaryCallback)}) - @assert Base.precompile(Tuple{ - DiscreteCallback{typeof(Trixi.summary_callback), + @assert Base.precompile(Tuple{DiscreteCallback{typeof(Trixi.summary_callback), typeof(Trixi.summary_callback), typeof(Trixi.initialize_summary_callback), typeof(SciMLBase.FINALIZE_DEFAULT)}}) @@ -419,8 +413,8 @@ function _precompile_manual_() Trixi.ElementContainer2D{RealT, uEltype}}) @assert Base.precompile(Tuple{typeof(Trixi.init_mortars), Array{Int, 1}, TreeMesh{2, Trixi.SerialTree{2}}, - Trixi.ElementContainer2D{RealT, uEltype}, mortar_type - }) + Trixi.ElementContainer2D{RealT, uEltype}, + mortar_type}) @assert Base.precompile(Tuple{typeof(Trixi.save_mesh_file), TreeMesh{2, Trixi.SerialTree{2}}, String}) @@ -433,8 +427,8 @@ function _precompile_manual_() Trixi.ElementContainer2D{RealT, uEltype}}) @assert Base.precompile(Tuple{typeof(Trixi.init_mortars), Array{Int, 1}, TreeMesh{2, Trixi.ParallelTree{2}}, - Trixi.ElementContainer2D{RealT, uEltype}, mortar_type - }) + Trixi.ElementContainer2D{RealT, uEltype}, + mortar_type}) @assert Base.precompile(Tuple{typeof(Trixi.init_mpi_interfaces), Array{Int, 1}, TreeMesh{2, Trixi.ParallelTree{2}}, Trixi.ElementContainer2D{RealT, uEltype}}) @@ -450,8 +444,8 @@ function _precompile_manual_() Trixi.ElementContainer3D{RealT, uEltype}}) @assert Base.precompile(Tuple{typeof(Trixi.init_mortars), Array{Int, 1}, TreeMesh{3, Trixi.SerialTree{3}}, - Trixi.ElementContainer3D{RealT, uEltype}, mortar_type - }) + Trixi.ElementContainer3D{RealT, uEltype}, + mortar_type}) @assert Base.precompile(Tuple{typeof(Trixi.save_mesh_file), TreeMesh{3, Trixi.SerialTree{3}}, String}) end @@ -548,16 +542,10 @@ function _precompile_manual_() restart_callback_type}) for solution_variables in (cons2cons, cons2prim) - save_solution_callback_type = DiscreteCallback{ - SaveSolutionCallback{ - typeof(solution_variables) - }, - SaveSolutionCallback{ - typeof(solution_variables) - }, + save_solution_callback_type = DiscreteCallback{SaveSolutionCallback{typeof(solution_variables)}, + SaveSolutionCallback{typeof(solution_variables)}, typeof(Trixi.initialize!), - typeof(SciMLBase.FINALIZE_DEFAULT) - } + typeof(SciMLBase.FINALIZE_DEFAULT)} @assert Base.precompile(Tuple{typeof(show), Base.TTY, save_solution_callback_type}) @assert Base.precompile(Tuple{typeof(show), IOContext{Base.TTY}, diff --git a/src/auxiliary/special_elixirs.jl b/src/auxiliary/special_elixirs.jl index 25bca8939ce..5fdd9aea0c5 100644 --- a/src/auxiliary/special_elixirs.jl +++ b/src/auxiliary/special_elixirs.jl @@ -20,7 +20,7 @@ providing examples with sensible default values for users. Before replacing assignments in `elixir`, the keyword argument `maxiters` is inserted into calls to `solve` and `Trixi.solve` with its default value used in the SciML ecosystem -for ODEs, see the "Miscellaneous" section of the +for ODEs, see the "Miscellaneous" section of the [documentation](https://docs.sciml.ai/DiffEqDocs/stable/basics/common_solver_opts/). # Examples @@ -36,6 +36,16 @@ julia> redirect_stdout(devnull) do ``` """ function trixi_include(mod::Module, elixir::AbstractString; kwargs...) 
+ # Check that all kwargs exist as assignments + code = read(elixir, String) + expr = Meta.parse("begin \n$code \nend") + expr = insert_maxiters(expr) + + for (key, val) in kwargs + # This will throw an error when `key` is not found + find_assignment(expr, key) + end + # Print information on potential wait time only in non-parallel case if !mpi_isparallel() @info "You just called `trixi_include`. Julia may now compile the code, please be patient." @@ -243,6 +253,7 @@ end function find_assignment(expr, destination) # declare result to be able to assign to it in the closure local result + found = false # find explicit and keyword assignments walkexpr(expr) do x @@ -250,12 +261,17 @@ function find_assignment(expr, destination) if (x.head === Symbol("=") || x.head === :kw) && x.args[1] === Symbol(destination) result = x.args[2] + found = true # dump(x) end end return x end + if !found + throw(ArgumentError("assignment `$destination` not found in expression")) + end + result end @@ -274,17 +290,28 @@ function extract_initial_resolution(elixir, kwargs) return initial_refinement_level end catch e - if isa(e, UndefVarError) - # get cells_per_dimension from the elixir - cells_per_dimension = eval(find_assignment(expr, :cells_per_dimension)) - - if haskey(kwargs, :cells_per_dimension) - return kwargs[:cells_per_dimension] - else - return cells_per_dimension + # If `initial_refinement_level` is not found, we will get an `ArgumentError` + if isa(e, ArgumentError) + try + # get cells_per_dimension from the elixir + cells_per_dimension = eval(find_assignment(expr, :cells_per_dimension)) + + if haskey(kwargs, :cells_per_dimension) + return kwargs[:cells_per_dimension] + else + return cells_per_dimension + end + catch e2 + # If `cells_per_dimension` is not found either + if isa(e2, ArgumentError) + throw(ArgumentError("`convergence_test` requires the elixir to define " * + "`initial_refinement_level` or `cells_per_dimension`")) + else + rethrow() + end end else - throw(e) + rethrow() end end end diff --git a/src/auxiliary/t8code.jl b/src/auxiliary/t8code.jl index 37cb782bb93..bd781b21c1e 100644 --- a/src/auxiliary/t8code.jl +++ b/src/auxiliary/t8code.jl @@ -7,34 +7,40 @@ is already initialized and if yes, do nothing, thus it is safe to call it multiple times. """ function init_t8code() - t8code_package_id = t8_get_package_id() - if t8code_package_id >= 0 - return nothing - end + # Only initialize t8code if T8code.jl can be used + if T8code.preferences_set_correctly() + t8code_package_id = t8_get_package_id() + if t8code_package_id >= 0 + return nothing + end - # Initialize the sc library, has to happen before we initialize t8code. - let catch_signals = 0, print_backtrace = 0, log_handler = C_NULL - T8code.Libt8.sc_init(mpi_comm(), catch_signals, print_backtrace, log_handler, - T8code.Libt8.SC_LP_ERROR) - end + # Initialize the sc library, has to happen before we initialize t8code. 
+ let catch_signals = 0, print_backtrace = 0, log_handler = C_NULL + T8code.Libt8.sc_init(mpi_comm(), catch_signals, print_backtrace, log_handler, + T8code.Libt8.SC_LP_ERROR) + end - if T8code.Libt8.p4est_is_initialized() == 0 - # Initialize `p4est` with log level ERROR to prevent a lot of output in AMR simulations - T8code.Libt8.p4est_init(C_NULL, T8code.Libt8.SC_LP_ERROR) - end + if T8code.Libt8.p4est_is_initialized() == 0 + # Initialize `p4est` with log level ERROR to prevent a lot of output in AMR simulations + T8code.Libt8.p4est_init(C_NULL, T8code.Libt8.SC_LP_ERROR) + end - # Initialize t8code with log level ERROR to prevent a lot of output in AMR simulations. - t8_init(T8code.Libt8.SC_LP_ERROR) - - if haskey(ENV, "TRIXI_T8CODE_SC_FINALIZE") - # Normally, `sc_finalize` should always be called during shutdown of an - # application. It checks whether there is still un-freed memory by t8code - # and/or T8code.jl and throws an exception if this is the case. For - # production runs this is not mandatory, but is helpful during - # development. Hence, this option is only activated when environment - # variable TRIXI_T8CODE_SC_FINALIZE exists. - @warn "T8code.jl: sc_finalize will be called during shutdown of Trixi.jl." - MPI.add_finalize_hook!(T8code.Libt8.sc_finalize) + # Initialize t8code with log level ERROR to prevent a lot of output in AMR simulations. + t8_init(T8code.Libt8.SC_LP_ERROR) + + if haskey(ENV, "TRIXI_T8CODE_SC_FINALIZE") + # Normally, `sc_finalize` should always be called during shutdown of an + # application. It checks whether there is still un-freed memory by t8code + # and/or T8code.jl and throws an exception if this is the case. For + # production runs this is not mandatory, but is helpful during + # development. Hence, this option is only activated when environment + # variable TRIXI_T8CODE_SC_FINALIZE exists. + @warn "T8code.jl: sc_finalize will be called during shutdown of Trixi.jl." + MPI.add_finalize_hook!(T8code.Libt8.sc_finalize) + end + else + @warn "Preferences for T8code.jl are not set correctly. Until fixed, using `T8codeMesh` will result in a crash. 
" * + "See also https://trixi-framework.github.io/Trixi.jl/stable/parallelization/#parallel_system_MPI" end return nothing diff --git a/src/callbacks_step/amr.jl b/src/callbacks_step/amr.jl index ba840ff9675..5854c8617c3 100644 --- a/src/callbacks_step/amr.jl +++ b/src/callbacks_step/amr.jl @@ -528,6 +528,103 @@ function copy_to_quad_iter_volume(info, user_data) return nothing end +# specialized callback which includes the `cache_parabolic` argument +function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::P4estMesh, + equations, dg::DG, cache, cache_parabolic, + semi, + t, iter; + only_refine = false, only_coarsen = false, + passive_args = ()) + @unpack controller, adaptor = amr_callback + + u = wrap_array(u_ode, mesh, equations, dg, cache) + lambda = @trixi_timeit timer() "indicator" controller(u, mesh, equations, dg, cache, + t = t, iter = iter) + + @boundscheck begin + @assert axes(lambda)==(Base.OneTo(ncells(mesh)),) ("Indicator array (axes = $(axes(lambda))) and mesh cells (axes = $(Base.OneTo(ncells(mesh)))) have different axes") + end + + # Copy controller value of each quad to the quad's user data storage + iter_volume_c = cfunction(copy_to_quad_iter_volume, Val(ndims(mesh))) + + # The pointer to lambda will be interpreted as Ptr{Int} below + @assert lambda isa Vector{Int} + iterate_p4est(mesh.p4est, lambda; iter_volume_c = iter_volume_c) + + @trixi_timeit timer() "refine" if !only_coarsen + # Refine mesh + refined_original_cells = @trixi_timeit timer() "mesh" refine!(mesh) + + # Refine solver + @trixi_timeit timer() "solver" refine!(u_ode, adaptor, mesh, equations, dg, + cache, cache_parabolic, + refined_original_cells) + for (p_u_ode, p_mesh, p_equations, p_dg, p_cache) in passive_args + @trixi_timeit timer() "passive solver" refine!(p_u_ode, adaptor, p_mesh, + p_equations, + p_dg, p_cache, + refined_original_cells) + end + else + # If there is nothing to refine, create empty array for later use + refined_original_cells = Int[] + end + + @trixi_timeit timer() "coarsen" if !only_refine + # Coarsen mesh + coarsened_original_cells = @trixi_timeit timer() "mesh" coarsen!(mesh) + + # coarsen solver + @trixi_timeit timer() "solver" coarsen!(u_ode, adaptor, mesh, equations, dg, + cache, cache_parabolic, + coarsened_original_cells) + for (p_u_ode, p_mesh, p_equations, p_dg, p_cache) in passive_args + @trixi_timeit timer() "passive solver" coarsen!(p_u_ode, adaptor, p_mesh, + p_equations, + p_dg, p_cache, + coarsened_original_cells) + end + else + # If there is nothing to coarsen, create empty array for later use + coarsened_original_cells = Int[] + end + + # Store whether there were any cells coarsened or refined and perform load balancing + has_changed = !isempty(refined_original_cells) || !isempty(coarsened_original_cells) + # Check if mesh changed on other processes + if mpi_isparallel() + has_changed = MPI.Allreduce!(Ref(has_changed), |, mpi_comm())[] + end + + if has_changed # TODO: Taal decide, where shall we set this? 
+ # don't set it to has_changed since there can be changes from earlier calls + mesh.unsaved_changes = true + + if mpi_isparallel() && amr_callback.dynamic_load_balancing + @trixi_timeit timer() "dynamic load balancing" begin + global_first_quadrant = unsafe_wrap(Array, + unsafe_load(mesh.p4est).global_first_quadrant, + mpi_nranks() + 1) + old_global_first_quadrant = copy(global_first_quadrant) + partition!(mesh) + rebalance_solver!(u_ode, mesh, equations, dg, cache, + old_global_first_quadrant) + end + end + + reinitialize_boundaries!(semi.boundary_conditions, cache) + # if the semidiscretization also stores parabolic boundary conditions, + # reinitialize them after each refinement step as well. + if hasproperty(semi, :boundary_conditions_parabolic) + reinitialize_boundaries!(semi.boundary_conditions_parabolic, cache) + end + end + + # Return true if there were any cells coarsened or refined, otherwise false + return has_changed +end + # 2D function cfunction(::typeof(copy_to_quad_iter_volume), ::Val{2}) @cfunction(copy_to_quad_iter_volume, Cvoid, diff --git a/src/callbacks_step/amr_dg2d.jl b/src/callbacks_step/amr_dg2d.jl index 6395a9f348f..969f9c564f3 100644 --- a/src/callbacks_step/amr_dg2d.jl +++ b/src/callbacks_step/amr_dg2d.jl @@ -136,8 +136,8 @@ function refine!(u_ode::AbstractVector, adaptor, mesh::Union{TreeMesh{2}, P4estM return nothing end -# AMR for hyperbolic-parabolic equations currently only supported on TreeMeshes -function refine!(u_ode::AbstractVector, adaptor, mesh::Union{TreeMesh{2}, TreeMesh{3}}, +function refine!(u_ode::AbstractVector, adaptor, + mesh::Union{TreeMesh{2}, P4estMesh{2}, TreeMesh{3}}, equations, dg::DGSEM, cache, cache_parabolic, elements_to_refine) # Call `refine!` for the hyperbolic part, which does the heavy lifting of @@ -298,8 +298,8 @@ function coarsen!(u_ode::AbstractVector, adaptor, return nothing end -# AMR for hyperbolic-parabolic equations currently only supported on TreeMeshes -function coarsen!(u_ode::AbstractVector, adaptor, mesh::Union{TreeMesh{2}, TreeMesh{3}}, +function coarsen!(u_ode::AbstractVector, adaptor, + mesh::Union{TreeMesh{2}, P4estMesh{2}, TreeMesh{3}}, equations, dg::DGSEM, cache, cache_parabolic, elements_to_remove) # Call `coarsen!` for the hyperbolic part, which does the heavy lifting of diff --git a/src/callbacks_step/analysis.jl b/src/callbacks_step/analysis.jl index e5b4a01a885..ba232032951 100644 --- a/src/callbacks_step/analysis.jl +++ b/src/callbacks_step/analysis.jl @@ -23,6 +23,13 @@ Additional errors can be computed, e.g. by passing `extra_analysis_errors = (:l2_error_primitive, :linf_error_primitive)` or `extra_analysis_errors = (:conservation_error,)`. +If you want to omit the computation (to save compute time) of the [`default_analysis_errors`](@ref), specify +`analysis_errors = Symbol[]`. +Note: `default_analysis_errors` are `:l2_error` and `:linf_error` for all equations. +If you want to compute `extra_analysis_errors` such as `:conservation_error` solely, i.e., +without `:l2_error, :linf_error`, you need to specify +`analysis_errors = [:conservation_error]` instead of `extra_analysis_errors = [:conservation_error]`. + Further scalar functions `func` in `extra_analysis_integrals` are applied to the numerical solution and integrated over the computational domain. Some examples for this are [`entropy`](@ref), [`energy_kinetic`](@ref), [`energy_internal`](@ref), and [`energy_total`](@ref). 
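As a brief usage sketch of the `analysis_errors` keyword documented above (`semi` is assumed to come from an elixir; the `interval` value is illustrative):

```julia
# Sketch only: `semi` is assumed to be defined by an elixir.

# Skip the default L2/Linf error computation entirely
analysis_callback = AnalysisCallback(semi, interval = 100,
                                     analysis_errors = Symbol[])

# Compute only the conservation error, replacing the default L2/Linf errors
analysis_callback = AnalysisCallback(semi, interval = 100,
                                     analysis_errors = [:conservation_error])
```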
@@ -332,7 +339,8 @@ function (analysis_callback::AnalysisCallback)(u_ode, du_ode, integrator, semi) @notimeit timer() integrator.f(du_ode, u_ode, semi, t) u = wrap_array(u_ode, mesh, equations, solver, cache) du = wrap_array(du_ode, mesh, equations, solver, cache) - l2_error, linf_error = analysis_callback(io, du, u, u_ode, t, semi) + # Compute l2_error, linf_error + analysis_callback(io, du, u, u_ode, t, semi) mpi_println("─"^100) mpi_println() @@ -354,8 +362,7 @@ function (analysis_callback::AnalysisCallback)(u_ode, du_ode, integrator, semi) analysis_callback.start_time_last_analysis = time_ns() analysis_callback.ncalls_rhs_last_analysis = ncalls(semi.performance_counter) - # Return errors for EOC analysis - return l2_error, linf_error + return nothing end # This method is just called internally from `(analysis_callback::AnalysisCallback)(integrator)` @@ -377,28 +384,31 @@ function (analysis_callback::AnalysisCallback)(io, du, u, u_ode, t, semi) println() end - # Calculate L2/Linf errors, which are also returned - l2_error, linf_error = calc_error_norms(u_ode, t, analyzer, semi, cache_analysis) + if :l2_error in analysis_errors || :linf_error in analysis_errors + # Calculate L2/Linf errors + l2_error, linf_error = calc_error_norms(u_ode, t, analyzer, semi, + cache_analysis) - if mpi_isroot() - # L2 error - if :l2_error in analysis_errors - print(" L2 error: ") - for v in eachvariable(equations) - @printf(" % 10.8e", l2_error[v]) - @printf(io, " % 10.8e", l2_error[v]) + if mpi_isroot() + # L2 error + if :l2_error in analysis_errors + print(" L2 error: ") + for v in eachvariable(equations) + @printf(" % 10.8e", l2_error[v]) + @printf(io, " % 10.8e", l2_error[v]) + end + println() end - println() - end - # Linf error - if :linf_error in analysis_errors - print(" Linf error: ") - for v in eachvariable(equations) - @printf(" % 10.8e", linf_error[v]) - @printf(io, " % 10.8e", linf_error[v]) + # Linf error + if :linf_error in analysis_errors + print(" Linf error: ") + for v in eachvariable(equations) + @printf(" % 10.8e", linf_error[v]) + @printf(io, " % 10.8e", linf_error[v]) + end + println() end - println() end end @@ -477,7 +487,7 @@ function (analysis_callback::AnalysisCallback)(io, du, u, u_ode, t, semi) # additional integrals analyze_integrals(analysis_integrals, io, du, u, t, semi) - return l2_error, linf_error + return nothing end # Print level information only if AMR is enabled diff --git a/src/callbacks_step/averaging.jl b/src/callbacks_step/averaging.jl index 8d2dcfeaefe..efa71af9b91 100644 --- a/src/callbacks_step/averaging.jl +++ b/src/callbacks_step/averaging.jl @@ -52,8 +52,7 @@ function Base.show(io::IO, ::MIME"text/plain", end function AveragingCallback(semi::SemidiscretizationHyperbolic{<:Any, - <:CompressibleEulerEquations2D - }, + <:CompressibleEulerEquations2D}, tspan; output_directory = "out", filename = "averaging.h5") mesh, equations, solver, cache = mesh_equations_solver_cache(semi) mean_values = initialize_mean_values(mesh, equations, solver, cache) diff --git a/src/callbacks_step/euler_acoustics_coupling.jl b/src/callbacks_step/euler_acoustics_coupling.jl index ea33175d0c5..52dc55befdc 100644 --- a/src/callbacks_step/euler_acoustics_coupling.jl +++ b/src/callbacks_step/euler_acoustics_coupling.jl @@ -34,8 +34,8 @@ the [`AveragingCallback`](@ref). 
A direct-hybrid method for aeroacoustic analysis [DOI: 10.18154/RWTH-2017-04082](https://doi.org/10.18154/RWTH-2017-04082) """ -mutable struct EulerAcousticsCouplingCallback{RealT <: Real, MeanValues, IntegratorEuler - } +mutable struct EulerAcousticsCouplingCallback{RealT <: Real, MeanValues, + IntegratorEuler} stepsize_callback_acoustics::StepsizeCallback{RealT} stepsize_callback_euler::StepsizeCallback{RealT} mean_values::MeanValues @@ -85,8 +85,7 @@ The mean values for the acoustic perturbation equations are read from `averaging """ function EulerAcousticsCouplingCallback(ode_euler, averaging_callback::DiscreteCallback{<:Any, - <:AveragingCallback - }, + <:AveragingCallback}, alg, cfl_acoustics::Real, cfl_euler::Real; kwargs...) @unpack mean_values = averaging_callback.affect! diff --git a/src/callbacks_step/glm_speed_dg.jl b/src/callbacks_step/glm_speed_dg.jl index 0686c547a34..302aae356ab 100644 --- a/src/callbacks_step/glm_speed_dg.jl +++ b/src/callbacks_step/glm_speed_dg.jl @@ -7,8 +7,8 @@ function calc_dt_for_cleaning_speed(cfl::Real, mesh, equations::Union{AbstractIdealGlmMhdEquations, - AbstractIdealGlmMhdMulticomponentEquations - }, dg::DG, cache) + AbstractIdealGlmMhdMulticomponentEquations}, + dg::DG, cache) # compute time step for GLM linear advection equation with c_h=1 for the DG discretization on # Cartesian meshes max_scaled_speed_for_c_h = maximum(cache.elements.inverse_jacobian) * @@ -20,8 +20,7 @@ end function calc_dt_for_cleaning_speed(cfl::Real, mesh, equations::Union{AbstractIdealGlmMhdEquations, - AbstractIdealGlmMhdMulticomponentEquations - }, + AbstractIdealGlmMhdMulticomponentEquations}, dg::DGMulti, cache) rd = dg.basis md = mesh.md diff --git a/src/callbacks_step/save_solution.jl b/src/callbacks_step/save_solution.jl index 0092360cb20..c106fe69bcd 100644 --- a/src/callbacks_step/save_solution.jl +++ b/src/callbacks_step/save_solution.jl @@ -39,8 +39,7 @@ end function Base.show(io::IO, cb::DiscreteCallback{<:Any, - <:PeriodicCallbackAffect{<:SaveSolutionCallback - }}) + <:PeriodicCallbackAffect{<:SaveSolutionCallback}}) @nospecialize cb # reduce precompilation time save_solution_callback = cb.affect!.affect! 
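Stepping back from the formatting changes: the `calc_dt_for_cleaning_speed` methods above compute the CFL-restricted time step for a unit cleaning speed, which the `GlmSpeedCallback` then uses to rescale the divergence-cleaning speed `c_h` relative to the actual MHD time step. A usage sketch (the `glm_scale` and `cfl` values are placeholders, and the other callbacks are assumed to exist elsewhere in the elixir):

```julia
# Rescale the divergence-cleaning speed c_h after every time step
glm_speed_callback = GlmSpeedCallback(glm_scale = 0.5, cfl = 1.0)

callbacks = CallbackSet(summary_callback, analysis_callback,
                        glm_speed_callback, stepsize_callback)
```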
@@ -71,8 +70,7 @@ end
 
 function Base.show(io::IO, ::MIME"text/plain",
                    cb::DiscreteCallback{<:Any,
-                                        <:PeriodicCallbackAffect{<:SaveSolutionCallback
-                                                                 }})
+                                        <:PeriodicCallbackAffect{<:SaveSolutionCallback}})
     @nospecialize cb # reduce precompilation time
 
     if get(io, :compact, false)
diff --git a/src/callbacks_step/save_solution_dg.jl b/src/callbacks_step/save_solution_dg.jl
index 7c015999035..350aee7336a 100644
--- a/src/callbacks_step/save_solution_dg.jl
+++ b/src/callbacks_step/save_solution_dg.jl
@@ -33,8 +33,7 @@ function save_solution_file(u, time, dt, timestep,
         # compute the solution variables via broadcasting, and reinterpret the
         # result as a plain array of floating point numbers
         data = Array(reinterpret(eltype(u),
-                                 solution_variables.(reinterpret(SVector{
-                                                                         nvariables(equations),
+                                 solution_variables.(reinterpret(SVector{nvariables(equations),
                                                                          eltype(u)},
                                                                  u),
                                                      Ref(equations))))
@@ -116,8 +115,7 @@ function save_solution_file(u, time, dt, timestep,
         # compute the solution variables via broadcasting, and reinterpret the
         # result as a plain array of floating point numbers
         data = Array(reinterpret(eltype(u),
-                                 solution_variables.(reinterpret(SVector{
-                                                                         nvariables(equations),
+                                 solution_variables.(reinterpret(SVector{nvariables(equations),
                                                                          eltype(u)},
                                                                  u),
                                                      Ref(equations))))
diff --git a/src/equations/compressible_euler_multicomponent_2d.jl b/src/equations/compressible_euler_multicomponent_2d.jl
index fc661e15abf..60fce222f21 100644
--- a/src/equations/compressible_euler_multicomponent_2d.jl
+++ b/src/equations/compressible_euler_multicomponent_2d.jl
@@ -266,6 +266,29 @@ end
     return vcat(f_other, f_rho)
 end
 
+# Calculate 2D flux for a single point in the normal direction
+@inline function flux(u, normal_direction::AbstractVector,
+                      equations::CompressibleEulerMulticomponentEquations2D)
+    rho_v1, rho_v2, rho_e = u
+
+    rho = density(u, equations)
+
+    v1 = rho_v1 / rho
+    v2 = rho_v2 / rho
+    v_normal = v1 * normal_direction[1] + v2 * normal_direction[2]
+    gamma = totalgamma(u, equations)
+    p = (gamma - 1) * (rho_e - 0.5 * rho * (v1^2 + v2^2))
+
+    f_rho = densities(u, v_normal, equations)
+    f1 = rho_v1 * v_normal + p * normal_direction[1]
+    f2 = rho_v2 * v_normal + p * normal_direction[2]
+    f3 = (rho_e + p) * v_normal
+
+    f_other = SVector{3, real(equations)}(f1, f2, f3)
+
+    return vcat(f_other, f_rho)
+end
+
 """
     flux_chandrashekar(u_ll, u_rr, orientation, equations::CompressibleEulerMulticomponentEquations2D)
 
@@ -442,6 +465,76 @@ See also
     return vcat(f_other, f_rho)
 end
 
+@inline function flux_ranocha(u_ll, u_rr, normal_direction::AbstractVector,
+                              equations::CompressibleEulerMulticomponentEquations2D)
+    # Unpack left and right state
+    @unpack gammas, gas_constants, cv = equations
+    rho_v1_ll, rho_v2_ll, rho_e_ll = u_ll
+    rho_v1_rr, rho_v2_rr, rho_e_rr = u_rr
+    rhok_mean = SVector{ncomponents(equations), real(equations)}(ln_mean(u_ll[i + 3],
+                                                                         u_rr[i + 3])
+                                                                 for i in eachcomponent(equations))
+    rhok_avg = SVector{ncomponents(equations), real(equations)}(0.5 * (u_ll[i + 3] +
+                                                                       u_rr[i + 3])
+                                                                for i in eachcomponent(equations))
+
+    # Iterating over all partial densities
+    rho_ll = density(u_ll, equations)
+    rho_rr = density(u_rr, equations)
+
+    # Calculating gamma
+    gamma = totalgamma(0.5 * (u_ll + u_rr), equations)
+    inv_gamma_minus_one = 1 / (gamma - 1)
+
+    # extract velocities
+    v1_ll = rho_v1_ll / rho_ll
+    v1_rr = rho_v1_rr / rho_rr
+    v1_avg = 0.5 * (v1_ll + v1_rr)
+    v2_ll = rho_v2_ll / rho_ll
+    v2_rr = rho_v2_rr / rho_rr
+    v2_avg = 0.5 * (v2_ll + v2_rr)
+    velocity_square_avg = 0.5 * (v1_ll * v1_rr + v2_ll * v2_rr)
+
v_dot_n_ll = v1_ll * normal_direction[1] + v2_ll * normal_direction[2] + v_dot_n_rr = v1_rr * normal_direction[1] + v2_rr * normal_direction[2] + + # helpful variables + help1_ll = zero(v1_ll) + help1_rr = zero(v1_rr) + enth_ll = zero(v1_ll) + enth_rr = zero(v1_rr) + for i in eachcomponent(equations) + enth_ll += u_ll[i + 3] * gas_constants[i] + enth_rr += u_rr[i + 3] * gas_constants[i] + help1_ll += u_ll[i + 3] * cv[i] + help1_rr += u_rr[i + 3] * cv[i] + end + + # temperature and pressure + T_ll = (rho_e_ll - 0.5 * rho_ll * (v1_ll^2 + v2_ll^2)) / help1_ll + T_rr = (rho_e_rr - 0.5 * rho_rr * (v1_rr^2 + v2_rr^2)) / help1_rr + p_ll = T_ll * enth_ll + p_rr = T_rr * enth_rr + p_avg = 0.5 * (p_ll + p_rr) + inv_rho_p_mean = p_ll * p_rr * inv_ln_mean(rho_ll * p_rr, rho_rr * p_ll) + + f_rho_sum = zero(T_rr) + f_rho = SVector{ncomponents(equations), real(equations)}(rhok_mean[i] * 0.5 * + (v_dot_n_ll + v_dot_n_rr) + for i in eachcomponent(equations)) + for i in eachcomponent(equations) + f_rho_sum += f_rho[i] + end + f1 = f_rho_sum * v1_avg + p_avg * normal_direction[1] + f2 = f_rho_sum * v2_avg + p_avg * normal_direction[2] + f3 = f_rho_sum * (velocity_square_avg + inv_rho_p_mean * inv_gamma_minus_one) + + 0.5 * (p_ll * v_dot_n_rr + p_rr * v_dot_n_ll) + + # momentum and energy flux + f_other = SVector(f1, f2, f3) + + return vcat(f_other, f_rho) +end + # Calculate maximum wave speed for local Lax-Friedrichs-type dissipation @inline function max_abs_speed_naive(u_ll, u_rr, orientation::Integer, equations::CompressibleEulerMulticomponentEquations2D) @@ -487,6 +580,50 @@ end return (abs(v1) + c, abs(v2) + c) end +@inline function rotate_to_x(u, normal_vector, + equations::CompressibleEulerMulticomponentEquations2D) + # cos and sin of the angle between the x-axis and the normalized normal_vector are + # the normalized vector's x and y coordinates respectively (see unit circle). + c = normal_vector[1] + s = normal_vector[2] + + # Apply the 2D rotation matrix with normal and tangent directions of the form + # [ n_1 n_2 0 0; + # t_1 t_2 0 0; + # 0 0 1 0 + # 0 0 0 1] + # where t_1 = -n_2 and t_2 = n_1 + + densities = @view u[4:end] + return SVector(c * u[1] + s * u[2], + -s * u[1] + c * u[2], + u[3], + densities...) +end + +# Called inside `FluxRotated` in `numerical_fluxes.jl` so the direction +# has been normalized prior to this back-rotation of the state vector +@inline function rotate_from_x(u, normal_vector, + equations::CompressibleEulerMulticomponentEquations2D) + # cos and sin of the angle between the x-axis and the normalized normal_vector are + # the normalized vector's x and y coordinates respectively (see unit circle). + c = normal_vector[1] + s = normal_vector[2] + + # Apply the 2D back-rotation matrix with normal and tangent directions of the form + # [ n_1 t_1 0 0; + # n_2 t_2 0 0; + # 0 0 1 0; + # 0 0 0 1 ] + # where t_1 = -n_2 and t_2 = n_1 + + densities = @view u[4:end] + return SVector(c * u[1] - s * u[2], + s * u[1] + c * u[2], + u[3], + densities...) 
+end + # Convert conservative variables to primitive @inline function cons2prim(u, equations::CompressibleEulerMulticomponentEquations2D) rho_v1, rho_v2, rho_e = u diff --git a/src/equations/compressible_navier_stokes_1d.jl b/src/equations/compressible_navier_stokes_1d.jl index 73436c99b7c..d2c46ecc7d8 100644 --- a/src/equations/compressible_navier_stokes_1d.jl +++ b/src/equations/compressible_navier_stokes_1d.jl @@ -81,8 +81,7 @@ w_2 = \frac{\rho v1}{p},\, w_3 = -\frac{\rho}{p} ``` """ struct CompressibleNavierStokesDiffusion1D{GradientVariables, RealT <: Real, - E <: AbstractCompressibleEulerEquations{1} - } <: + E <: AbstractCompressibleEulerEquations{1}} <: AbstractCompressibleNavierStokesDiffusion{1, 3, GradientVariables} # TODO: parabolic # 1) For now save gamma and inv(gamma-1) again, but could potentially reuse them from the Euler equations @@ -130,14 +129,10 @@ end # we specialize this function to compute gradients of primitive variables instead of # conservative variables. -function gradient_variable_transformation(::CompressibleNavierStokesDiffusion1D{ - GradientVariablesPrimitive - }) +function gradient_variable_transformation(::CompressibleNavierStokesDiffusion1D{GradientVariablesPrimitive}) cons2prim end -function gradient_variable_transformation(::CompressibleNavierStokesDiffusion1D{ - GradientVariablesEntropy - }) +function gradient_variable_transformation(::CompressibleNavierStokesDiffusion1D{GradientVariablesEntropy}) cons2entropy end @@ -202,17 +197,13 @@ end # For CNS, it is simplest to formulate the viscous terms in primitive variables, so we transform the transformed # variables into primitive variables. @inline function convert_transformed_to_primitive(u_transformed, - equations::CompressibleNavierStokesDiffusion1D{ - GradientVariablesPrimitive - }) + equations::CompressibleNavierStokesDiffusion1D{GradientVariablesPrimitive}) return u_transformed end # TODO: parabolic. Make this more efficient! @inline function convert_transformed_to_primitive(u_transformed, - equations::CompressibleNavierStokesDiffusion1D{ - GradientVariablesEntropy - }) + equations::CompressibleNavierStokesDiffusion1D{GradientVariablesEntropy}) # note: this uses CompressibleNavierStokesDiffusion1D versions of cons2prim and entropy2cons return cons2prim(entropy2cons(u_transformed, equations), equations) end @@ -223,17 +214,13 @@ end # Note, the first component of `gradient_entropy_vars` contains gradient(rho) which is unused. # TODO: parabolic; entropy stable viscous terms @inline function convert_derivative_to_primitive(u, gradient, - ::CompressibleNavierStokesDiffusion1D{ - GradientVariablesPrimitive - }) + ::CompressibleNavierStokesDiffusion1D{GradientVariablesPrimitive}) return gradient end # the first argument is always the "transformed" variables. @inline function convert_derivative_to_primitive(w, gradient_entropy_vars, - equations::CompressibleNavierStokesDiffusion1D{ - GradientVariablesEntropy - }) + equations::CompressibleNavierStokesDiffusion1D{GradientVariablesEntropy}) # TODO: parabolic. This is inefficient to pass in transformed variables but then transform them back. 
# We can fix this if we directly compute v1, v2, T from the entropy variables @@ -272,9 +259,7 @@ end x, t, operator_type::Gradient, - equations::CompressibleNavierStokesDiffusion1D{ - GradientVariablesPrimitive - }) + equations::CompressibleNavierStokesDiffusion1D{GradientVariablesPrimitive}) v1 = boundary_condition.boundary_condition_velocity.boundary_value_function(x, t, equations) return SVector(u_inner[1], v1, u_inner[3]) @@ -288,9 +273,7 @@ end x, t, operator_type::Divergence, - equations::CompressibleNavierStokesDiffusion1D{ - GradientVariablesPrimitive - }) + equations::CompressibleNavierStokesDiffusion1D{GradientVariablesPrimitive}) # rho, v1, v2, _ = u_inner normal_heat_flux = boundary_condition.boundary_condition_heat_flux.boundary_value_normal_flux_function(x, t, @@ -310,9 +293,7 @@ end x, t, operator_type::Gradient, - equations::CompressibleNavierStokesDiffusion1D{ - GradientVariablesPrimitive - }) + equations::CompressibleNavierStokesDiffusion1D{GradientVariablesPrimitive}) v1 = boundary_condition.boundary_condition_velocity.boundary_value_function(x, t, equations) T = boundary_condition.boundary_condition_heat_flux.boundary_value_function(x, t, @@ -328,9 +309,7 @@ end x, t, operator_type::Divergence, - equations::CompressibleNavierStokesDiffusion1D{ - GradientVariablesPrimitive - }) + equations::CompressibleNavierStokesDiffusion1D{GradientVariablesPrimitive}) return flux_inner end @@ -350,9 +329,7 @@ end x, t, operator_type::Gradient, - equations::CompressibleNavierStokesDiffusion1D{ - GradientVariablesEntropy - }) + equations::CompressibleNavierStokesDiffusion1D{GradientVariablesEntropy}) v1 = boundary_condition.boundary_condition_velocity.boundary_value_function(x, t, equations) negative_rho_inv_p = w_inner[3] # w_3 = -rho / p @@ -368,9 +345,7 @@ end x, t, operator_type::Divergence, - equations::CompressibleNavierStokesDiffusion1D{ - GradientVariablesEntropy - }) + equations::CompressibleNavierStokesDiffusion1D{GradientVariablesEntropy}) normal_heat_flux = boundary_condition.boundary_condition_heat_flux.boundary_value_normal_flux_function(x, t, equations) @@ -389,9 +364,7 @@ end x, t, operator_type::Gradient, - equations::CompressibleNavierStokesDiffusion1D{ - GradientVariablesEntropy - }) + equations::CompressibleNavierStokesDiffusion1D{GradientVariablesEntropy}) v1 = boundary_condition.boundary_condition_velocity.boundary_value_function(x, t, equations) T = boundary_condition.boundary_condition_heat_flux.boundary_value_function(x, t, @@ -410,9 +383,7 @@ end x, t, operator_type::Divergence, - equations::CompressibleNavierStokesDiffusion1D{ - GradientVariablesEntropy - }) + equations::CompressibleNavierStokesDiffusion1D{GradientVariablesEntropy}) return SVector(flux_inner[1], flux_inner[2], flux_inner[3]) end end # @muladd diff --git a/src/equations/compressible_navier_stokes_2d.jl b/src/equations/compressible_navier_stokes_2d.jl index ad0db001872..5df7c01ca5c 100644 --- a/src/equations/compressible_navier_stokes_2d.jl +++ b/src/equations/compressible_navier_stokes_2d.jl @@ -81,8 +81,7 @@ w_2 = \frac{\rho v_1}{p},\, w_3 = \frac{\rho v_2}{p},\, w_4 = -\frac{\rho}{p} ``` """ struct CompressibleNavierStokesDiffusion2D{GradientVariables, RealT <: Real, - E <: AbstractCompressibleEulerEquations{2} - } <: + E <: AbstractCompressibleEulerEquations{2}} <: AbstractCompressibleNavierStokesDiffusion{2, 4, GradientVariables} # TODO: parabolic # 1) For now save gamma and inv(gamma-1) again, but could potentially reuse them from the Euler equations @@ -130,14 +129,10 @@ end # we 
specialize this function to compute gradients of primitive variables instead of # conservative variables. -function gradient_variable_transformation(::CompressibleNavierStokesDiffusion2D{ - GradientVariablesPrimitive - }) +function gradient_variable_transformation(::CompressibleNavierStokesDiffusion2D{GradientVariablesPrimitive}) cons2prim end -function gradient_variable_transformation(::CompressibleNavierStokesDiffusion2D{ - GradientVariablesEntropy - }) +function gradient_variable_transformation(::CompressibleNavierStokesDiffusion2D{GradientVariablesEntropy}) cons2entropy end @@ -224,17 +219,13 @@ end # For CNS, it is simplest to formulate the viscous terms in primitive variables, so we transform the transformed # variables into primitive variables. @inline function convert_transformed_to_primitive(u_transformed, - equations::CompressibleNavierStokesDiffusion2D{ - GradientVariablesPrimitive - }) + equations::CompressibleNavierStokesDiffusion2D{GradientVariablesPrimitive}) return u_transformed end # TODO: parabolic. Make this more efficient! @inline function convert_transformed_to_primitive(u_transformed, - equations::CompressibleNavierStokesDiffusion2D{ - GradientVariablesEntropy - }) + equations::CompressibleNavierStokesDiffusion2D{GradientVariablesEntropy}) # note: this uses CompressibleNavierStokesDiffusion2D versions of cons2prim and entropy2cons return cons2prim(entropy2cons(u_transformed, equations), equations) end @@ -245,17 +236,13 @@ end # Note, the first component of `gradient_entropy_vars` contains gradient(rho) which is unused. # TODO: parabolic; entropy stable viscous terms @inline function convert_derivative_to_primitive(u, gradient, - ::CompressibleNavierStokesDiffusion2D{ - GradientVariablesPrimitive - }) + ::CompressibleNavierStokesDiffusion2D{GradientVariablesPrimitive}) return gradient end # the first argument is always the "transformed" variables. @inline function convert_derivative_to_primitive(w, gradient_entropy_vars, - equations::CompressibleNavierStokesDiffusion2D{ - GradientVariablesEntropy - }) + equations::CompressibleNavierStokesDiffusion2D{GradientVariablesEntropy}) # TODO: parabolic. This is inefficient to pass in transformed variables but then transform them back. 
# We can fix this if we directly compute v1, v2, T from the entropy variables @@ -310,9 +297,7 @@ end x, t, operator_type::Gradient, - equations::CompressibleNavierStokesDiffusion2D{ - GradientVariablesPrimitive - }) + equations::CompressibleNavierStokesDiffusion2D{GradientVariablesPrimitive}) v1, v2 = boundary_condition.boundary_condition_velocity.boundary_value_function(x, t, equations) @@ -326,9 +311,7 @@ end x, t, operator_type::Divergence, - equations::CompressibleNavierStokesDiffusion2D{ - GradientVariablesPrimitive - }) + equations::CompressibleNavierStokesDiffusion2D{GradientVariablesPrimitive}) # rho, v1, v2, _ = u_inner normal_heat_flux = boundary_condition.boundary_condition_heat_flux.boundary_value_normal_flux_function(x, t, @@ -348,9 +331,7 @@ end x, t, operator_type::Gradient, - equations::CompressibleNavierStokesDiffusion2D{ - GradientVariablesPrimitive - }) + equations::CompressibleNavierStokesDiffusion2D{GradientVariablesPrimitive}) v1, v2 = boundary_condition.boundary_condition_velocity.boundary_value_function(x, t, equations) @@ -366,9 +347,7 @@ end x, t, operator_type::Divergence, - equations::CompressibleNavierStokesDiffusion2D{ - GradientVariablesPrimitive - }) + equations::CompressibleNavierStokesDiffusion2D{GradientVariablesPrimitive}) return flux_inner end @@ -387,9 +366,7 @@ end x, t, operator_type::Gradient, - equations::CompressibleNavierStokesDiffusion2D{ - GradientVariablesEntropy - }) + equations::CompressibleNavierStokesDiffusion2D{GradientVariablesEntropy}) v1, v2 = boundary_condition.boundary_condition_velocity.boundary_value_function(x, t, equations) @@ -406,9 +383,7 @@ end x, t, operator_type::Divergence, - equations::CompressibleNavierStokesDiffusion2D{ - GradientVariablesEntropy - }) + equations::CompressibleNavierStokesDiffusion2D{GradientVariablesEntropy}) normal_heat_flux = boundary_condition.boundary_condition_heat_flux.boundary_value_normal_flux_function(x, t, equations) @@ -427,9 +402,7 @@ end x, t, operator_type::Gradient, - equations::CompressibleNavierStokesDiffusion2D{ - GradientVariablesEntropy - }) + equations::CompressibleNavierStokesDiffusion2D{GradientVariablesEntropy}) v1, v2 = boundary_condition.boundary_condition_velocity.boundary_value_function(x, t, equations) @@ -448,9 +421,7 @@ end x, t, operator_type::Divergence, - equations::CompressibleNavierStokesDiffusion2D{ - GradientVariablesEntropy - }) + equations::CompressibleNavierStokesDiffusion2D{GradientVariablesEntropy}) return SVector(flux_inner[1], flux_inner[2], flux_inner[3], flux_inner[4]) end @@ -461,9 +432,7 @@ end normal::AbstractVector, x, t, operator_type::Gradient, - equations::CompressibleNavierStokesDiffusion2D{ - GradientVariablesPrimitive - }) + equations::CompressibleNavierStokesDiffusion2D{GradientVariablesPrimitive}) # BCs are usually specified as conservative variables so we convert them to primitive variables # because the gradients are assumed to be with respect to the primitive variables u_boundary = boundary_condition.boundary_value_function(x, t, equations) @@ -476,9 +445,7 @@ end normal::AbstractVector, x, t, operator_type::Divergence, - equations::CompressibleNavierStokesDiffusion2D{ - GradientVariablesPrimitive - }) + equations::CompressibleNavierStokesDiffusion2D{GradientVariablesPrimitive}) # for Dirichlet boundary conditions, we do not impose any conditions on the viscous fluxes return flux_inner end diff --git a/src/equations/compressible_navier_stokes_3d.jl b/src/equations/compressible_navier_stokes_3d.jl index c6a55983b53..e5567ae5789 100644 --- 
a/src/equations/compressible_navier_stokes_3d.jl +++ b/src/equations/compressible_navier_stokes_3d.jl @@ -81,8 +81,7 @@ w_2 = \frac{\rho v_1}{p},\, w_3 = \frac{\rho v_2}{p},\, w_4 = \frac{\rho v_3}{p} ``` """ struct CompressibleNavierStokesDiffusion3D{GradientVariables, RealT <: Real, - E <: AbstractCompressibleEulerEquations{3} - } <: + E <: AbstractCompressibleEulerEquations{3}} <: AbstractCompressibleNavierStokesDiffusion{3, 5, GradientVariables} # TODO: parabolic # 1) For now save gamma and inv(gamma-1) again, but could potentially reuse them from the Euler equations @@ -130,14 +129,10 @@ end # we specialize this function to compute gradients of primitive variables instead of # conservative variables. -function gradient_variable_transformation(::CompressibleNavierStokesDiffusion3D{ - GradientVariablesPrimitive - }) +function gradient_variable_transformation(::CompressibleNavierStokesDiffusion3D{GradientVariablesPrimitive}) cons2prim end -function gradient_variable_transformation(::CompressibleNavierStokesDiffusion3D{ - GradientVariablesEntropy - }) +function gradient_variable_transformation(::CompressibleNavierStokesDiffusion3D{GradientVariablesEntropy}) cons2entropy end @@ -250,17 +245,13 @@ end # For CNS, it is simplest to formulate the viscous terms in primitive variables, so we transform the transformed # variables into primitive variables. @inline function convert_transformed_to_primitive(u_transformed, - equations::CompressibleNavierStokesDiffusion3D{ - GradientVariablesPrimitive - }) + equations::CompressibleNavierStokesDiffusion3D{GradientVariablesPrimitive}) return u_transformed end # TODO: parabolic. Make this more efficient! @inline function convert_transformed_to_primitive(u_transformed, - equations::CompressibleNavierStokesDiffusion3D{ - GradientVariablesEntropy - }) + equations::CompressibleNavierStokesDiffusion3D{GradientVariablesEntropy}) # note: this uses CompressibleNavierStokesDiffusion3D versions of cons2prim and entropy2cons return cons2prim(entropy2cons(u_transformed, equations), equations) end @@ -271,17 +262,13 @@ end # Note, the first component of `gradient_entropy_vars` contains gradient(rho) which is unused. # TODO: parabolic; entropy stable viscous terms @inline function convert_derivative_to_primitive(u, gradient, - ::CompressibleNavierStokesDiffusion3D{ - GradientVariablesPrimitive - }) + ::CompressibleNavierStokesDiffusion3D{GradientVariablesPrimitive}) return gradient end # the first argument is always the "transformed" variables. @inline function convert_derivative_to_primitive(w, gradient_entropy_vars, - equations::CompressibleNavierStokesDiffusion3D{ - GradientVariablesEntropy - }) + equations::CompressibleNavierStokesDiffusion3D{GradientVariablesEntropy}) # TODO: parabolic. This is inefficient to pass in transformed variables but then transform them back. 
# We can fix this if we directly compute v1, v2, v3, T from the entropy variables @@ -342,9 +329,7 @@ end x, t, operator_type::Gradient, - equations::CompressibleNavierStokesDiffusion3D{ - GradientVariablesPrimitive - }) + equations::CompressibleNavierStokesDiffusion3D{GradientVariablesPrimitive}) v1, v2, v3 = boundary_condition.boundary_condition_velocity.boundary_value_function(x, t, equations) @@ -358,9 +343,7 @@ end x, t, operator_type::Divergence, - equations::CompressibleNavierStokesDiffusion3D{ - GradientVariablesPrimitive - }) + equations::CompressibleNavierStokesDiffusion3D{GradientVariablesPrimitive}) # rho, v1, v2, v3, _ = u_inner normal_heat_flux = boundary_condition.boundary_condition_heat_flux.boundary_value_normal_flux_function(x, t, @@ -381,9 +364,7 @@ end x, t, operator_type::Gradient, - equations::CompressibleNavierStokesDiffusion3D{ - GradientVariablesPrimitive - }) + equations::CompressibleNavierStokesDiffusion3D{GradientVariablesPrimitive}) v1, v2, v3 = boundary_condition.boundary_condition_velocity.boundary_value_function(x, t, equations) @@ -399,9 +380,7 @@ end x, t, operator_type::Divergence, - equations::CompressibleNavierStokesDiffusion3D{ - GradientVariablesPrimitive - }) + equations::CompressibleNavierStokesDiffusion3D{GradientVariablesPrimitive}) return flux_inner end @@ -420,9 +399,7 @@ end x, t, operator_type::Gradient, - equations::CompressibleNavierStokesDiffusion3D{ - GradientVariablesEntropy - }) + equations::CompressibleNavierStokesDiffusion3D{GradientVariablesEntropy}) v1, v2, v3 = boundary_condition.boundary_condition_velocity.boundary_value_function(x, t, equations) @@ -439,9 +416,7 @@ end x, t, operator_type::Divergence, - equations::CompressibleNavierStokesDiffusion3D{ - GradientVariablesEntropy - }) + equations::CompressibleNavierStokesDiffusion3D{GradientVariablesEntropy}) normal_heat_flux = boundary_condition.boundary_condition_heat_flux.boundary_value_normal_flux_function(x, t, equations) @@ -461,9 +436,7 @@ end x, t, operator_type::Gradient, - equations::CompressibleNavierStokesDiffusion3D{ - GradientVariablesEntropy - }) + equations::CompressibleNavierStokesDiffusion3D{GradientVariablesEntropy}) v1, v2, v3 = boundary_condition.boundary_condition_velocity.boundary_value_function(x, t, equations) @@ -482,9 +455,7 @@ end x, t, operator_type::Divergence, - equations::CompressibleNavierStokesDiffusion3D{ - GradientVariablesEntropy - }) + equations::CompressibleNavierStokesDiffusion3D{GradientVariablesEntropy}) return SVector(flux_inner[1], flux_inner[2], flux_inner[3], flux_inner[4], flux_inner[5]) end diff --git a/src/equations/equations.jl b/src/equations/equations.jl index ba2ad1cd1cd..582d672b756 100644 --- a/src/equations/equations.jl +++ b/src/equations/equations.jl @@ -260,7 +260,14 @@ combined with certain solvers (e.g., subcell limiting). function n_nonconservative_terms end have_constant_speed(::AbstractEquations) = False() +""" + default_analysis_errors(equations) + +Default analysis errors (`:l2_error` and `:linf_error`) used by the +[`AnalysisCallback`](@ref). 
+""" default_analysis_errors(::AbstractEquations) = (:l2_error, :linf_error) + """ default_analysis_integrals(equations) diff --git a/src/equations/ideal_glm_mhd_1d.jl b/src/equations/ideal_glm_mhd_1d.jl index 85030e8a5ad..5a523daf3f6 100644 --- a/src/equations/ideal_glm_mhd_1d.jl +++ b/src/equations/ideal_glm_mhd_1d.jl @@ -259,6 +259,152 @@ Hindenlang and Gassner (2019), extending [`flux_ranocha`](@ref) to the MHD equat return SVector(f1, f2, f3, f4, f5, f6, f7, f8) end +""" + flux_hllc(u_ll, u_rr, orientation, equations::IdealGlmMhdEquations1D) + +- Li (2005) +An HLLC Riemann solver for magneto-hydrodynamics +[DOI: 10.1016/j.jcp.2004.08.020](https://doi.org/10.1016/j.jcp.2004.08.020). +""" +function flux_hllc(u_ll, u_rr, orientation::Integer, + equations::IdealGlmMhdEquations1D) + # Unpack left and right states + rho_ll, v1_ll, v2_ll, v3_ll, p_ll, B1_ll, B2_ll, B3_ll = cons2prim(u_ll, equations) + rho_rr, v1_rr, v2_rr, v3_rr, p_rr, B1_rr, B2_rr, B3_rr = cons2prim(u_rr, equations) + + # Total pressure, i.e., thermal + magnetic pressures (eq. (12)) + p_tot_ll = p_ll + 0.5 * (B1_ll^2 + B2_ll^2 + B3_ll^2) + p_tot_rr = p_rr + 0.5 * (B1_rr^2 + B2_rr^2 + B3_rr^2) + + # Conserved variables + rho_v1_ll = u_ll[2] + rho_v2_ll = u_ll[3] + rho_v3_ll = u_ll[4] + + rho_v1_rr = u_rr[2] + rho_v2_rr = u_rr[3] + rho_v3_rr = u_rr[4] + + # Obtain left and right fluxes + f_ll = flux(u_ll, orientation, equations) + f_rr = flux(u_rr, orientation, equations) + + SsL, SsR = min_max_speed_einfeldt(u_ll, u_rr, orientation, equations) + sMu_L = SsL - v1_ll + sMu_R = SsR - v1_rr + if SsL >= 0 + f1 = f_ll[1] + f2 = f_ll[2] + f3 = f_ll[3] + f4 = f_ll[4] + f5 = f_ll[5] + f6 = f_ll[6] + f7 = f_ll[7] + f8 = f_ll[8] + elseif SsR <= 0 + f1 = f_rr[1] + f2 = f_rr[2] + f3 = f_rr[3] + f4 = f_rr[4] + f5 = f_rr[5] + f6 = f_rr[6] + f7 = f_rr[7] + f8 = f_rr[8] + else + # Compute the "HLLC-speed", eq. (14) from paper mentioned above + #= + SStar = (rho_rr * v1_rr * sMu_R - rho_ll * v1_ll * sMu_L + p_tot_ll - p_tot_rr - B1_ll^2 + B1_rr^2 ) / + (rho_rr * sMu_R - rho_ll * sMu_L) + =# + # Simplification for 1D: B1 is constant + SStar = (rho_rr * v1_rr * sMu_R - rho_ll * v1_ll * sMu_L + p_tot_ll - p_tot_rr) / + (rho_rr * sMu_R - rho_ll * sMu_L) + + Sdiff = SsR - SsL + + # Compute HLL values for vStar, BStar + # These correspond to eq. 
(28) and (30) from the referenced paper + # and the classic HLL intermediate state given by (2) + rho_HLL = (SsR * rho_rr - SsL * rho_ll - (f_rr[1] - f_ll[1])) / Sdiff + + v1Star = (SsR * rho_v1_rr - SsL * rho_v1_ll - (f_rr[2] - f_ll[2])) / + (Sdiff * rho_HLL) + v2Star = (SsR * rho_v2_rr - SsL * rho_v2_ll - (f_rr[3] - f_ll[3])) / + (Sdiff * rho_HLL) + v3Star = (SsR * rho_v3_rr - SsL * rho_v3_ll - (f_rr[4] - f_ll[4])) / + (Sdiff * rho_HLL) + + #B1Star = (SsR * B1_rr - SsL * B1_ll - (f_rr[6] - f_ll[6])) / Sdiff + # 1D B1 = constant => B1_ll = B1_rr = B1Star + B1Star = B1_ll + + B2Star = (SsR * B2_rr - SsL * B2_ll - (f_rr[7] - f_ll[7])) / Sdiff + B3Star = (SsR * B3_rr - SsL * B3_ll - (f_rr[8] - f_ll[8])) / Sdiff + if SsL <= SStar + SdiffStar = SsL - SStar + + densStar = rho_ll * sMu_L / SdiffStar # (19) + + mom_1_Star = densStar * SStar # (20) + mom_2_Star = densStar * v2_ll - + (B1Star * B2Star - B1_ll * B2_ll) / SdiffStar # (21) + mom_3_Star = densStar * v3_ll - + (B1Star * B3Star - B1_ll * B3_ll) / SdiffStar # (22) + + #p_tot_Star = rho_ll * sMu_L * (SStar - v1_ll) + p_tot_ll - B1_ll^2 + B1Star^2 # (17) + # 1D B1 = constant => B1_ll = B1_rr = B1Star + p_tot_Star = rho_ll * sMu_L * (SStar - v1_ll) + p_tot_ll # (17) + + enerStar = u_ll[5] * sMu_L / SdiffStar + + (p_tot_Star * SStar - p_tot_ll * v1_ll - (B1Star * + (B1Star * v1Star + B2Star * v2Star + B3Star * v3Star) - + B1_ll * (B1_ll * v1_ll + B2_ll * v2_ll + B3_ll * v3_ll))) / + SdiffStar # (23) + + # Classic HLLC update (32) + f1 = f_ll[1] + SsL * (densStar - u_ll[1]) + f2 = f_ll[2] + SsL * (mom_1_Star - u_ll[2]) + f3 = f_ll[3] + SsL * (mom_2_Star - u_ll[3]) + f4 = f_ll[4] + SsL * (mom_3_Star - u_ll[4]) + f5 = f_ll[5] + SsL * (enerStar - u_ll[5]) + f6 = f_ll[6] + SsL * (B1Star - u_ll[6]) + f7 = f_ll[7] + SsL * (B2Star - u_ll[7]) + f8 = f_ll[8] + SsL * (B3Star - u_ll[8]) + else # SStar <= Ssr + SdiffStar = SsR - SStar + + densStar = rho_rr * sMu_R / SdiffStar # (19) + + mom_1_Star = densStar * SStar # (20) + mom_2_Star = densStar * v2_rr - + (B1Star * B2Star - B1_rr * B2_rr) / SdiffStar # (21) + mom_3_Star = densStar * v3_rr - + (B1Star * B3Star - B1_rr * B3_rr) / SdiffStar # (22) + + #p_tot_Star = rho_rr * sMu_R * (SStar - v1_rr) + p_tot_rr - B1_rr^2 + B1Star^2 # (17) + # 1D B1 = constant => B1_ll = B1_rr = B1Star + p_tot_Star = rho_rr * sMu_R * (SStar - v1_rr) + p_tot_rr # (17) + + enerStar = u_rr[5] * sMu_R / SdiffStar + + (p_tot_Star * SStar - p_tot_rr * v1_rr - (B1Star * + (B1Star * v1Star + B2Star * v2Star + B3Star * v3Star) - + B1_rr * (B1_rr * v1_rr + B2_rr * v2_rr + B3_rr * v3_rr))) / + SdiffStar # (23) + + # Classic HLLC update (32) + f1 = f_rr[1] + SsR * (densStar - u_rr[1]) + f2 = f_rr[2] + SsR * (mom_1_Star - u_rr[2]) + f3 = f_rr[3] + SsR * (mom_2_Star - u_rr[3]) + f4 = f_rr[4] + SsR * (mom_3_Star - u_rr[4]) + f5 = f_rr[5] + SsR * (enerStar - u_rr[5]) + f6 = f_rr[6] + SsR * (B1Star - u_rr[6]) + f7 = f_rr[7] + SsR * (B2Star - u_rr[7]) + f8 = f_rr[8] + SsR * (B3Star - u_rr[8]) + end + end + return SVector(f1, f2, f3, f4, f5, f6, f7, f8) +end + # Calculate maximum wave speed for local Lax-Friedrichs-type dissipation @inline function max_abs_speed_naive(u_ll, u_rr, orientation::Integer, equations::IdealGlmMhdEquations1D) diff --git a/src/equations/ideal_glm_mhd_multicomponent_1d.jl b/src/equations/ideal_glm_mhd_multicomponent_1d.jl index 0efa6426448..dad7c27e86c 100644 --- a/src/equations/ideal_glm_mhd_multicomponent_1d.jl +++ b/src/equations/ideal_glm_mhd_multicomponent_1d.jl @@ -17,19 +17,15 @@ mutable struct 
IdealGlmMhdMulticomponentEquations1D{NVARS, NCOMP, RealT <: Real} cv::SVector{NCOMP, RealT} cp::SVector{NCOMP, RealT} - function IdealGlmMhdMulticomponentEquations1D{NVARS, NCOMP, RealT}(gammas::SVector{ - NCOMP, - RealT - }, - gas_constants::SVector{ - NCOMP, - RealT - }) where { - NVARS, - NCOMP, - RealT <: - Real - } + function IdealGlmMhdMulticomponentEquations1D{NVARS, NCOMP, RealT}(gammas::SVector{NCOMP, + RealT}, + gas_constants::SVector{NCOMP, + RealT}) where { + NVARS, + NCOMP, + RealT <: + Real + } NCOMP >= 1 || throw(DimensionMismatch("`gammas` and `gas_constants` have to be filled with at least one value")) diff --git a/src/equations/ideal_glm_mhd_multicomponent_2d.jl b/src/equations/ideal_glm_mhd_multicomponent_2d.jl index 9b0eeb411e8..a3a50c0485f 100644 --- a/src/equations/ideal_glm_mhd_multicomponent_2d.jl +++ b/src/equations/ideal_glm_mhd_multicomponent_2d.jl @@ -18,19 +18,15 @@ mutable struct IdealGlmMhdMulticomponentEquations2D{NVARS, NCOMP, RealT <: Real} cp::SVector{NCOMP, RealT} c_h::RealT # GLM cleaning speed - function IdealGlmMhdMulticomponentEquations2D{NVARS, NCOMP, RealT}(gammas::SVector{ - NCOMP, - RealT - }, - gas_constants::SVector{ - NCOMP, - RealT - }) where { - NVARS, - NCOMP, - RealT <: - Real - } + function IdealGlmMhdMulticomponentEquations2D{NVARS, NCOMP, RealT}(gammas::SVector{NCOMP, + RealT}, + gas_constants::SVector{NCOMP, + RealT}) where { + NVARS, + NCOMP, + RealT <: + Real + } NCOMP >= 1 || throw(DimensionMismatch("`gammas` and `gas_constants` have to be filled with at least one value")) diff --git a/src/equations/numerical_fluxes.jl b/src/equations/numerical_fluxes.jl index 71782644b17..44d523b6e89 100644 --- a/src/equations/numerical_fluxes.jl +++ b/src/equations/numerical_fluxes.jl @@ -61,6 +61,23 @@ struct FluxRotated{NumericalFlux} numerical_flux::NumericalFlux end +# Rotated surface flux computation (2D version) +@inline function (flux_rotated::FluxRotated)(u, + normal_direction::AbstractVector, + equations::AbstractEquations{2}) + @unpack numerical_flux = flux_rotated + + norm_ = norm(normal_direction) + # Normalize the vector without using `normalize` since we need to multiply by the `norm_` later + normal_vector = normal_direction / norm_ + + u_rotated = rotate_to_x(u, normal_vector, equations) + + f = numerical_flux(u_rotated, 1, equations) + + return rotate_from_x(f, normal_vector, equations) * norm_ +end + # Rotated surface flux computation (2D version) @inline function (flux_rotated::FluxRotated)(u_ll, u_rr, normal_direction::AbstractVector, @@ -181,10 +198,7 @@ function max_abs_speed_naive end end const FluxLaxFriedrichs{MaxAbsSpeed} = FluxPlusDissipation{typeof(flux_central), - DissipationLocalLaxFriedrichs{ - MaxAbsSpeed - } - } + DissipationLocalLaxFriedrichs{MaxAbsSpeed}} """ FluxLaxFriedrichs(max_abs_speed=max_abs_speed_naive) diff --git a/src/equations/polytropic_euler_2d.jl b/src/equations/polytropic_euler_2d.jl index d4902bbafb2..f5d2f7b0bad 100644 --- a/src/equations/polytropic_euler_2d.jl +++ b/src/equations/polytropic_euler_2d.jl @@ -120,7 +120,7 @@ function initial_condition_weak_blast_wave(x, t, equations::PolytropicEulerEquat return prim2cons(SVector(rho, v1, v2), equations) end -# Calculate 1D flux for a single point in the normal direction +# Calculate 2D flux for a single point in the normal direction # Note, this directional vector is not normalized @inline function flux(u, normal_direction::AbstractVector, equations::PolytropicEulerEquations2D) @@ -135,8 +135,28 @@ end return SVector(f1, f2, f3) end +# Calculate 2D 
flux for a single point +@inline function flux(u, orientation::Integer, equations::PolytropicEulerEquations2D) + _, v1, v2 = cons2prim(u, equations) + p = pressure(u, equations) + + rho_v1 = u[2] + rho_v2 = u[3] + + if orientation == 1 + f1 = rho_v1 + f2 = rho_v1 * v1 + p + f3 = rho_v1 * v2 + else + f1 = rho_v2 + f2 = rho_v2 * v1 + f3 = rho_v2 * v2 + p + end + return SVector(f1, f2, f3) +end + """ - flux_winters_etal(u_ll, u_rr, normal_direction, + flux_winters_etal(u_ll, u_rr, orientation_or_normal_direction, equations::PolytropicEulerEquations2D) Entropy conserving two-point flux for isothermal or polytropic gases. @@ -178,6 +198,37 @@ For details see Section 3.2 of the following reference return SVector(f1, f2, f3) end +@inline function flux_winters_etal(u_ll, u_rr, orientation::Integer, + equations::PolytropicEulerEquations2D) + # Unpack left and right state + rho_ll, v1_ll, v2_ll = cons2prim(u_ll, equations) + rho_rr, v1_rr, v2_rr = cons2prim(u_rr, equations) + p_ll = equations.kappa * rho_ll^equations.gamma + p_rr = equations.kappa * rho_rr^equations.gamma + + # Compute the necessary mean values + if equations.gamma == 1.0 # isothermal gas + rho_mean = ln_mean(rho_ll, rho_rr) + else # equations.gamma > 1 # polytropic gas + rho_mean = stolarsky_mean(rho_ll, rho_rr, equations.gamma) + end + v1_avg = 0.5 * (v1_ll + v1_rr) + v2_avg = 0.5 * (v2_ll + v2_rr) + p_avg = 0.5 * (p_ll + p_rr) + + if orientation == 1 # x-direction + f1 = rho_mean * 0.5 * (v1_ll + v1_rr) + f2 = f1 * v1_avg + p_avg + f3 = f1 * v2_avg + else # y-direction + f1 = rho_mean * 0.5 * (v2_ll + v2_rr) + f2 = f1 * v1_avg + f3 = f1 * v2_avg + p_avg + end + + return SVector(f1, f2, f3) +end + @inline function min_max_speed_naive(u_ll, u_rr, normal_direction::AbstractVector, equations::PolytropicEulerEquations2D) rho_ll, v1_ll, v2_ll = cons2prim(u_ll, equations) @@ -196,6 +247,53 @@ end return lambda_min, lambda_max end +# More refined estimates for minimum and maximum wave speeds for HLL-type fluxes +@inline function min_max_speed_davis(u_ll, u_rr, orientation::Integer, + equations::PolytropicEulerEquations2D) + rho_ll, v1_ll, v2_ll = cons2prim(u_ll, equations) + rho_rr, v1_rr, v2_rr = cons2prim(u_rr, equations) + # Pressure for polytropic Euler + p_ll = equations.kappa * rho_ll^equations.gamma + p_rr = equations.kappa * rho_rr^equations.gamma + + c_ll = sqrt(equations.gamma * p_ll / rho_ll) + c_rr = sqrt(equations.gamma * p_rr / rho_rr) + + if orientation == 1 # x-direction + λ_min = min(v1_ll - c_ll, v1_rr - c_rr) + λ_max = max(v1_ll + c_ll, v1_rr + c_rr) + else # y-direction + λ_min = min(v2_ll - c_ll, v2_rr - c_rr) + λ_max = max(v2_ll + c_ll, v2_rr + c_rr) + end + + return λ_min, λ_max +end + +# More refined estimates for minimum and maximum wave speeds for HLL-type fluxes +@inline function min_max_speed_davis(u_ll, u_rr, normal_direction::AbstractVector, + equations::PolytropicEulerEquations2D) + rho_ll, v1_ll, v2_ll = cons2prim(u_ll, equations) + rho_rr, v1_rr, v2_rr = cons2prim(u_rr, equations) + # Pressure for polytropic Euler + p_ll = equations.kappa * rho_ll^equations.gamma + p_rr = equations.kappa * rho_rr^equations.gamma + + norm_ = norm(normal_direction) + + c_ll = sqrt(equations.gamma * p_ll / rho_ll) * norm_ + c_rr = sqrt(equations.gamma * p_rr / rho_rr) * norm_ + + v_normal_ll = v1_ll * normal_direction[1] + v2_ll * normal_direction[2] + v_normal_rr = v1_rr * normal_direction[1] + v2_rr * normal_direction[2] + + # The v_normals are already scaled by the norm + λ_min = min(v_normal_ll - c_ll, v_normal_rr - 
c_rr) + λ_max = max(v_normal_ll + c_ll, v_normal_rr + c_rr) + + return λ_min, λ_max +end + @inline function max_abs_speeds(u, equations::PolytropicEulerEquations2D) rho, v1, v2 = cons2prim(u, equations) c = sqrt(equations.gamma * equations.kappa * rho^(equations.gamma - 1)) diff --git a/src/meshes/mesh_io.jl b/src/meshes/mesh_io.jl index 92e38ce1bf3..337e33e6969 100644 --- a/src/meshes/mesh_io.jl +++ b/src/meshes/mesh_io.jl @@ -30,6 +30,7 @@ function save_mesh_file(mesh::TreeMesh, output_directory, timestep, attributes(file)["mesh_type"] = get_name(mesh) attributes(file)["ndims"] = ndims(mesh) attributes(file)["n_cells"] = n_cells + attributes(file)["capacity"] = mesh.tree.capacity attributes(file)["n_leaf_cells"] = count_leaf_cells(mesh.tree) attributes(file)["minimum_level"] = minimum_level(mesh.tree) attributes(file)["maximum_level"] = maximum_level(mesh.tree) @@ -249,10 +250,10 @@ function load_mesh_serial(mesh_file::AbstractString; n_cells_max, RealT) end if mesh_type == "TreeMesh" - n_cells = h5open(mesh_file, "r") do file - return read(attributes(file)["n_cells"]) + capacity = h5open(mesh_file, "r") do file + return read(attributes(file)["capacity"]) end - mesh = TreeMesh(SerialTree{ndims}, max(n_cells, n_cells_max)) + mesh = TreeMesh(SerialTree{ndims}, max(n_cells_max, capacity)) load_mesh!(mesh, mesh_file) elseif mesh_type == "StructuredMesh" size_, mapping_as_string = h5open(mesh_file, "r") do file diff --git a/src/meshes/unstructured_mesh.jl b/src/meshes/unstructured_mesh.jl index c370c0f25f8..fae52f834b3 100644 --- a/src/meshes/unstructured_mesh.jl +++ b/src/meshes/unstructured_mesh.jl @@ -15,8 +15,9 @@ An unstructured (possibly curved) quadrilateral mesh. All mesh information, neighbour coupling, and boundary curve information is read in from a mesh file `filename`. """ -mutable struct UnstructuredMesh2D{RealT <: Real, CurvedSurfaceT <: CurvedSurface{RealT} - } <: AbstractMesh{2} +mutable struct UnstructuredMesh2D{RealT <: Real, + CurvedSurfaceT <: CurvedSurface{RealT}} <: + AbstractMesh{2} filename :: String n_corners :: Int n_surfaces :: Int # total number of surfaces diff --git a/src/semidiscretization/semidiscretization_coupled.jl b/src/semidiscretization/semidiscretization_coupled.jl index 49763b12e5d..0941ae6a8ca 100644 --- a/src/semidiscretization/semidiscretization_coupled.jl +++ b/src/semidiscretization/semidiscretization_coupled.jl @@ -41,8 +41,9 @@ function SemidiscretizationCoupled(semis...) 
performance_counter = PerformanceCounter() - SemidiscretizationCoupled{typeof(semis), typeof(u_indices), typeof(performance_counter) - }(semis, u_indices, performance_counter) + SemidiscretizationCoupled{typeof(semis), typeof(u_indices), + typeof(performance_counter)}(semis, u_indices, + performance_counter) end function Base.show(io::IO, semi::SemidiscretizationCoupled) @@ -432,8 +433,7 @@ function allocate_coupled_boundary_condition(boundary_condition, direction, mesh end # In 2D -function allocate_coupled_boundary_condition(boundary_condition::BoundaryConditionCoupled{2 - }, +function allocate_coupled_boundary_condition(boundary_condition::BoundaryConditionCoupled{2}, direction, mesh, equations, dg::DGSEM) if direction in (1, 2) cell_size = size(mesh, 2) diff --git a/src/semidiscretization/semidiscretization_hyperbolic.jl b/src/semidiscretization/semidiscretization_hyperbolic.jl index 9d465bfcc5f..7ebd758de37 100644 --- a/src/semidiscretization/semidiscretization_hyperbolic.jl +++ b/src/semidiscretization/semidiscretization_hyperbolic.jl @@ -29,17 +29,18 @@ struct SemidiscretizationHyperbolic{Mesh, Equations, InitialCondition, performance_counter::PerformanceCounter function SemidiscretizationHyperbolic{Mesh, Equations, InitialCondition, - BoundaryConditions, SourceTerms, Solver, Cache - }(mesh::Mesh, equations::Equations, - initial_condition::InitialCondition, - boundary_conditions::BoundaryConditions, - source_terms::SourceTerms, - solver::Solver, - cache::Cache) where {Mesh, Equations, - InitialCondition, - BoundaryConditions, - SourceTerms, Solver, - Cache} + BoundaryConditions, SourceTerms, Solver, + Cache}(mesh::Mesh, equations::Equations, + initial_condition::InitialCondition, + boundary_conditions::BoundaryConditions, + source_terms::SourceTerms, + solver::Solver, + cache::Cache) where {Mesh, Equations, + InitialCondition, + BoundaryConditions, + SourceTerms, + Solver, + Cache} @assert ndims(mesh) == ndims(equations) performance_counter = PerformanceCounter() @@ -268,8 +269,7 @@ end function print_boundary_conditions(io, semi::SemiHypMeshBCSolver{<:Any, - <:UnstructuredSortedBoundaryTypes - }) + <:UnstructuredSortedBoundaryTypes}) @unpack boundary_conditions = semi @unpack boundary_dictionary = boundary_conditions summary_line(io, "boundary conditions", length(boundary_dictionary)) @@ -289,8 +289,7 @@ function print_boundary_conditions(io, semi::SemiHypMeshBCSolver{<:Any, <:NamedT end function print_boundary_conditions(io, - semi::SemiHypMeshBCSolver{ - <:Union{TreeMesh, + semi::SemiHypMeshBCSolver{<:Union{TreeMesh, StructuredMesh}, <:Union{Tuple, NamedTuple, AbstractArray}}) diff --git a/src/semidiscretization/semidiscretization_hyperbolic_parabolic.jl b/src/semidiscretization/semidiscretization_hyperbolic_parabolic.jl index 35340d65b1e..0f44941390a 100644 --- a/src/semidiscretization/semidiscretization_hyperbolic_parabolic.jl +++ b/src/semidiscretization/semidiscretization_hyperbolic_parabolic.jl @@ -45,30 +45,29 @@ struct SemidiscretizationHyperbolicParabolic{Mesh, Equations, EquationsParabolic BoundaryConditionsParabolic, SourceTerms, Solver, SolverParabolic, Cache, - CacheParabolic - }(mesh::Mesh, - equations::Equations, - equations_parabolic::EquationsParabolic, - initial_condition::InitialCondition, - boundary_conditions::BoundaryConditions, - boundary_conditions_parabolic::BoundaryConditionsParabolic, - source_terms::SourceTerms, - solver::Solver, - solver_parabolic::SolverParabolic, - cache::Cache, - cache_parabolic::CacheParabolic) where { - Mesh, - Equations, - 
EquationsParabolic, - InitialCondition, - BoundaryConditions, - BoundaryConditionsParabolic, - SourceTerms, - Solver, - SolverParabolic, - Cache, - CacheParabolic - } + CacheParabolic}(mesh::Mesh, + equations::Equations, + equations_parabolic::EquationsParabolic, + initial_condition::InitialCondition, + boundary_conditions::BoundaryConditions, + boundary_conditions_parabolic::BoundaryConditionsParabolic, + source_terms::SourceTerms, + solver::Solver, + solver_parabolic::SolverParabolic, + cache::Cache, + cache_parabolic::CacheParabolic) where { + Mesh, + Equations, + EquationsParabolic, + InitialCondition, + BoundaryConditions, + BoundaryConditionsParabolic, + SourceTerms, + Solver, + SolverParabolic, + Cache, + CacheParabolic + } @assert ndims(mesh) == ndims(equations) # Todo: assert nvariables(equations)==nvariables(equations_parabolic) diff --git a/src/solvers/dgmulti/flux_differencing.jl b/src/solvers/dgmulti/flux_differencing.jl index 884a8fac43b..36aa50dff4e 100644 --- a/src/solvers/dgmulti/flux_differencing.jl +++ b/src/solvers/dgmulti/flux_differencing.jl @@ -88,8 +88,8 @@ end # Version for sparse operators and symmetric fluxes @inline function hadamard_sum!(du, - A::LinearAlgebra.Adjoint{<:Any, <:AbstractSparseMatrixCSC - }, + A::LinearAlgebra.Adjoint{<:Any, + <:AbstractSparseMatrixCSC}, flux_is_symmetric::True, volume_flux, orientation_or_normal_direction, u, equations) A_base = parent(A) # the adjoint of a SparseMatrixCSC is basically a SparseMatrixCSR @@ -122,8 +122,8 @@ end # Version for sparse operators and symmetric fluxes with curved meshes @inline function hadamard_sum!(du, - A::LinearAlgebra.Adjoint{<:Any, <:AbstractSparseMatrixCSC - }, + A::LinearAlgebra.Adjoint{<:Any, + <:AbstractSparseMatrixCSC}, flux_is_symmetric::True, volume_flux, normal_directions::AbstractVector{<:AbstractVector}, u, equations) @@ -161,8 +161,8 @@ end # TODO: DGMulti. Fix for curved meshes. # Version for sparse operators and non-symmetric fluxes @inline function hadamard_sum!(du, - A::LinearAlgebra.Adjoint{<:Any, <:AbstractSparseMatrixCSC - }, + A::LinearAlgebra.Adjoint{<:Any, + <:AbstractSparseMatrixCSC}, flux_is_symmetric::False, volume_flux, normal_direction::AbstractVector, u, equations) A_base = parent(A) # the adjoint of a SparseMatrixCSC is basically a SparseMatrixCSR diff --git a/src/solvers/dgmulti/flux_differencing_gauss_sbp.jl b/src/solvers/dgmulti/flux_differencing_gauss_sbp.jl index 2c5505cc4e9..9059caf87f6 100644 --- a/src/solvers/dgmulti/flux_differencing_gauss_sbp.jl +++ b/src/solvers/dgmulti/flux_differencing_gauss_sbp.jl @@ -380,9 +380,8 @@ function create_cache(mesh::DGMultiMesh, equations, # specialized operators to perform tensor product interpolation to faces for Gauss nodes interp_matrix_gauss_to_face = TensorProductGaussFaceOperator(Interpolation(), dg) - projection_matrix_gauss_to_face = TensorProductGaussFaceOperator(Projection{ - Static.False() - }(), dg) + projection_matrix_gauss_to_face = TensorProductGaussFaceOperator(Projection{Static.False()}(), + dg) # `LIFT` matrix for Gauss nodes - this is equivalent to `projection_matrix_gauss_to_face` scaled by `diagm(rd.wf)`, # where `rd.wf` are Gauss node face quadrature weights. 
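One remark on the `hadamard_sum!` methods touched above: they rely on the fact noted in the code that the adjoint of a `SparseMatrixCSC` behaves like a CSR matrix, so rows of `A` can be traversed through the columns of `parent(A)`. A self-contained illustration using only the standard `SparseArrays` API (not part of this patch):

```julia
using SparseArrays, LinearAlgebra

A_base = sparse([1.0 0.0 2.0;
                 0.0 3.0 0.0;
                 4.0 0.0 5.0])
A = A_base'  # lazy Adjoint wrapper, no data is copied

# Row `i` of A is column `i` of parent(A), so the nonzeros of a row of A
# can be visited CSR-style via the CSC structure of A_base:
i = 1
for idx in nzrange(A_base, i)      # nonzero entries of column i of A_base
    j = rowvals(A_base)[idx]       # = column index within row i of A
    val = nonzeros(A_base)[idx]
    println("A[$i, $j] = $val")    # prints A[1, 1] = 1.0 and A[1, 3] = 4.0
end
```

This is why the kernels can skip an explicit transposition: `parent(A)` is accessed column by column while logically summing over rows of `A`.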
diff --git a/src/solvers/dgmulti/sbp.jl b/src/solvers/dgmulti/sbp.jl index d434d3146ce..232555e18b5 100644 --- a/src/solvers/dgmulti/sbp.jl +++ b/src/solvers/dgmulti/sbp.jl @@ -40,26 +40,28 @@ end const DGMultiPeriodicFDSBP{NDIMS, ApproxType, ElemType} = DGMulti{NDIMS, ElemType, ApproxType, SurfaceIntegral, - VolumeIntegral - } where {NDIMS, ElemType, - ApproxType <: - SummationByPartsOperators.AbstractPeriodicDerivativeOperator, - SurfaceIntegral, - VolumeIntegral} + VolumeIntegral} where { + NDIMS, + ElemType, + ApproxType <: + SummationByPartsOperators.AbstractPeriodicDerivativeOperator, + SurfaceIntegral, + VolumeIntegral + } const DGMultiFluxDiffPeriodicFDSBP{NDIMS, ApproxType, ElemType} = DGMulti{NDIMS, ElemType, ApproxType, SurfaceIntegral, - VolumeIntegral - } where {NDIMS, - ElemType, - ApproxType <: - SummationByPartsOperators.AbstractPeriodicDerivativeOperator, - SurfaceIntegral <: - SurfaceIntegralWeakForm, - VolumeIntegral <: - VolumeIntegralFluxDifferencing - } + VolumeIntegral} where { + NDIMS, + ElemType, + ApproxType <: + SummationByPartsOperators.AbstractPeriodicDerivativeOperator, + SurfaceIntegral <: + SurfaceIntegralWeakForm, + VolumeIntegral <: + VolumeIntegralFluxDifferencing + } """ DGMultiMesh(dg::DGMulti) diff --git a/src/solvers/dgmulti/types.jl b/src/solvers/dgmulti/types.jl index ae1eed7fd52..813bc67061e 100644 --- a/src/solvers/dgmulti/types.jl +++ b/src/solvers/dgmulti/types.jl @@ -4,49 +4,46 @@ # `DGMulti` refers to both multiple DG types (polynomial/SBP, simplices/quads/hexes) as well as # the use of multi-dimensional operators in the solver. -const DGMulti{NDIMS, ElemType, ApproxType, SurfaceIntegral, VolumeIntegral} = DG{ - <:RefElemData{ - NDIMS, +const DGMulti{NDIMS, ElemType, ApproxType, SurfaceIntegral, VolumeIntegral} = DG{<:RefElemData{NDIMS, ElemType, - ApproxType - }, + ApproxType}, Mortar, SurfaceIntegral, - VolumeIntegral - } where { - Mortar - } + VolumeIntegral} where { + Mortar + } # Type aliases. The first parameter is `ApproxType` since it is more commonly used for dispatch. const DGMultiWeakForm{ApproxType, ElemType} = DGMulti{NDIMS, ElemType, ApproxType, <:SurfaceIntegralWeakForm, - <:VolumeIntegralWeakForm - } where {NDIMS} + <:VolumeIntegralWeakForm} where {NDIMS + } const DGMultiFluxDiff{ApproxType, ElemType} = DGMulti{NDIMS, ElemType, ApproxType, <:SurfaceIntegralWeakForm, - <:Union{ - VolumeIntegralFluxDifferencing, - VolumeIntegralShockCapturingHG - }} where {NDIMS} + <:Union{VolumeIntegralFluxDifferencing, + VolumeIntegralShockCapturingHG}} where { + NDIMS + } const DGMultiFluxDiffSBP{ApproxType, ElemType} = DGMulti{NDIMS, ElemType, ApproxType, <:SurfaceIntegralWeakForm, - <:Union{ - VolumeIntegralFluxDifferencing, - VolumeIntegralShockCapturingHG - } - } where {NDIMS, - ApproxType <: Union{SBP, - AbstractDerivativeOperator - }} + <:Union{VolumeIntegralFluxDifferencing, + VolumeIntegralShockCapturingHG}} where { + NDIMS, + ApproxType <: + Union{SBP, + AbstractDerivativeOperator} + } const DGMultiSBP{ApproxType, ElemType} = DGMulti{NDIMS, ElemType, ApproxType, - SurfaceIntegral, VolumeIntegral - } where {NDIMS, ElemType, - ApproxType <: Union{SBP, - AbstractDerivativeOperator}, - SurfaceIntegral, VolumeIntegral} + SurfaceIntegral, + VolumeIntegral} where {NDIMS, ElemType, + ApproxType <: + Union{SBP, + AbstractDerivativeOperator}, + SurfaceIntegral, + VolumeIntegral} # By default, Julia/LLVM does not use fused multiply-add operations (FMAs). 
# Since these FMAs can increase the performance of many numerical algorithms, diff --git a/src/solvers/dgsem/basis_lobatto_legendre.jl b/src/solvers/dgsem/basis_lobatto_legendre.jl index 1b4e5446e44..6a92fd1c066 100644 --- a/src/solvers/dgsem/basis_lobatto_legendre.jl +++ b/src/solvers/dgsem/basis_lobatto_legendre.jl @@ -188,9 +188,9 @@ function MortarL2(basis::LobattoLegendreBasis) reverse_upper = Matrix{RealT}(reverse_upper_) reverse_lower = Matrix{RealT}(reverse_lower_) - LobattoLegendreMortarL2{RealT, nnodes_, typeof(forward_upper), typeof(reverse_upper) - }(forward_upper, forward_lower, - reverse_upper, reverse_lower) + LobattoLegendreMortarL2{RealT, nnodes_, typeof(forward_upper), + typeof(reverse_upper)}(forward_upper, forward_lower, + reverse_upper, reverse_lower) end function Base.show(io::IO, mortar::LobattoLegendreMortarL2) diff --git a/src/solvers/dgsem_p4est/containers.jl b/src/solvers/dgsem_p4est/containers.jl index 0176f5c6346..f9830d0011c 100644 --- a/src/solvers/dgsem_p4est/containers.jl +++ b/src/solvers/dgsem_p4est/containers.jl @@ -429,13 +429,17 @@ function reinitialize_containers!(mesh::P4estMesh, equations, dg::DGSEM, cache) @unpack boundaries = cache resize!(boundaries, required.boundaries) - # resize mortars container - @unpack mortars = cache - resize!(mortars, required.mortars) - - # re-initialize containers together to reduce - # the number of iterations over the mesh in `p4est` - init_surfaces!(interfaces, mortars, boundaries, mesh) + # re-initialize mortars container + if hasproperty(cache, :mortars) # cache_parabolic does not carry mortars + @unpack mortars = cache + resize!(mortars, required.mortars) + + # re-initialize containers together to reduce + # the number of iterations over the mesh in `p4est` + init_surfaces!(interfaces, mortars, boundaries, mesh) + else + init_surfaces!(interfaces, nothing, boundaries, mesh) + end end # A helper struct used in initialization methods below @@ -450,8 +454,7 @@ mutable struct InitSurfacesIterFaceUserData{Interfaces, Mortars, Boundaries, Mes end function InitSurfacesIterFaceUserData(interfaces, mortars, boundaries, mesh) - return InitSurfacesIterFaceUserData{ - typeof(interfaces), typeof(mortars), + return InitSurfacesIterFaceUserData{typeof(interfaces), typeof(mortars), typeof(boundaries), typeof(mesh)}(interfaces, 1, mortars, 1, boundaries, 1, diff --git a/src/solvers/dgsem_p4est/containers_parallel.jl b/src/solvers/dgsem_p4est/containers_parallel.jl index e7ee1f81478..7c7bd868457 100644 --- a/src/solvers/dgsem_p4est/containers_parallel.jl +++ b/src/solvers/dgsem_p4est/containers_parallel.jl @@ -266,8 +266,7 @@ end function ParallelInitSurfacesIterFaceUserData(interfaces, mortars, boundaries, mpi_interfaces, mpi_mortars, mesh) - return ParallelInitSurfacesIterFaceUserData{ - typeof(interfaces), typeof(mortars), + return ParallelInitSurfacesIterFaceUserData{typeof(interfaces), typeof(mortars), typeof(boundaries), typeof(mpi_interfaces), typeof(mpi_mortars), typeof(mesh)}(interfaces, diff --git a/src/solvers/dgsem_p4est/containers_parallel_2d.jl b/src/solvers/dgsem_p4est/containers_parallel_2d.jl index 8c39e4a69c8..d531d33821b 100644 --- a/src/solvers/dgsem_p4est/containers_parallel_2d.jl +++ b/src/solvers/dgsem_p4est/containers_parallel_2d.jl @@ -6,9 +6,7 @@ #! 
format: noindent # Initialize node_indices of MPI interface container -@inline function init_mpi_interface_node_indices!(mpi_interfaces::P4estMPIInterfaceContainer{ - 2 - }, +@inline function init_mpi_interface_node_indices!(mpi_interfaces::P4estMPIInterfaceContainer{2}, faces, local_side, orientation, mpi_interface_id) # Align interface in positive coordinate direction of primary element. diff --git a/src/solvers/dgsem_p4est/containers_parallel_3d.jl b/src/solvers/dgsem_p4est/containers_parallel_3d.jl index be4e2bfbfc9..56f0a543b97 100644 --- a/src/solvers/dgsem_p4est/containers_parallel_3d.jl +++ b/src/solvers/dgsem_p4est/containers_parallel_3d.jl @@ -6,9 +6,7 @@ #! format: noindent # Initialize node_indices of MPI interface container -@inline function init_mpi_interface_node_indices!(mpi_interfaces::P4estMPIInterfaceContainer{ - 3 - }, +@inline function init_mpi_interface_node_indices!(mpi_interfaces::P4estMPIInterfaceContainer{3}, faces, local_side, orientation, mpi_interface_id) # Align interface at the primary element (primary element has surface indices (:i_forward, :j_forward)). diff --git a/src/solvers/dgsem_p4est/dg_2d_parabolic.jl b/src/solvers/dgsem_p4est/dg_2d_parabolic.jl index cf07645b949..a7f3345168f 100644 --- a/src/solvers/dgsem_p4est/dg_2d_parabolic.jl +++ b/src/solvers/dgsem_p4est/dg_2d_parabolic.jl @@ -94,8 +94,19 @@ function rhs_parabolic!(du, u, t, mesh::P4estMesh{2}, dg.surface_integral, dg) end - # TODO: parabolic; extend to mortars - @assert nmortars(dg, cache) == 0 + # Prolong solution to mortars (specialized for AbstractEquationsParabolic) + # !!! NOTE: we reuse the hyperbolic cache here since it contains "mortars" and "u_threaded". See https://github.com/trixi-framework/Trixi.jl/issues/1674 for a discussion + @trixi_timeit timer() "prolong2mortars" begin + prolong2mortars_divergence!(cache, flux_viscous, mesh, equations_parabolic, + dg.mortar, dg.surface_integral, dg) + end + + # Calculate mortar fluxes (specialized for AbstractEquationsParabolic) + @trixi_timeit timer() "mortar flux" begin + calc_mortar_flux_divergence!(cache_parabolic.elements.surface_flux_values, + mesh, equations_parabolic, dg.mortar, + dg.surface_integral, dg, cache) + end # Calculate surface integrals @trixi_timeit timer() "surface integral" begin @@ -176,14 +187,15 @@ function calc_gradient!(gradients, u_transformed, t, end end - # Prolong solution to interfaces + # Prolong solution to interfaces. + # This reuses `prolong2interfaces` for the purely hyperbolic case. @trixi_timeit timer() "prolong2interfaces" begin prolong2interfaces!(cache_parabolic, u_transformed, mesh, equations_parabolic, dg.surface_integral, dg) end - # Calculate interface fluxes for the gradient. This reuses P4est `calc_interface_flux!` along with a - # specialization for AbstractEquationsParabolic. + # Calculate interface fluxes for the gradient. + # This reuses `calc_interface_flux!` for the purely hyperbolic case. @trixi_timeit timer() "interface flux" begin calc_interface_flux!(cache_parabolic.elements.surface_flux_values, mesh, False(), # False() = no nonconservative terms @@ -202,8 +214,21 @@ function calc_gradient!(gradients, u_transformed, t, mesh, equations_parabolic, dg.surface_integral, dg) end - # TODO: parabolic; mortars - @assert nmortars(dg, cache) == 0 + # Prolong solution to mortars. 
This reuses the hyperbolic version of `prolong2mortars!`. + @trixi_timeit timer() "prolong2mortars" begin + prolong2mortars!(cache, u_transformed, mesh, equations_parabolic, + dg.mortar, dg.surface_integral, dg) + end + + # Calculate mortar fluxes. This reuses the hyperbolic version of `calc_mortar_flux!`, + # along with a specialization on `calc_mortar_flux!(fstar, ...)` and `mortar_fluxes_to_elements!` for + # AbstractEquationsParabolic. + @trixi_timeit timer() "mortar flux" begin + calc_mortar_flux!(cache_parabolic.elements.surface_flux_values, + mesh, False(), # False() = no nonconservative terms + equations_parabolic, + dg.mortar, dg.surface_integral, dg, cache) + end # Calculate surface integrals @trixi_timeit timer() "surface integral" begin @@ -303,6 +328,64 @@ function calc_gradient!(gradients, u_transformed, t, return nothing end +# This version is called during `calc_gradient!` and must be specialized because the +# flux for the gradient is {u}, which doesn't depend on the outward normal. Thus, +# there is no need to scale by 2 (e.g., the scaling factor in the normals (and in the +# contravariant vectors) along large/small elements across a non-conforming +# interface in 2D) or to flip the sign when storing the mortar fluxes back +# into `surface_flux_values`. +@inline function mortar_fluxes_to_elements!(surface_flux_values, + mesh::Union{P4estMesh{2}, T8codeMesh{2}}, + equations::AbstractEquationsParabolic, + mortar_l2::LobattoLegendreMortarL2, + dg::DGSEM, cache, mortar, fstar, u_buffer) + @unpack neighbor_ids, node_indices = cache.mortars + # Copy solution small to small + small_indices = node_indices[1, mortar] + small_direction = indices2direction(small_indices) + + for position in 1:2 + element = neighbor_ids[position, mortar] + for i in eachnode(dg) + for v in eachvariable(equations) + surface_flux_values[v, i, small_direction, element] = fstar[position][v, + i] + end + end + end + + # Project small fluxes to large element. + multiply_dimensionwise!(u_buffer, + mortar_l2.reverse_upper, fstar[2], + mortar_l2.reverse_lower, fstar[1]) + + # Copy interpolated flux values from buffer to large element face in the + # correct orientation. + # Note that the index of the small sides will always run forward but + # the index of the large side might need to run backwards for flipped sides. + large_element = neighbor_ids[3, mortar] + large_indices = node_indices[2, mortar] + large_direction = indices2direction(large_indices) + + if :i_backward in large_indices + for i in eachnode(dg) + for v in eachvariable(equations) + surface_flux_values[v, end + 1 - i, large_direction, large_element] = u_buffer[v, + i] + end + end + else + for i in eachnode(dg) + for v in eachvariable(equations) + surface_flux_values[v, i, large_direction, large_element] = u_buffer[v, + i] + end + end + end + + return nothing +end + # This version is used for parabolic gradient computations @inline function calc_interface_flux!(surface_flux_values, mesh::P4estMesh{2}, nonconservative_terms::False, @@ -321,7 +404,7 @@ end flux_ = 0.5 * (u_ll + u_rr) # we assume that the gradient computations utilize a central flux - # Note that we don't flip the sign on the secondondary flux. This is because for parabolic terms, + # Note that we don't flip the sign on the secondary flux. This is because for parabolic terms, # the normals are not embedded in `flux_` for the parabolic gradient computations.
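# (Editorial aside, hedged, not part of the original patch: for the BR1 gradient
# computation the numerical flux reduces to the plain average
#     flux_ = 0.5 * (u_ll + u_rr)
# which is symmetric in its arguments and contains no normal vector. Hence the
# same value of `flux_` can be stored for both adjacent elements below without a
# sign flip; the normal direction only enters later, when the gradient
# contributions are transformed via the contravariant vectors.)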
for v in eachvariable(equations) surface_flux_values[v, primary_node_index, primary_direction_index, primary_element_index] = flux_[v] @@ -520,6 +603,163 @@ function calc_interface_flux!(surface_flux_values, return nothing end +function prolong2mortars_divergence!(cache, flux_viscous::Vector{Array{uEltype, 4}}, + mesh::Union{P4estMesh{2}, T8codeMesh{2}}, equations, + mortar_l2::LobattoLegendreMortarL2, + surface_integral, dg::DGSEM) where {uEltype <: Real} + @unpack neighbor_ids, node_indices = cache.mortars + @unpack contravariant_vectors = cache.elements + index_range = eachnode(dg) + + flux_viscous_x, flux_viscous_y = flux_viscous + + @threaded for mortar in eachmortar(dg, cache) + # Copy solution data from the small elements using "delayed indexing" with + # a start value and a step size to get the correct face and orientation. + small_indices = node_indices[1, mortar] + direction_index = indices2direction(small_indices) + + i_small_start, i_small_step = index_to_start_step_2d(small_indices[1], + index_range) + j_small_start, j_small_step = index_to_start_step_2d(small_indices[2], + index_range) + + for position in 1:2 + i_small = i_small_start + j_small = j_small_start + element = neighbor_ids[position, mortar] + for i in eachnode(dg) + normal_direction = get_normal_direction(direction_index, + contravariant_vectors, + i_small, j_small, element) + + for v in eachvariable(equations) + flux_viscous = SVector(flux_viscous_x[v, i_small, j_small, element], + flux_viscous_y[v, i_small, j_small, element]) + + cache.mortars.u[1, v, position, i, mortar] = dot(flux_viscous, + normal_direction) + end + i_small += i_small_step + j_small += j_small_step + end + end + + # Buffer to copy solution values of the large element in the correct orientation + # before interpolating + u_buffer = cache.u_threaded[Threads.threadid()] + + # Copy solution of large element face to buffer in the + # correct orientation + large_indices = node_indices[2, mortar] + direction_index = indices2direction(large_indices) + + i_large_start, i_large_step = index_to_start_step_2d(large_indices[1], + index_range) + j_large_start, j_large_step = index_to_start_step_2d(large_indices[2], + index_range) + + i_large = i_large_start + j_large = j_large_start + element = neighbor_ids[3, mortar] + for i in eachnode(dg) + normal_direction = get_normal_direction(direction_index, contravariant_vectors, + i_large, j_large, element) + + for v in eachvariable(equations) + flux_viscous = SVector(flux_viscous_x[v, i_large, j_large, element], + flux_viscous_y[v, i_large, j_large, element]) + + # We prolong the viscous flux dotted with the outward normal + # on the small element. We scale by -1/2 here because the normal + # direction on the large element is -2 times that of the small + # element (these normal directions are "scaled" by the surface Jacobian) + u_buffer[v, i] = -0.5 * dot(flux_viscous, normal_direction) + end + i_large += i_large_step + j_large += j_large_step + end + + # Interpolate large element face data from buffer to small face locations + multiply_dimensionwise!(view(cache.mortars.u, 2, :, 1, :, mortar), + mortar_l2.forward_lower, + u_buffer) + multiply_dimensionwise!(view(cache.mortars.u, 2, :, 2, :, mortar), + mortar_l2.forward_upper, + u_buffer) + end + + return nothing +end + +# We compute the mortar fluxes for the divergence part of +# the parabolic terms in the specialized `calc_mortar_flux_divergence!` below.
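# (Editorial aside, hedged, not part of the original patch: the separate
# divergence treatment exists because `prolong2mortars_divergence!` above stores
# normal-weighted viscous fluxes, roughly `dot(flux_viscous, normal_direction)`,
# in the mortar container. With orientation and surface scaling already baked
# into these scalars, the mortar flux reduces to the plain BR1 average per
# variable,
#     fstar[position][v, node] = 0.5 * (viscous_flux_normal_ll +
#                                       viscous_flux_normal_rr)
# which is exactly what `calc_mortar_flux_divergence!` computes.)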
+function calc_mortar_flux_divergence!(surface_flux_values, + mesh::Union{P4estMesh{2}, T8codeMesh{2}}, + equations::AbstractEquationsParabolic, + mortar_l2::LobattoLegendreMortarL2, + surface_integral, dg::DG, cache) + @unpack neighbor_ids, node_indices = cache.mortars + @unpack contravariant_vectors = cache.elements + @unpack fstar_upper_threaded, fstar_lower_threaded = cache + index_range = eachnode(dg) + + @threaded for mortar in eachmortar(dg, cache) + # Choose thread-specific pre-allocated container + fstar = (fstar_lower_threaded[Threads.threadid()], + fstar_upper_threaded[Threads.threadid()]) + + for position in 1:2 + for node in eachnode(dg) + for v in eachvariable(equations) + viscous_flux_normal_ll = cache.mortars.u[1, v, position, node, mortar] + viscous_flux_normal_rr = cache.mortars.u[2, v, position, node, mortar] + + # TODO: parabolic; only BR1 at the moment + fstar[position][v, node] = 0.5 * (viscous_flux_normal_ll + + viscous_flux_normal_rr) + end + end + end + + # Buffer used to interpolate the flux values of the large element before + # copying them back in the correct orientation + u_buffer = cache.u_threaded[Threads.threadid()] + + # This dispatches to the specialization of `mortar_fluxes_to_elements!` for + # `AbstractEquationsParabolic` defined above + mortar_fluxes_to_elements!(surface_flux_values, + mesh, equations, mortar_l2, dg, cache, + mortar, fstar, u_buffer) + end + + return nothing +end + +# We structure this specialization of `calc_mortar_flux!` similarly to the version for +# hyperbolic equations with no nonconservative terms. +# The reasoning is that parabolic fluxes are treated like conservative +# terms (e.g., we compute a viscous conservative "flux") and thus no +# nonconservative terms are present. +@inline function calc_mortar_flux!(fstar, + mesh::Union{P4estMesh{2}, T8codeMesh{2}}, + nonconservative_terms::False, + equations::AbstractEquationsParabolic, + surface_integral, dg::DG, cache, + mortar_index, position_index, normal_direction, + node_index) + @unpack u = cache.mortars + @unpack surface_flux = surface_integral + + u_ll, u_rr = get_surface_node_vars(u, equations, dg, position_index, node_index, + mortar_index) + + # TODO: parabolic; only BR1 at the moment + flux_ = 0.5 * (u_ll + u_rr) + + # Copy flux to buffer + set_node_vars!(fstar[position_index], flux_, equations, dg, node_index) +end + # TODO: parabolic, finish implementing `calc_boundary_flux_gradients!` and `calc_boundary_flux_divergence!` function prolong2boundaries!(cache_parabolic, flux_viscous, mesh::P4estMesh{2}, @@ -563,8 +803,7 @@ end function calc_boundary_flux_gradients!(cache, t, boundary_condition::Union{BoundaryConditionPeriodic, - BoundaryConditionDoNothing - }, + BoundaryConditionDoNothing}, mesh::P4estMesh, equations, surface_integral, dg::DG) @assert isempty(eachboundary(dg, cache)) end diff --git a/src/solvers/dgsem_p4est/dg_parallel.jl b/src/solvers/dgsem_p4est/dg_parallel.jl index 324bc7f3cd6..712ede2bfce 100644 --- a/src/solvers/dgsem_p4est/dg_parallel.jl +++ b/src/solvers/dgsem_p4est/dg_parallel.jl @@ -342,8 +342,7 @@ function InitNeighborRankConnectivityIterFaceUserData(mpi_interfaces, mpi_mortar global_mortar_ids = fill(-1, nmpimortars(mpi_mortars)) neighbor_ranks_mortar = Vector{Vector{Int}}(undef, nmpimortars(mpi_mortars)) - return InitNeighborRankConnectivityIterFaceUserData{ - typeof(mpi_interfaces), + return InitNeighborRankConnectivityIterFaceUserData{typeof(mpi_interfaces), typeof(mpi_mortars), typeof(mesh)}(mpi_interfaces, 1, global_interface_ids, diff --git a/src/solvers/dgsem_structured/containers.jl
b/src/solvers/dgsem_structured/containers.jl index 41eabf7c6bf..8adf005b782 100644 --- a/src/solvers/dgsem_structured/containers.jl +++ b/src/solvers/dgsem_structured/containers.jl @@ -5,8 +5,8 @@ @muladd begin #! format: noindent -struct ElementContainer{NDIMS, RealT <: Real, uEltype <: Real, NDIMSP1, NDIMSP2, NDIMSP3 - } +struct ElementContainer{NDIMS, RealT <: Real, uEltype <: Real, NDIMSP1, NDIMSP2, + NDIMSP3} # Physical coordinates at each node node_coordinates::Array{RealT, NDIMSP2} # [orientation, node_i, node_j, node_k, element] # ID of neighbor element in negative direction in orientation diff --git a/src/solvers/dgsem_tree/dg_1d_parabolic.jl b/src/solvers/dgsem_tree/dg_1d_parabolic.jl index 90007b05b3d..0017f9ca88e 100644 --- a/src/solvers/dgsem_tree/dg_1d_parabolic.jl +++ b/src/solvers/dgsem_tree/dg_1d_parabolic.jl @@ -290,10 +290,8 @@ function calc_boundary_flux_gradients!(cache, t, 2, firsts[2], lasts[2]) end -function calc_boundary_flux_by_direction_gradient!(surface_flux_values::AbstractArray{ - <:Any, - 3 - }, +function calc_boundary_flux_by_direction_gradient!(surface_flux_values::AbstractArray{<:Any, + 3}, t, boundary_condition, equations_parabolic::AbstractEquationsParabolic, @@ -358,10 +356,8 @@ function calc_boundary_flux_divergence!(cache, t, dg, cache, 2, firsts[2], lasts[2]) end -function calc_boundary_flux_by_direction_divergence!(surface_flux_values::AbstractArray{ - <:Any, - 3 - }, +function calc_boundary_flux_by_direction_divergence!(surface_flux_values::AbstractArray{<:Any, + 3}, t, boundary_condition, equations_parabolic::AbstractEquationsParabolic, diff --git a/src/solvers/dgsem_tree/dg_2d.jl b/src/solvers/dgsem_tree/dg_2d.jl index 7ecf4c00032..547ed352ef3 100644 --- a/src/solvers/dgsem_tree/dg_2d.jl +++ b/src/solvers/dgsem_tree/dg_2d.jl @@ -391,8 +391,8 @@ end @inline function fv_kernel!(du, u, mesh::Union{TreeMesh{2}, StructuredMesh{2}, - UnstructuredMesh2D, P4estMesh{2}, T8codeMesh{2} - }, + UnstructuredMesh2D, P4estMesh{2}, + T8codeMesh{2}}, nonconservative_terms, equations, volume_flux_fv, dg::DGSEM, cache, element, alpha = true) @unpack fstar1_L_threaded, fstar1_R_threaded, fstar2_L_threaded, fstar2_R_threaded = cache diff --git a/src/solvers/dgsem_tree/dg_2d_parabolic.jl b/src/solvers/dgsem_tree/dg_2d_parabolic.jl index 06abff5e85b..3083ae30680 100644 --- a/src/solvers/dgsem_tree/dg_2d_parabolic.jl +++ b/src/solvers/dgsem_tree/dg_2d_parabolic.jl @@ -384,10 +384,8 @@ function calc_boundary_flux_gradients!(cache, t, cache, 4, firsts[4], lasts[4]) end -function calc_boundary_flux_by_direction_gradient!(surface_flux_values::AbstractArray{ - <:Any, - 4 - }, +function calc_boundary_flux_by_direction_gradient!(surface_flux_values::AbstractArray{<:Any, + 4}, t, boundary_condition, equations_parabolic::AbstractEquationsParabolic, @@ -464,10 +462,8 @@ function calc_boundary_flux_divergence!(cache, t, dg, cache, 4, firsts[4], lasts[4]) end -function calc_boundary_flux_by_direction_divergence!(surface_flux_values::AbstractArray{ - <:Any, - 4 - }, +function calc_boundary_flux_by_direction_divergence!(surface_flux_values::AbstractArray{<:Any, + 4}, t, boundary_condition, equations_parabolic::AbstractEquationsParabolic, diff --git a/src/solvers/dgsem_tree/dg_3d.jl b/src/solvers/dgsem_tree/dg_3d.jl index 3364187e93c..0955dc38655 100644 --- a/src/solvers/dgsem_tree/dg_3d.jl +++ b/src/solvers/dgsem_tree/dg_3d.jl @@ -209,8 +209,8 @@ function rhs!(du, u, t, end function calc_volume_integral!(du, u, - mesh::Union{TreeMesh{3}, StructuredMesh{3}, P4estMesh{3} - }, + 
mesh::Union{TreeMesh{3}, StructuredMesh{3}, + P4estMesh{3}}, nonconservative_terms, equations, volume_integral::VolumeIntegralWeakForm, dg::DGSEM, cache) @@ -264,8 +264,8 @@ See also https://github.com/trixi-framework/Trixi.jl/issues/1671#issuecomment-17 end function calc_volume_integral!(du, u, - mesh::Union{TreeMesh{3}, StructuredMesh{3}, P4estMesh{3} - }, + mesh::Union{TreeMesh{3}, StructuredMesh{3}, + P4estMesh{3}}, nonconservative_terms, equations, volume_integral::VolumeIntegralFluxDifferencing, dg::DGSEM, cache) @@ -378,8 +378,8 @@ end # TODO: Taal dimension agnostic function calc_volume_integral!(du, u, - mesh::Union{TreeMesh{3}, StructuredMesh{3}, P4estMesh{3} - }, + mesh::Union{TreeMesh{3}, StructuredMesh{3}, + P4estMesh{3}}, nonconservative_terms, equations, volume_integral::VolumeIntegralShockCapturingHG, dg::DGSEM, cache) diff --git a/src/solvers/dgsem_tree/dg_3d_parabolic.jl b/src/solvers/dgsem_tree/dg_3d_parabolic.jl index 2561c5fe5b0..9ad28c6aa8e 100644 --- a/src/solvers/dgsem_tree/dg_3d_parabolic.jl +++ b/src/solvers/dgsem_tree/dg_3d_parabolic.jl @@ -454,10 +454,8 @@ function calc_boundary_flux_gradients!(cache, t, 6, firsts[6], lasts[6]) end -function calc_boundary_flux_by_direction_gradient!(surface_flux_values::AbstractArray{ - <:Any, - 5 - }, +function calc_boundary_flux_by_direction_gradient!(surface_flux_values::AbstractArray{<:Any, + 5}, t, boundary_condition, equations_parabolic::AbstractEquationsParabolic, @@ -546,10 +544,8 @@ function calc_boundary_flux_divergence!(cache, t, dg, cache, 6, firsts[6], lasts[6]) end -function calc_boundary_flux_by_direction_divergence!(surface_flux_values::AbstractArray{ - <:Any, - 5 - }, +function calc_boundary_flux_by_direction_divergence!(surface_flux_values::AbstractArray{<:Any, + 5}, t, boundary_condition, equations_parabolic::AbstractEquationsParabolic, diff --git a/src/solvers/dgsem_tree/indicators_1d.jl b/src/solvers/dgsem_tree/indicators_1d.jl index 4006932352e..dff87bfe06c 100644 --- a/src/solvers/dgsem_tree/indicators_1d.jl +++ b/src/solvers/dgsem_tree/indicators_1d.jl @@ -29,8 +29,8 @@ end # full FV element. # # TODO: TrixiShallowWater: move new indicator type -function (indicator_hg::IndicatorHennemannGassnerShallowWater)(u::AbstractArray{<:Any, 3 - }, +function (indicator_hg::IndicatorHennemannGassnerShallowWater)(u::AbstractArray{<:Any, + 3}, mesh, equations::ShallowWaterEquations1D, dg::DGSEM, cache; diff --git a/src/solvers/dgsem_tree/indicators_2d.jl b/src/solvers/dgsem_tree/indicators_2d.jl index 8333bb515d3..fa8ed481eb9 100644 --- a/src/solvers/dgsem_tree/indicators_2d.jl +++ b/src/solvers/dgsem_tree/indicators_2d.jl @@ -33,8 +33,8 @@ end # full FV element. 
# # TODO: TrixiShallowWater: move new indicator type -function (indicator_hg::IndicatorHennemannGassnerShallowWater)(u::AbstractArray{<:Any, 4 - }, +function (indicator_hg::IndicatorHennemannGassnerShallowWater)(u::AbstractArray{<:Any, + 4}, mesh, equations::ShallowWaterEquations2D, dg::DGSEM, cache; diff --git a/src/solvers/dgsem_tree/subcell_limiters_2d.jl b/src/solvers/dgsem_tree/subcell_limiters_2d.jl index bc69e55f264..384f4178bc9 100644 --- a/src/solvers/dgsem_tree/subcell_limiters_2d.jl +++ b/src/solvers/dgsem_tree/subcell_limiters_2d.jl @@ -8,10 +8,9 @@ # this method is used when the limiter is constructed as for shock-capturing volume integrals function create_cache(limiter::Type{SubcellLimiterIDP}, equations::AbstractEquations{2}, basis::LobattoLegendreBasis, bound_keys) - subcell_limiter_coefficients = Trixi.ContainerSubcellLimiterIDP2D{real(basis) - }(0, - nnodes(basis), - bound_keys) + subcell_limiter_coefficients = Trixi.ContainerSubcellLimiterIDP2D{real(basis)}(0, + nnodes(basis), + bound_keys) # Memory for bounds checking routine with `BoundsCheckCallback`. # The first entry of each vector contains the maximum deviation since the last export. diff --git a/src/solvers/dgsem_unstructured/sort_boundary_conditions.jl b/src/solvers/dgsem_unstructured/sort_boundary_conditions.jl index cad5542aae3..b5388cadc8b 100644 --- a/src/solvers/dgsem_unstructured/sort_boundary_conditions.jl +++ b/src/solvers/dgsem_unstructured/sort_boundary_conditions.jl @@ -56,7 +56,8 @@ function initialize!(boundary_types_container::UnstructuredSortedBoundaryTypes{N for key in keys(boundary_dictionary) if !(key in all_names) println(stderr, - "ERROR: Key $(repr(key)) is not a valid boundary name") + "ERROR: Key $(repr(key)) is not a valid boundary name. " * + "Valid names are $all_names.") MPI.Abort(mpi_comm(), 1) end end @@ -67,7 +68,8 @@ function initialize!(boundary_types_container::UnstructuredSortedBoundaryTypes{N else for key in keys(boundary_dictionary) if !(key in unique_names) - error("Key $(repr(key)) is not a valid boundary name") + error("Key $(repr(key)) is not a valid boundary name. " * + "Valid names are $unique_names.") end end end diff --git a/src/visualization/recipes_plots.jl b/src/visualization/recipes_plots.jl index d15f7e542e1..0e9b5a66a8d 100644 --- a/src/visualization/recipes_plots.jl +++ b/src/visualization/recipes_plots.jl @@ -57,11 +57,8 @@ end # Visualize the mesh in a 2D plot # # Note: This is an experimental feature and may be changed in future releases without notice. 
-RecipesBase.@recipe function f(pm::PlotMesh{ - <:PlotData2DCartesian{<:Any, - <:AbstractVector{ - <:AbstractVector - }}}) +RecipesBase.@recipe function f(pm::PlotMesh{<:PlotData2DCartesian{<:Any, + <:AbstractVector{<:AbstractVector}}}) @unpack plot_data = pm @unpack x, y, mesh_vertices_x, mesh_vertices_y = plot_data diff --git a/test/Project.toml b/test/Project.toml index fcb96b9e48f..ecae0ac0900 100644 --- a/test/Project.toml +++ b/test/Project.toml @@ -12,7 +12,7 @@ Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" [compat] -Aqua = "0.7" +Aqua = "0.8" CairoMakie = "0.6, 0.7, 0.8, 0.9, 0.10" Downloads = "1" ForwardDiff = "0.10" diff --git a/test/test_aqua.jl b/test/test_aqua.jl index 9f57791406f..93457caba28 100644 --- a/test/test_aqua.jl +++ b/test/test_aqua.jl @@ -11,8 +11,8 @@ include("test_trixi.jl") ambiguities = false, # exceptions necessary for adding a new method `StartUpDG.estimate_h` # in src/solvers/dgmulti/sbp.jl - piracy = (treat_as_own = [Trixi.StartUpDG.RefElemData, - Trixi.StartUpDG.MeshData],)) + piracies = (treat_as_own = [Trixi.StartUpDG.RefElemData, + Trixi.StartUpDG.MeshData],)) end end #module diff --git a/test/test_mpi_p4est_2d.jl b/test/test_mpi_p4est_2d.jl index 1edbce8f6c8..da90537fcfd 100644 --- a/test/test_mpi_p4est_2d.jl +++ b/test/test_mpi_p4est_2d.jl @@ -69,7 +69,10 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "p4est_2d_dgsem") @trixi_testset "elixir_advection_restart.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_restart.jl"), l2=[4.507575525876275e-6], - linf=[6.21489667023134e-5]) + linf=[6.21489667023134e-5], + # With the default `maxiters = 1` in coverage tests, + # there would be no time steps after the restart. + coverage_override=(maxiters = 100_000,)) end @trixi_testset "elixir_euler_source_terms_nonconforming_unstructured_flag.jl" begin diff --git a/test/test_mpi_p4est_3d.jl b/test/test_mpi_p4est_3d.jl index 8082930b3b4..75f43650082 100644 --- a/test/test_mpi_p4est_3d.jl +++ b/test/test_mpi_p4est_3d.jl @@ -63,7 +63,10 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "p4est_3d_dgsem") @trixi_testset "elixir_advection_restart.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_restart.jl"), l2=[0.002590388934758452], - linf=[0.01840757696885409]) + linf=[0.01840757696885409], + # With the default `maxiters = 1` in coverage tests, + # there would be no time steps after the restart. + coverage_override=(maxiters = 100_000,)) end @trixi_testset "elixir_advection_cubed_sphere.jl" begin diff --git a/test/test_p4est_2d.jl b/test/test_p4est_2d.jl index 07c6d02bbcd..db34aecc168 100644 --- a/test/test_p4est_2d.jl +++ b/test/test_p4est_2d.jl @@ -94,7 +94,10 @@ end @trixi_testset "elixir_advection_restart.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_restart.jl"), l2=[4.507575525876275e-6], - linf=[6.21489667023134e-5]) + linf=[6.21489667023134e-5], + # With the default `maxiters = 1` in coverage tests, + # there would be no time steps after the restart. 
+ coverage_override=(maxiters = 100_000,)) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let @@ -216,8 +219,8 @@ end ], surface_flux=flux_hlle, tspan=(0.0, 0.3)) - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) let t = sol.t[end] u_ode = sol.u[end] diff --git a/test/test_p4est_3d.jl b/test/test_p4est_3d.jl index 346c61c7448..f2467f30204 100644 --- a/test/test_p4est_3d.jl +++ b/test/test_p4est_3d.jl @@ -110,7 +110,10 @@ end @trixi_testset "elixir_advection_restart.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_restart.jl"), l2=[0.002590388934758452], - linf=[0.01840757696885409]) + linf=[0.01840757696885409], + # With the default `maxiters = 1` in coverage tests, + # there would be no time steps after the restart. + coverage_override=(maxiters = 100_000,)) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let @@ -305,8 +308,8 @@ end ], tspan=(0.0, 0.3), surface_flux=flux_hlle) - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) let t = sol.t[end] u_ode = sol.u[end] diff --git a/test/test_parabolic_2d.jl b/test/test_parabolic_2d.jl index 152ca52a6ca..6632cd0bb27 100644 --- a/test/test_parabolic_2d.jl +++ b/test/test_parabolic_2d.jl @@ -541,6 +541,38 @@ end end end +@trixi_testset "P4estMesh2D: elixir_advection_diffusion_periodic_amr.jl" begin + @test_trixi_include(joinpath(examples_dir(), "p4est_2d_dgsem", + "elixir_advection_diffusion_periodic_amr.jl"), + tspan=(0.0, 0.01), + l2=[0.014715887539773128], + linf=[0.2285802791900049]) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end +end + +@trixi_testset "P4estMesh2D: elixir_advection_diffusion_nonperiodic_amr.jl" begin + @test_trixi_include(joinpath(examples_dir(), "p4est_2d_dgsem", + "elixir_advection_diffusion_nonperiodic_amr.jl"), + tspan=(0.0, 0.01), + l2=[0.00793438523666649], + linf=[0.11030633127144573]) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end +end + @trixi_testset "P4estMesh2D: elixir_advection_diffusion_nonperiodic_curved.jl" begin @test_trixi_include(joinpath(examples_dir(), "p4est_2d_dgsem", "elixir_advection_diffusion_nonperiodic_curved.jl"), @@ -635,6 +667,28 @@ end @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 end end + +@trixi_testset "P4estMesh2D: elixir_navierstokes_lid_driven_cavity_amr.jl" begin + @test_trixi_include(joinpath(examples_dir(), "p4est_2d_dgsem", + "elixir_navierstokes_lid_driven_cavity_amr.jl"), + tspan=(0.0, 1.0), + l2=[ + 0.0005323841980601085, 0.07892044543547208, + 0.02909671646389337, 0.11717468256112017, + ], + linf=[ + 0.006045292737899444, 0.9233292581786228, + 0.7982129977236198, 1.6864546235292153, + ]) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 
1000 + end +end end # Clean up afterwards: delete Trixi.jl output directory diff --git a/test/test_special_elixirs.jl b/test/test_special_elixirs.jl index 85671002ba6..ba670a6025e 100644 --- a/test/test_special_elixirs.jl +++ b/test/test_special_elixirs.jl @@ -92,8 +92,7 @@ coverage = occursin("--code-coverage", cmd) && !occursin("--code-coverage=none", # the convergence test logic @test_nowarn_mod convergence_test(@__MODULE__, joinpath(EXAMPLES_DIR, "tree_2d_dgsem", - "elixir_advection_basic.jl"), 2, - tspan = (0.0, 0.01)) + "elixir_advection_basic.jl"), 2) @test_nowarn_mod convergence_test(@__MODULE__, joinpath(EXAMPLES_DIR, "tree_2d_dgsem", "elixir_advection_extended.jl"), 2, @@ -101,12 +100,10 @@ coverage = occursin("--code-coverage", cmd) && !occursin("--code-coverage=none", tspan = (0.0, 0.1)) @test_nowarn_mod convergence_test(@__MODULE__, joinpath(EXAMPLES_DIR, "structured_2d_dgsem", - "elixir_advection_basic.jl"), 2, - tspan = (0.0, 0.01)) + "elixir_advection_basic.jl"), 2) @test_nowarn_mod convergence_test(@__MODULE__, joinpath(EXAMPLES_DIR, "structured_2d_dgsem", - "elixir_advection_coupled.jl"), 2, - tspan = (0.0, 0.01)) + "elixir_advection_coupled.jl"), 2) @test_nowarn_mod convergence_test(@__MODULE__, joinpath(EXAMPLES_DIR, "structured_2d_dgsem", "elixir_advection_extended.jl"), 2, @@ -345,8 +342,8 @@ end end @timed_testset "elixir_euler_ad.jl" begin - @test_trixi_include(joinpath(examples_dir(), "special_elixirs", - "elixir_euler_ad.jl")) + @test_nowarn_mod trixi_include(joinpath(examples_dir(), "special_elixirs", + "elixir_euler_ad.jl")) end end end diff --git a/test/test_structured_2d.jl b/test/test_structured_2d.jl index dd2248e10b2..1addc29e3e6 100644 --- a/test/test_structured_2d.jl +++ b/test/test_structured_2d.jl @@ -194,7 +194,10 @@ end @trixi_testset "elixir_advection_restart.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_restart.jl"), l2=[4.219208035582454e-6], - linf=[3.438434404412494e-5]) + linf=[3.438434404412494e-5], + # With the default `maxiters = 1` in coverage tests, + # there would be no time steps after the restart. + coverage_override=(maxiters = 100_000,)) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let @@ -211,7 +214,10 @@ end linf=[0.0015194252169410394], rtol=5.0e-5, # Higher tolerance to make tests pass in CI (in particular with macOS) elixir_file="elixir_advection_waving_flag.jl", - restart_file="restart_000021.h5") + restart_file="restart_000021.h5", + # With the default `maxiters = 1` in coverage tests, + # there would be no time steps after the restart. + coverage_override=(maxiters = 100_000,)) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let @@ -227,7 +233,36 @@ end l2=[7.841217436552029e-15], linf=[1.0857981180834031e-13], elixir_file="elixir_advection_free_stream.jl", - restart_file="restart_000036.h5") + restart_file="restart_000036.h5", + # With the default `maxiters = 1` in coverage tests, + # there would be no time steps after the restart. 
+ coverage_override=(maxiters = 100_000,)) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end +end + +@trixi_testset "elixir_eulermulti_convergence_ec.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_eulermulti_convergence_ec.jl"), + l2=[ + 1.5123651627525257e-5, + 1.51236516273878e-5, + 2.4544918394022538e-5, + 5.904791661362391e-6, + 1.1809583322724782e-5, + ], + linf=[ + 8.393471747591974e-5, + 8.393471748258108e-5, + 0.00015028562494778797, + 3.504466610437795e-5, + 7.00893322087559e-5, + ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let @@ -598,6 +633,29 @@ end end end +@trixi_testset "elixir_eulerpolytropic_convergence.jl: HLL(Davis)" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_eulerpolytropic_convergence.jl"), + solver=DGSEM(polydeg = 3, + surface_flux = FluxHLL(min_max_speed_davis), + volume_integral = VolumeIntegralFluxDifferencing(volume_flux)), + l2=[ + 0.0016689832177644243, 0.0025920263793104445, + 0.003281074494629298, + ], + linf=[ + 0.01099488320190023, 0.013309526619350365, + 0.02008032661117909, + ]) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end +end + @trixi_testset "elixir_eulerpolytropic_ec.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_eulerpolytropic_ec.jl"), l2=[ diff --git a/test/test_structured_3d.jl b/test/test_structured_3d.jl index 0213e1a9813..a52c459d6be 100644 --- a/test/test_structured_3d.jl +++ b/test/test_structured_3d.jl @@ -62,7 +62,10 @@ end @trixi_testset "elixir_advection_restart.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_restart.jl"), l2=[0.0025903889347585777], - linf=[0.018407576968841655]) + linf=[0.018407576968841655], + # With the default `maxiters = 1` in coverage tests, + # there would be no time steps after the restart. + coverage_override=(maxiters = 100_000,)) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let diff --git a/test/test_threaded.jl b/test/test_threaded.jl index 478c90b476a..dbbcbf4c7ce 100644 --- a/test/test_threaded.jl +++ b/test/test_threaded.jl @@ -208,7 +208,10 @@ end linf=[0.0015194252169410394], rtol=5.0e-5, # Higher tolerance to make tests pass in CI (in particular with macOS) elixir_file="elixir_advection_waving_flag.jl", - restart_file="restart_000021.h5") + restart_file="restart_000021.h5", + # With the default `maxiters = 1` in coverage tests, + # there would be no time steps after the restart. 
+ coverage_override=(maxiters = 100_000,)) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) diff --git a/test/test_tree_1d_advection.jl b/test/test_tree_1d_advection.jl index 7cfd78e0ade..a580a3b5600 100644 --- a/test/test_tree_1d_advection.jl +++ b/test/test_tree_1d_advection.jl @@ -54,6 +54,20 @@ end end end +@trixi_testset "elixir_advection_basic.jl (No errors)" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_basic.jl"), + analysis_callback=AnalysisCallback(semi, interval = 42, + analysis_errors = Symbol[])) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end +end + @trixi_testset "elixir_advection_finite_volume.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_finite_volume.jl"), l2=[0.011662300515980219], diff --git a/test/test_tree_1d_mhd.jl b/test/test_tree_1d_mhd.jl index b34bdf0660c..2150ddfd074 100644 --- a/test/test_tree_1d_mhd.jl +++ b/test/test_tree_1d_mhd.jl @@ -109,6 +109,31 @@ end end end +@trixi_testset "elixir_mhd_alfven_wave.jl with flux_hllc" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_mhd_alfven_wave.jl"), + l2=[ + 1.036850596986597e-5, 1.965192583650368e-6, + 3.5882124656715505e-5, 3.5882124656638764e-5, + 5.270975504780837e-6, 1.1963224165731992e-16, + 3.595811808912869e-5, 3.5958118089159453e-5, + ], + linf=[ + 2.887280521446378e-5, 7.310580790352001e-6, + 0.00012390046377899755, 0.00012390046377787345, + 1.5102711136583125e-5, 2.220446049250313e-16, + 0.0001261935452181312, 0.0001261935452182006, + ], + surface_flux=flux_hllc) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end +end + @trixi_testset "elixir_mhd_ec.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_mhd_ec.jl"), l2=[ @@ -206,6 +231,31 @@ end end end +@trixi_testset "elixir_mhd_torrilhon_shock_tube.jl (HLLC)" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_mhd_torrilhon_shock_tube.jl"), + surface_flux=flux_hllc, + l2=[ + 0.4573799618744708, 0.4792633358230866, 0.34064852506872795, + 0.4479668434955162, 0.9203891782415092, + 1.3216517820475193e-16, 0.28887826520860815, + 0.255281629265771, + ], + linf=[ + 1.2382842201671505, 0.8929169308132259, 0.871298623806198, + 0.9822415614542821, 1.6726170732132717, + 2.220446049250313e-16, 0.7016155888023747, + 0.6556091522071984, + ]) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end +end + @trixi_testset "elixir_mhd_ryujones_shock_tube.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_mhd_ryujones_shock_tube.jl"), l2=[ diff --git a/test/test_tree_2d_eulerpolytropic.jl b/test/test_tree_2d_eulerpolytropic.jl new file mode 100644 index 00000000000..545cf7274ff --- /dev/null +++ b/test/test_tree_2d_eulerpolytropic.jl @@ -0,0 +1,35 @@ +module TestExamples2DEulerPolytropic + +using Test +using Trixi + +include("test_trixi.jl") + +EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_2d_dgsem") + +@testset "Polytropic Euler" begin +#!
format: noindent + +@trixi_testset "elixir_eulerpolytropic_convergence.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_eulerpolytropic_convergence.jl"), + l2=[ + 0.0016689832177626373, 0.0025920263793094526, + 0.003281074494626679, + ], + linf=[ + 0.010994883201896677, 0.013309526619350365, + 0.02008032661117376, + ]) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end +end +end + +end # module diff --git a/test/test_tree_2d_part2.jl b/test/test_tree_2d_part2.jl index d8e86d14f18..622f12109ff 100644 --- a/test/test_tree_2d_part2.jl +++ b/test/test_tree_2d_part2.jl @@ -26,6 +26,9 @@ isdir(outdir) && rm(outdir, recursive = true) # Compressible Euler Multicomponent include("test_tree_2d_eulermulti.jl") + # Compressible Polytropic Euler + include("test_tree_2d_eulerpolytropic.jl") + # Compressible Euler coupled with acoustic perturbation equations include("test_tree_2d_euleracoustics.jl") diff --git a/test/test_tree_3d_advection.jl b/test/test_tree_3d_advection.jl index 56278629417..ae53a2df52f 100644 --- a/test/test_tree_3d_advection.jl +++ b/test/test_tree_3d_advection.jl @@ -27,7 +27,10 @@ end @trixi_testset "elixir_advection_restart.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_restart.jl"), l2=[0.00016017848135651983], - linf=[0.0014175368788298393]) + linf=[0.0014175368788298393], + # With the default `maxiters = 1` in coverage tests, + # there would be no time steps after the restart. + coverage_override=(maxiters = 100_000,)) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let diff --git a/test/test_tree_3d_fdsbp.jl b/test/test_tree_3d_fdsbp.jl index 16508df300e..e0e2bfe4b88 100644 --- a/test/test_tree_3d_fdsbp.jl +++ b/test/test_tree_3d_fdsbp.jl @@ -32,8 +32,8 @@ end xmax = 1.0, N = 10) @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_extended.jl"), - l2=[1.3819894522373702e-8], - linf=[3.381866298113323e-8], + l2=[5.228248923012878e-9], + linf=[9.24430243465224e-9], D_SBP=D, initial_refinement_level=0, tspan=(0.0, 5.0)) diff --git a/test/test_trixi.jl b/test/test_trixi.jl index 245efbc0175..cebe2164ae6 100644 --- a/test/test_trixi.jl +++ b/test/test_trixi.jl @@ -33,7 +33,7 @@ macro test_trixi_include(elixir, args...) 
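# (Editorial aside, hedged, not part of the original patch: `args` holds the
# `key = value` expressions passed to the macro. Keys consumed by the test
# harness itself, namely `l2`, `linf`, `atol`, `rtol`, `coverage_override`, and
# the newly excluded `skip_coverage`, are used for error checks and coverage
# handling and must not be forwarded as keyword arguments to the included
# elixir, hence the filter below.)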
local kwargs = Pair{Symbol, Any}[] for arg in args if (arg.head == :(=) && - !(arg.args[1] in (:l2, :linf, :atol, :rtol, :coverage_override)) + !(arg.args[1] in (:l2, :linf, :atol, :rtol, :coverage_override, :skip_coverage)) && !(coverage && arg.args[1] in keys(coverage_override))) push!(kwargs, Pair(arg.args...)) end diff --git a/test/test_unit.jl b/test/test_unit.jl index 0c5f2bf0a4c..d2e744da62f 100644 --- a/test/test_unit.jl +++ b/test/test_unit.jl @@ -296,9 +296,11 @@ end end Trixi.move_connectivity!(c::MyContainer, first, last, destination) = c Trixi.delete_connectivity!(c::MyContainer, first, last) = c - Trixi.reset_data_structures!(c::MyContainer) = (c.data = Vector{Int}(undef, - c.capacity + 1); - c) + function Trixi.reset_data_structures!(c::MyContainer) + (c.data = Vector{Int}(undef, + c.capacity + 1); + c) + end function Base.:(==)(c1::MyContainer, c2::MyContainer) return (c1.capacity == c2.capacity && c1.length == c2.length && @@ -611,6 +613,18 @@ end @test_throws ArgumentError TimeSeriesCallback(semi, [1.0 1.0 1.0; 2.0 2.0 2.0]) end +@timed_testset "Consistency check for single point flux: CEMCE" begin + equations = CompressibleEulerMulticomponentEquations2D(gammas = (1.4, 1.4), + gas_constants = (0.4, 0.4)) + u = SVector(0.1, -0.5, 1.0, 1.0, 2.0) + + orientations = [1, 2] + for orientation in orientations + @test flux(u, orientation, equations) ≈ + flux_ranocha(u, u, orientation, equations) + end +end + @timed_testset "Consistency check for HLL flux (naive): CEE" begin flux_hll = FluxHLL(min_max_speed_naive) @@ -782,6 +796,54 @@ end end end +@timed_testset "Consistency check for HLL flux with Davis wave speed estimates: Polytropic CEE" begin + flux_hll = FluxHLL(min_max_speed_davis) + + gamma = 1.4 + kappa = 0.5 # Scaling factor for the pressure. + equations = PolytropicEulerEquations2D(gamma, kappa) + u = SVector(1.1, -0.5, 2.34) + + orientations = [1, 2] + for orientation in orientations + @test flux_hll(u, u, orientation, equations) ≈ flux(u, orientation, equations) + end + + normal_directions = [SVector(1.0, 0.0), + SVector(0.0, 1.0), + SVector(0.5, -0.5), + SVector(-1.2, 0.3)] + + for normal_direction in normal_directions + @test flux_hll(u, u, normal_direction, equations) ≈ + flux(u, normal_direction, equations) + end +end + +@timed_testset "Consistency check for Winters flux: Polytropic CEE" begin + for gamma in [1.4, 1.0, 5 / 3] + kappa = 0.5 # Scaling factor for the pressure. + equations = PolytropicEulerEquations2D(gamma, kappa) + u = SVector(1.1, -0.5, 2.34) + + orientations = [1, 2] + for orientation in orientations + @test flux_winters_etal(u, u, orientation, equations) ≈ + flux(u, orientation, equations) + end + + normal_directions = [SVector(1.0, 0.0), + SVector(0.0, 1.0), + SVector(0.5, -0.5), + SVector(-1.2, 0.3)] + + for normal_direction in normal_directions + @test flux_winters_etal(u, u, normal_direction, equations) ≈ + flux(u, normal_direction, equations) + end + end +end + @timed_testset "Consistency check for HLL flux with Davis wave speed estimates: LEE" begin flux_hll = FluxHLL(min_max_speed_davis) @@ -958,6 +1020,7 @@ end for u in u_values @test flux_hlle(u, u, 1, equations) ≈ flux(u, 1, equations) + @test flux_hllc(u, u, 1, equations) ≈ flux(u, 1, equations) end equations = IdealGlmMhdEquations2D(1.4, 5.0) #= c_h =# @@ -1172,6 +1235,29 @@ end end @testset "FluxRotated vs. 
direct implementation" begin + @timed_testset "CompressibleEulerMulticomponentEquations2D" begin + equations = CompressibleEulerMulticomponentEquations2D(gammas = (1.4, 1.4), + gas_constants = (0.4, + 0.4)) + normal_directions = [SVector(1.0, 0.0), + SVector(0.0, 1.0), + SVector(0.5, -0.5), + SVector(-1.2, 0.3)] + u_values = [SVector(0.1, -0.5, 1.0, 1.0, 2.0), + SVector(-0.1, -0.3, 1.2, 1.3, 1.4)] + + f_std = flux + f_rot = FluxRotated(f_std) + println(typeof(f_std)) + println(typeof(f_rot)) + for u in u_values, + normal_direction in normal_directions + + @test f_rot(u, normal_direction, equations) ≈ + f_std(u, normal_direction, equations) + end + end + @timed_testset "CompressibleEulerEquations2D" begin equations = CompressibleEulerEquations2D(1.4) normal_directions = [SVector(1.0, 0.0), @@ -1325,6 +1411,103 @@ end @test mesh.boundary_faces[:entire_boundary] == [1, 2] end + +@testset "trixi_include" begin + @trixi_testset "Basic" begin + example = """ + x = 4 + """ + + filename = tempname() + try + open(filename, "w") do file + write(file, example) + end + + # Use `@trixi_testset`, which wraps code in a temporary module, and call + # `trixi_include` with `@__MODULE__` in order to isolate this test. + @test_warn "You just called" trixi_include(@__MODULE__, filename) + @test @isdefined x + @test x == 4 + + @test_warn "You just called" trixi_include(@__MODULE__, filename, x = 7) + @test x == 7 + + @test_throws "assignment `y` not found in expression" trixi_include(@__MODULE__, + filename, + y = 3) + finally + rm(filename, force = true) + end + end + + @trixi_testset "With `solve` Without `maxiters`" begin + # `trixi_include` assumes this to be the `solve` function of OrdinaryDiffEq, + # and therefore tries to insert the kwarg `maxiters`, which will fail here. + example = """ + solve() = 0 + x = solve() + """ + + filename = tempname() + try + open(filename, "w") do file + write(file, example) + end + + # Use `@trixi_testset`, which wraps code in a temporary module, and call + # `trixi_include` with `@__MODULE__` in order to isolate this test. + @test_throws "no method matching solve(; maxiters::Int64)" trixi_include(@__MODULE__, + filename) + + @test_throws "no method matching solve(; maxiters::Int64)" trixi_include(@__MODULE__, + filename, + maxiters = 3) + finally + rm(filename, force = true) + end + end + + @trixi_testset "With `solve` with `maxiters`" begin + # We need another example file that we include with `Base.include` first, in order to + # define the `solve` method without `trixi_include` trying to insert `maxiters` kwargs. + # Then, we can test that `trixi_include` inserts the kwarg in the `solve()` call. + example1 = """ + solve(; maxiters=0) = maxiters + """ + + example2 = """ + x = solve() + """ + + filename1 = tempname() + filename2 = tempname() + try + open(filename1, "w") do file + write(file, example1) + end + open(filename2, "w") do file + write(file, example2) + end + + # Use `@trixi_testset`, which wraps code in a temporary module, and call + # `Base.include` and `trixi_include` with `@__MODULE__` in order to isolate this test. 
+ Base.include(@__MODULE__, filename1) + @test_warn "You just called" trixi_include(@__MODULE__, filename2) + @test @isdefined x + # This is the default `maxiters` inserted by `trixi_include` + @test x == 10^5 + + @test_warn "You just called" trixi_include(@__MODULE__, filename2, + maxiters = 7) + # Test that `maxiters` got overwritten + @test x == 7 + finally + rm(filename1, force = true) + rm(filename2, force = true) + end + end +end end end #module diff --git a/test/test_unstructured_2d.jl b/test/test_unstructured_2d.jl index d4416ac5b6a..5341d86a7d1 100644 --- a/test/test_unstructured_2d.jl +++ b/test/test_unstructured_2d.jl @@ -129,7 +129,10 @@ end 0.005243995459478956, 0.004685630332338153, 0.01750217718347713, - ]) + ], + # With the default `maxiters = 1` in coverage tests, + # there would be no time steps after the restart. + coverage_override=(maxiters = 100_000,)) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let diff --git a/test/test_visualization.jl b/test/test_visualization.jl index 48164a70fb3..6444dc91d5d 100644 --- a/test/test_visualization.jl +++ b/test/test_visualization.jl @@ -243,7 +243,6 @@ end @test_nowarn_mod trixi_include(@__MODULE__, joinpath(examples_dir(), "tree_2d_dgsem", "elixir_advection_basic.jl"), - tspan = (0, 0.1), analysis_callback = Trixi.TrivialCallback()) @test adapt_to_mesh_level(sol, 5) isa Tuple @@ -259,7 +258,6 @@ end @test_nowarn_mod trixi_include(@__MODULE__, joinpath(examples_dir(), "tree_3d_dgsem", "elixir_advection_basic.jl"), - tspan = (0, 0.1), analysis_callback = Trixi.TrivialCallback(), initial_refinement_level = 1) @test PlotData2D(sol) isa Trixi.PlotData2DCartesian @@ -288,8 +286,7 @@ end @test_nowarn_mod trixi_include(@__MODULE__, joinpath(examples_dir(), "structured_3d_dgsem", - "elixir_advection_basic.jl"), - tspan = (0, 0.1)) + "elixir_advection_basic.jl")) @testset "1D plot from 3D solution and general mesh" begin @testset "Create 1D plot as slice" begin