From 43dac2c68f255d2e1aa209da4b441fc9f4bae6ac Mon Sep 17 00:00:00 2001 From: Daniel Doehring Date: Mon, 22 Jan 2024 17:33:19 +0100 Subject: [PATCH 01/22] Remove duplicate code (`rhs_parabolic!`) for 2D, 3D (#1810) * Remove duplicate code * comment --- src/solvers/dgsem_p4est/dg_2d_parabolic.jl | 26 ++++- src/solvers/dgsem_p4est/dg_3d_parabolic.jl | 112 --------------------- src/solvers/dgsem_tree/dg_2d_parabolic.jl | 2 +- src/solvers/dgsem_tree/dg_3d_parabolic.jl | 108 -------------------- 4 files changed, 24 insertions(+), 224 deletions(-) diff --git a/src/solvers/dgsem_p4est/dg_2d_parabolic.jl b/src/solvers/dgsem_p4est/dg_2d_parabolic.jl index a7f3345168f..299f2f6140a 100644 --- a/src/solvers/dgsem_p4est/dg_2d_parabolic.jl +++ b/src/solvers/dgsem_p4est/dg_2d_parabolic.jl @@ -19,9 +19,29 @@ function create_cache_parabolic(mesh::P4estMesh{2}, equations_hyperbolic::Abstra return cache end -# TODO: Remove in favor of the implementation for the TreeMesh -# once the P4estMesh can handle mortars as well -function rhs_parabolic!(du, u, t, mesh::P4estMesh{2}, +#= +Reusing `rhs_parabolic!` for `TreeMesh`es is not easily possible as +for `P4estMesh`es we call + + ``` + prolong2mortars_divergence!(cache, flux_viscous, mesh, equations_parabolic, + dg.mortar, dg.surface_integral, dg) + + calc_mortar_flux_divergence!(cache_parabolic.elements.surface_flux_values, + mesh, equations_parabolic, dg.mortar, + dg.surface_integral, dg, cache) + ``` +instead of + ``` + prolong2mortars!(cache, flux_viscous, mesh, equations_parabolic, + dg.mortar, dg.surface_integral, dg) + + calc_mortar_flux!(cache_parabolic.elements.surface_flux_values, mesh, + equations_parabolic, + dg.mortar, dg.surface_integral, dg, cache) + ``` +=# +function rhs_parabolic!(du, u, t, mesh::Union{P4estMesh{2}, P4estMesh{3}}, equations_parabolic::AbstractEquationsParabolic, initial_condition, boundary_conditions_parabolic, source_terms, dg::DG, parabolic_scheme, cache, cache_parabolic) diff --git a/src/solvers/dgsem_p4est/dg_3d_parabolic.jl b/src/solvers/dgsem_p4est/dg_3d_parabolic.jl index 0bb97c7af02..83d663809a7 100644 --- a/src/solvers/dgsem_p4est/dg_3d_parabolic.jl +++ b/src/solvers/dgsem_p4est/dg_3d_parabolic.jl @@ -19,118 +19,6 @@ function create_cache_parabolic(mesh::P4estMesh{3}, equations_hyperbolic::Abstra return cache end -# This file collects all methods that have been updated to work with parabolic systems of equations -# -# assumptions: parabolic terms are of the form div(f(u, grad(u))) and -# will be discretized first order form as follows: -# 1. compute grad(u) -# 2. compute f(u, grad(u)) -# 3. compute div(f(u, grad(u))) (i.e., the "regular" rhs! call) -# boundary conditions will be applied to both grad(u) and div(f(u, grad(u))). 
-# TODO: Remove in favor of the implementation for the TreeMesh -# once the P4estMesh can handle mortars as well -function rhs_parabolic!(du, u, t, mesh::P4estMesh{3}, - equations_parabolic::AbstractEquationsParabolic, - initial_condition, boundary_conditions_parabolic, source_terms, - dg::DG, parabolic_scheme, cache, cache_parabolic) - @unpack viscous_container = cache_parabolic - @unpack u_transformed, gradients, flux_viscous = viscous_container - - # Convert conservative variables to a form more suitable for viscous flux calculations - @trixi_timeit timer() "transform variables" begin - transform_variables!(u_transformed, u, mesh, equations_parabolic, - dg, parabolic_scheme, cache, cache_parabolic) - end - - # Compute the gradients of the transformed variables - @trixi_timeit timer() "calculate gradient" begin - calc_gradient!(gradients, u_transformed, t, mesh, equations_parabolic, - boundary_conditions_parabolic, dg, cache, cache_parabolic) - end - - # Compute and store the viscous fluxes - @trixi_timeit timer() "calculate viscous fluxes" begin - calc_viscous_fluxes!(flux_viscous, gradients, u_transformed, mesh, - equations_parabolic, dg, cache, cache_parabolic) - end - - # The remainder of this function is essentially a regular rhs! for parabolic - # equations (i.e., it computes the divergence of the viscous fluxes) - # - # OBS! In `calc_viscous_fluxes!`, the viscous flux values at the volume nodes of each element have - # been computed and stored in `fluxes_viscous`. In the following, we *reuse* (abuse) the - # `interfaces` and `boundaries` containers in `cache_parabolic` to interpolate and store the - # *fluxes* at the element surfaces, as opposed to interpolating and storing the *solution* (as it - # is done in the hyperbolic operator). That is, `interfaces.u`/`boundaries.u` store *viscous flux values* - # and *not the solution*. The advantage is that a) we do not need to allocate more storage, b) we - # do not need to recreate the existing data structure only with a different name, and c) we do not - # need to interpolate solutions *and* gradients to the surfaces. - - # TODO: parabolic; reconsider current data structure reuse strategy - - # Reset du - @trixi_timeit timer() "reset ∂u/∂t" reset_du!(du, dg, cache) - - # Calculate volume integral - @trixi_timeit timer() "volume integral" begin - calc_volume_integral!(du, flux_viscous, mesh, equations_parabolic, dg, cache) - end - - # Prolong solution to interfaces - @trixi_timeit timer() "prolong2interfaces" begin - prolong2interfaces!(cache_parabolic, flux_viscous, mesh, equations_parabolic, - dg.surface_integral, dg, cache) - end - - # Calculate interface fluxes - @trixi_timeit timer() "interface flux" begin - calc_interface_flux!(cache_parabolic.elements.surface_flux_values, mesh, - equations_parabolic, dg, cache_parabolic) - end - - # Prolong solution to boundaries - @trixi_timeit timer() "prolong2boundaries" begin - prolong2boundaries!(cache_parabolic, flux_viscous, mesh, equations_parabolic, - dg.surface_integral, dg, cache) - end - - # Calculate boundary fluxes - @trixi_timeit timer() "boundary flux" begin - calc_boundary_flux_divergence!(cache_parabolic, t, - boundary_conditions_parabolic, - mesh, equations_parabolic, - dg.surface_integral, dg) - end - - # Prolong solution to mortars (specialized for AbstractEquationsParabolic) - # !!! NOTE: we reuse the hyperbolic cache here since it contains "mortars" and "u_threaded" - # !!! Is this OK? 
- @trixi_timeit timer() "prolong2mortars" begin - prolong2mortars_divergence!(cache, flux_viscous, mesh, equations_parabolic, - dg.mortar, dg.surface_integral, dg) - end - - # Calculate mortar fluxes (specialized for AbstractEquationsParabolic) - @trixi_timeit timer() "mortar flux" begin - calc_mortar_flux_divergence!(cache_parabolic.elements.surface_flux_values, - mesh, equations_parabolic, dg.mortar, - dg.surface_integral, dg, cache) - end - - # Calculate surface integrals - @trixi_timeit timer() "surface integral" begin - calc_surface_integral!(du, u, mesh, equations_parabolic, - dg.surface_integral, dg, cache_parabolic) - end - - # Apply Jacobian from mapping to reference element - @trixi_timeit timer() "Jacobian" begin - apply_jacobian_parabolic!(du, mesh, equations_parabolic, dg, cache_parabolic) - end - - return nothing -end - function calc_gradient!(gradients, u_transformed, t, mesh::P4estMesh{3}, equations_parabolic, boundary_conditions_parabolic, dg::DG, diff --git a/src/solvers/dgsem_tree/dg_2d_parabolic.jl b/src/solvers/dgsem_tree/dg_2d_parabolic.jl index 3083ae30680..b1c27343999 100644 --- a/src/solvers/dgsem_tree/dg_2d_parabolic.jl +++ b/src/solvers/dgsem_tree/dg_2d_parabolic.jl @@ -13,7 +13,7 @@ # 2. compute f(u, grad(u)) # 3. compute div(f(u, grad(u))) (i.e., the "regular" rhs! call) # boundary conditions will be applied to both grad(u) and div(f(u, grad(u))). -function rhs_parabolic!(du, u, t, mesh::TreeMesh{2}, +function rhs_parabolic!(du, u, t, mesh::Union{TreeMesh{2}, TreeMesh{3}}, equations_parabolic::AbstractEquationsParabolic, initial_condition, boundary_conditions_parabolic, source_terms, dg::DG, parabolic_scheme, cache, cache_parabolic) diff --git a/src/solvers/dgsem_tree/dg_3d_parabolic.jl b/src/solvers/dgsem_tree/dg_3d_parabolic.jl index 9ad28c6aa8e..ee0e7c6b069 100644 --- a/src/solvers/dgsem_tree/dg_3d_parabolic.jl +++ b/src/solvers/dgsem_tree/dg_3d_parabolic.jl @@ -5,114 +5,6 @@ @muladd begin #! format: noindent -# This file collects all methods that have been updated to work with parabolic systems of equations -# -# assumptions: parabolic terms are of the form div(f(u, grad(u))) and -# will be discretized first order form as follows: -# 1. compute grad(u) -# 2. compute f(u, grad(u)) -# 3. compute div(f(u, grad(u))) (i.e., the "regular" rhs! call) -# boundary conditions will be applied to both grad(u) and div(f(u, grad(u))). -function rhs_parabolic!(du, u, t, mesh::TreeMesh{3}, - equations_parabolic::AbstractEquationsParabolic, - initial_condition, boundary_conditions_parabolic, source_terms, - dg::DG, parabolic_scheme, cache, cache_parabolic) - @unpack viscous_container = cache_parabolic - @unpack u_transformed, gradients, flux_viscous = viscous_container - - # Convert conservative variables to a form more suitable for viscous flux calculations - @trixi_timeit timer() "transform variables" begin - transform_variables!(u_transformed, u, mesh, equations_parabolic, - dg, parabolic_scheme, cache, cache_parabolic) - end - - # Compute the gradients of the transformed variables - @trixi_timeit timer() "calculate gradient" begin - calc_gradient!(gradients, u_transformed, t, mesh, equations_parabolic, - boundary_conditions_parabolic, dg, cache, cache_parabolic) - end - - # Compute and store the viscous fluxes - @trixi_timeit timer() "calculate viscous fluxes" begin - calc_viscous_fluxes!(flux_viscous, gradients, u_transformed, mesh, - equations_parabolic, dg, cache, cache_parabolic) - end - - # The remainder of this function is essentially a regular rhs! 
for parabolic - # equations (i.e., it computes the divergence of the viscous fluxes) - # - # OBS! In `calc_viscous_fluxes!`, the viscous flux values at the volume nodes of each element have - # been computed and stored in `fluxes_viscous`. In the following, we *reuse* (abuse) the - # `interfaces` and `boundaries` containers in `cache_parabolic` to interpolate and store the - # *fluxes* at the element surfaces, as opposed to interpolating and storing the *solution* (as it - # is done in the hyperbolic operator). That is, `interfaces.u`/`boundaries.u` store *viscous flux values* - # and *not the solution*. The advantage is that a) we do not need to allocate more storage, b) we - # do not need to recreate the existing data structure only with a different name, and c) we do not - # need to interpolate solutions *and* gradients to the surfaces. - - # TODO: parabolic; reconsider current data structure reuse strategy - - # Reset du - @trixi_timeit timer() "reset ∂u/∂t" reset_du!(du, dg, cache) - - # Calculate volume integral - @trixi_timeit timer() "volume integral" begin - calc_volume_integral!(du, flux_viscous, mesh, equations_parabolic, dg, cache) - end - - # Prolong solution to interfaces - @trixi_timeit timer() "prolong2interfaces" begin - prolong2interfaces!(cache_parabolic, flux_viscous, mesh, equations_parabolic, - dg.surface_integral, dg, cache) - end - - # Calculate interface fluxes - @trixi_timeit timer() "interface flux" begin - calc_interface_flux!(cache_parabolic.elements.surface_flux_values, mesh, - equations_parabolic, dg, cache_parabolic) - end - - # Prolong solution to boundaries - @trixi_timeit timer() "prolong2boundaries" begin - prolong2boundaries!(cache_parabolic, flux_viscous, mesh, equations_parabolic, - dg.surface_integral, dg, cache) - end - - # Calculate boundary fluxes - @trixi_timeit timer() "boundary flux" begin - calc_boundary_flux_divergence!(cache_parabolic, t, - boundary_conditions_parabolic, - mesh, equations_parabolic, - dg.surface_integral, dg) - end - - # Prolong solution to mortars - @trixi_timeit timer() "prolong2mortars" begin - prolong2mortars!(cache, flux_viscous, mesh, equations_parabolic, - dg.mortar, dg.surface_integral, dg) - end - - # Calculate mortar fluxes - @trixi_timeit timer() "mortar flux" begin - calc_mortar_flux!(cache_parabolic.elements.surface_flux_values, mesh, - equations_parabolic, - dg.mortar, dg.surface_integral, dg, cache) - end - - # Calculate surface integrals - @trixi_timeit timer() "surface integral" begin - calc_surface_integral!(du, u, mesh, equations_parabolic, - dg.surface_integral, dg, cache_parabolic) - end - - # Apply Jacobian from mapping to reference element - @trixi_timeit timer() "Jacobian" begin - apply_jacobian_parabolic!(du, mesh, equations_parabolic, dg, cache_parabolic) - end - - return nothing -end - # Transform solution variables prior to taking the gradient # (e.g., conservative to primitive variables). Defaults to doing nothing. # TODO: can we avoid copying data? 
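
The pattern applied in this patch — replacing two nearly identical methods specialized on `P4estMesh{2}` and `P4estMesh{3}` (or `TreeMesh{2}` and `TreeMesh{3}`) with a single method whose mesh argument is a `Union` of both — can be illustrated with a minimal, self-contained Julia sketch. The types and helper names below are hypothetical stand-ins, not Trixi.jl code:

```julia
# Minimal sketch of the deduplication-via-Union-dispatch pattern (hypothetical names).
struct Mesh2D end
struct Mesh3D end

# Stand-ins for the dimension-specific kernels that already exist as separate methods.
integrate_volume!(du, ::Mesh2D) = (du .+= 1; nothing)
integrate_volume!(du, ::Mesh3D) = (du .+= 2; nothing)

# One shared driver replaces the former per-dimension copies.
function rhs!(du, mesh::Union{Mesh2D, Mesh3D})
    fill!(du, 0.0)              # reset du
    integrate_volume!(du, mesh) # dimension-specific work happens via dispatch
    return nothing
end

du = zeros(3)
rhs!(du, Mesh3D())  # du == [2.0, 2.0, 2.0]
```

Because the dimension-dependent work in `rhs_parabolic!` is already delegated to helpers such as `calc_volume_integral!` and `calc_gradient!`, widening the signature removes the duplicated body without changing which specialized methods are ultimately called.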
From ba812be5ae6fb7170a85b9207253c8c89168ea76 Mon Sep 17 00:00:00 2001 From: Daniel Doehring Date: Tue, 23 Jan 2024 16:04:58 +0100 Subject: [PATCH 02/22] Add Link to Paper for Weakly Enforced BCs to Docs (#1811) * Add Paper for Weakly Enforced BCs to Docs * spelling --- docs/literate/src/files/non_periodic_boundaries.jl | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/literate/src/files/non_periodic_boundaries.jl b/docs/literate/src/files/non_periodic_boundaries.jl index 7ed6324ff99..3b238ad4533 100644 --- a/docs/literate/src/files/non_periodic_boundaries.jl +++ b/docs/literate/src/files/non_periodic_boundaries.jl @@ -16,6 +16,8 @@ # state as arguments, and solves an approximate Riemann problem to introduce dissipation (and # hence stabilization) at the boundary. Hence, the performance of the Dirichlet BC depends on the # fidelity of the numerical surface flux. +# An easy-to read introductory reference on this topic is the paper by +# [Mengaldo et al.](https://doi.org/10.2514/6.2014-2923). # The passed boundary value function is called with the same arguments as an initial condition # function, i.e. From 585fb93cfba5712401fee00d06aeeedf8db5fe7e Mon Sep 17 00:00:00 2001 From: Daniel Doehring Date: Thu, 25 Jan 2024 10:13:00 +0100 Subject: [PATCH 03/22] Different Boundary Conditions for Standard Abaqus (#1799) * first take on BCs for standard Abaqus * simplify * Couple comments, example * comments * shorten code * refactor * fmt * refactor * 3D * tests and fmt * comment * news * comment * stick to polydeg * polydeg 3 * polydeg * Update examples/p4est_2d_dgsem/elixir_euler_airfoil_mach2.jl Co-authored-by: Andrew Winters * Update examples/p4est_2d_dgsem/elixir_euler_airfoil_mach2.jl Co-authored-by: Andrew Winters * Update src/meshes/p4est_mesh.jl Co-authored-by: Andrew Winters * Update NEWS.md Co-authored-by: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> * Update examples/p4est_3d_dgsem/elixir_euler_free_stream_boundaries.jl * improve (?) 
formatting * fmt * split constructor * comment * two boundaries * comment * comment * rename elixir, change mesh * comments * add doc * avoid unicode * improve doc * Add tutorial * typos * Update docs/src/meshes/p4est_mesh.md Co-authored-by: Andrew Winters * Update docs/literate/src/files/p4est_from_gmsh.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/p4est_from_gmsh.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/p4est_from_gmsh.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/src/meshes/p4est_mesh.md Co-authored-by: Andrew Winters * Update docs/src/meshes/p4est_mesh.md Co-authored-by: Andrew Winters * Update docs/src/meshes/p4est_mesh.md Co-authored-by: Andrew Winters * Update docs/src/meshes/p4est_mesh.md Co-authored-by: Andrew Winters * Update docs/src/meshes/p4est_mesh.md Co-authored-by: Andrew Winters * Update docs/literate/src/files/p4est_from_gmsh.jl Co-authored-by: Andrew Winters * Update docs/literate/src/files/p4est_from_gmsh.jl Co-authored-by: Andrew Winters * Update docs/src/meshes/p4est_mesh.md Co-authored-by: Andrew Winters * Update docs/src/meshes/p4est_mesh.md * Update docs/literate/src/files/p4est_from_gmsh.jl * Update docs/literate/src/files/p4est_from_gmsh.jl * Update docs/literate/src/files/p4est_from_gmsh.jl * Update docs/literate/src/files/p4est_from_gmsh.jl * Update docs/literate/src/files/p4est_from_gmsh.jl * Update docs/literate/src/files/p4est_from_gmsh.jl * Apply suggestions from code review Polish * Update docs/literate/src/files/p4est_from_gmsh.jl Co-authored-by: Andrew Winters * Comments * Update p4est_mesh.jl * Update src/meshes/p4est_mesh.jl Co-authored-by: Johannes Markert <10619309+jmark@users.noreply.github.com> * Update docs/src/meshes/p4est_mesh.md Co-authored-by: Johannes Markert <10619309+jmark@users.noreply.github.com> * Update docs/src/meshes/p4est_mesh.md Co-authored-by: Johannes Markert <10619309+jmark@users.noreply.github.com> * Update docs/src/meshes/p4est_mesh.md Co-authored-by: Johannes Markert <10619309+jmark@users.noreply.github.com> * Update docs/src/meshes/p4est_mesh.md Co-authored-by: Johannes Markert <10619309+jmark@users.noreply.github.com> * more robust parsing * print warning if there has been a bc name supplied for which no nodes have been found * Update docs/literate/src/files/p4est_from_gmsh.jl Co-authored-by: Hendrik Ranocha * Update docs/literate/src/files/p4est_from_gmsh.jl Co-authored-by: Hendrik Ranocha * Update src/meshes/p4est_mesh.jl Co-authored-by: Hendrik Ranocha * Update docs/literate/src/files/p4est_from_gmsh.jl --------- Co-authored-by: Andrew Winters Co-authored-by: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> Co-authored-by: Johannes Markert <10619309+jmark@users.noreply.github.com> Co-authored-by: Hendrik Ranocha --- NEWS.md | 2 + docs/literate/src/files/p4est_from_gmsh.jl | 459 ++++++++++++++++++ docs/make.jl | 1 + docs/src/meshes/p4est_mesh.md | 256 +++++++++- .../elixir_euler_NACA6412airfoil_mach2.jl | 108 +++++ .../elixir_euler_free_stream_boundaries.jl | 60 +++ src/meshes/p4est_mesh.jl | 226 ++++++++- test/test_p4est_2d.jl | 21 + test/test_p4est_3d.jl | 23 + 9 files changed, 1145 insertions(+), 11 deletions(-) create mode 100644 docs/literate/src/files/p4est_from_gmsh.jl create mode 100644 
examples/p4est_2d_dgsem/elixir_euler_NACA6412airfoil_mach2.jl create mode 100644 examples/p4est_3d_dgsem/elixir_euler_free_stream_boundaries.jl diff --git a/NEWS.md b/NEWS.md index cf695912ed7..3a3a504a911 100644 --- a/NEWS.md +++ b/NEWS.md @@ -9,6 +9,8 @@ for human readability. #### Added - AMR for hyperbolic-parabolic equations on 3D `P4estMesh` - `flux_hllc` on non-cartesian meshes for `CompressibleEulerEquations{2,3}D` +- Different boundary conditions for quad/hex meshes in Abaqus format, even if not generated by HOHQMesh, + can now be digested by Trixi in 2D and 3D. ## Changes when updating to v0.6 from v0.5.x diff --git a/docs/literate/src/files/p4est_from_gmsh.jl b/docs/literate/src/files/p4est_from_gmsh.jl new file mode 100644 index 00000000000..356339cdd47 --- /dev/null +++ b/docs/literate/src/files/p4est_from_gmsh.jl @@ -0,0 +1,459 @@ +#src # `P4estMesh` from [`gmsh`](https://gmsh.info/) + +# Trixi.jl supports numerical approximations from structured and unstructured quadrilateral meshes +# with the [`P4estMesh`](@ref) mesh type. + +# The purpose of this tutorial is to demonstrate how to use the `P4estMesh` +# functionality of Trixi.jl for existing meshes with straight-sided (bilinear) elements/cells. +# This begins by running and visualizing an available unstructured quadrilateral mesh example. +# Then, the tutorial will cover how to use existing meshes generated by [`gmsh`](https://gmsh.info/) +# or any other meshing software that can export to the Abaqus input `.inp` format. + +# ## Running the simulation of a near-field flow around an airfoil + +# Trixi.jl supports solving hyperbolic-parabolic problems on several mesh types. +# A somewhat complex example that employs the `P4estMesh` is the near-field simulation of a +# Mach 2 flow around the NACA6412 airfoil. + +using Trixi +redirect_stdio(stdout=devnull, stderr=devnull) do # code that prints annoying stuff we don't want to see here #hide #md +trixi_include(joinpath(examples_dir(), "p4est_2d_dgsem", "elixir_euler_NACA6412airfoil_mach2.jl"), tspan=(0.0, 0.5)) +end #hide #md + +# Conveniently, we use the Plots package to have a first look at the results: +# ```julia +# using Plots +# pd = PlotData2D(sol) +# plot(pd["rho"]) +# plot!(getmesh(pd)) +# ``` + +# ## Creating a mesh using `gmsh` + +# The creation of an unstructured quadrilateral mesh using `gmsh` is driven by a **geometry file**. +# There are plenty of possibilities for the user, see the [documentation](https://gmsh.info/doc/texinfo/gmsh.html) and [tutorials](https://gitlab.onelab.info/gmsh/gmsh/tree/master/tutorials). + +# To begin, we provide a complete geometry file for the NACA6412 airfoil bounded by a rectangular box. After this we give a breakdown +# of the most important parts required for successful mesh generation that can later be used by the `p4est` library +# and Trixi.jl. +# We emphasize that this near-field mesh should only be used for instructive purposes and not for actual production runs. + +# The associated `NACA6412.geo` file is given below: +# ```c++ +# // GMSH geometry script for a NACA 6412 airfoil with 11 degree angle of attack +# // in a box (near-field mesh). +# // see https://github.com/cfsengineering/GMSH-Airfoil-2D +# // for software to generate gmsh `.geo` geometry files for NACA airfoils. 
+# +# // outer bounding box +# Point(1) = {-1.25, -0.5, 0, 1.0}; +# Point(2) = {1.25, -0.5, 0, 1.0}; +# Point(3) = {1.25, 0.5, 0, 1.0}; +# Point(4) = {-1.25, 0.5, 0, 1.0}; +# +# // lines of the bounding box +# Line(1) = {1, 2}; +# Line(2) = {2, 3}; +# Line(3) = {3, 4}; +# Line(4) = {4, 1}; +# // outer box +# Line Loop(8) = {1, 2, 3, 4}; +# +# // Settings +# // This value gives the global element size factor (lower -> finer mesh) +# Mesh.CharacteristicLengthFactor = 1.0 * 2^(-3); +# // Insist on quads instead of default triangles +# Mesh.RecombineAll = 1; +# // Violet instead of green base color for better visibility +# Mesh.ColorCarousel = 0; +# +# // points of the airfoil contour +# // Format: {x, y, z, DesiredCellSize}. See the documentation: https://gmsh.info/doc/texinfo/gmsh.html#Points +# // These concrete points are generated using the tool from https://github.com/cfsengineering/GMSH-Airfoil-2D +# Point(5) = {-0.4900332889206208, 0.09933466539753061, 0, 0.125}; +# Point(6) = {-0.4900274857651495, 0.1021542752054094, 0, 0.125}; +# Point(7) = {-0.4894921489729144, 0.1049830248247787, 0, 0.125}; +# Point(8) = {-0.4884253336670712, 0.1078191282319664, 0, 0.125}; +# Point(9) = {-0.4868257975566199, 0.1106599068424483, 0, 0.125}; +# Point(10) = {-0.4846930063965668, 0.1135018003016681, 0, 0.125}; +# Point(11) = {-0.4820271400142729, 0.1163403835785654, 0, 0.125}; +# Point(12) = {-0.4788290988083472, 0.1191703902233889, 0, 0.125}; +# Point(13) = {-0.4751005105908123, 0.1219857416089041, 0, 0.125}; +# Point(14) = {-0.4708437376101668, 0.1247795819332056, 0, 0.125}; +# Point(15) = {-0.4660618835629463, 0.1275443187232316, 0, 0.125}; +# Point(16) = {-0.4607588003749649, 0.1302716685409717, 0, 0.125}; +# Point(17) = {-0.4549390945110529, 0.132952707559475, 0, 0.125}; +# Point(18) = {-0.448608132554204, 0.1355779266432996, 0, 0.125}; +# Point(19) = {-0.4417720457819508, 0.138137290538182, 0, 0.125}; +# Point(20) = {-0.4344377334597768, 0.140620300747629, 0, 0.125}; +# Point(21) = {-0.4266128645686593, 0.1430160616500159, 0, 0.125}; +# Point(22) = {-0.4183058776865576, 0.1453133493887722, 0, 0.125}; +# Point(23) = {-0.4095259787518715, 0.147500683050503, 0, 0.125}; +# Point(24) = {-0.4002831364505879, 0.1495663976315875, 0, 0.125}; +# Point(25) = {-0.3905880749878933, 0.1514987182830453, 0, 0.125}; +# Point(26) = {-0.3804522640292948, 0.1532858353164163, 0, 0.125}; +# Point(27) = {-0.3698879056254708, 0.1549159794501833, 0, 0.125}; +# Point(28) = {-0.3589079179688306, 0.1563774967770029, 0, 0.125}; +# Point(29) = {-0.3475259158676376, 0.1576589229368209, 0, 0.125}; +# Point(30) = {-0.3357561878650377, 0.158749055989923, 0, 0.125}; +# Point(31) = {-0.3236136699747923, 0.1596370274972017, 0, 0.125}; +# Point(32) = {-0.3111139160522804, 0.1603123713324616, 0, 0.125}; +# Point(33) = {-0.298273064867608, 0.160765089773461, 0, 0.125}; +# Point(34) = {-0.2851078039966239, 0.1609857164445887, 0, 0.125}; +# Point(35) = {-0.2716353306943914, 0.160965375714529, 0, 0.125}; +# Point(36) = {-0.2578733099632437, 0.1606958381868515, 0, 0.125}; +# Point(37) = {-0.2438398300730194, 0.1601695719599709, 0, 0.125}; +# Point(38) = {-0.2295533558334121, 0.1593797893750759, 0, 0.125}; +# Point(39) = {-0.2150326799566391, 0.1583204890160489, 0, 0.125}; +# Point(40) = {-0.2002968728818922, 0.1569864927736143, 0, 0.125}; +# Point(41) = {-0.18536523146042, 0.1553734778363979, 0, 0.125}; +# Point(42) = {-0.1702572269208345, 0.1534780035235666, 0, 0.125}; +# Point(43) = {-0.1549924525477129, 0.1512975329264932, 0, 0.125}; +# 
Point(44) = {-0.1395905715122586, 0.1488304493795921, 0, 0.125}; +# Point(45) = {-0.1240712652914332, 0.1460760678321895, 0, 0.125}; +# Point(46) = {-0.1084541831014299, 0.1430346412430583, 0, 0.125}; +# Point(47) = {-0.09275889275279087, 0.1397073621660917, 0, 0.125}; +# Point(48) = {-0.07700483330818747, 0.1360963597385416, 0, 0.125}; +# Point(49) = {-0.06151286635366404, 0.1323050298149023, 0, 0.125}; +# Point(50) = {-0.04602933219022032, 0.1283521764905442, 0, 0.125}; +# Point(51) = {-0.03051345534800332, 0.1242331665904082, 0, 0.125}; +# Point(52) = {-0.01498163190522334, 0.1199540932779839, 0, 0.125}; +# Point(53) = {0.0005498526140696458, 0.1155214539466913, 0, 0.125}; +# Point(54) = {0.01606484191716884, 0.1109421303284033, 0, 0.125}; +# Point(55) = {0.03154732664394777, 0.106223368423828, 0, 0.125}; +# Point(56) = {0.0469814611314705, 0.1013727584299359, 0, 0.125}; +# Point(57) = {0.06235157928986135, 0.09639821481480275, 0, 0.125}; +# Point(58) = {0.07764220964363855, 0.09130795666388933, 0, 0.125}; +# Point(59) = {0.09283808959671735, 0.08611048839446452, 0, 0.125}; +# Point(60) = {0.1079241789809607, 0.08081458090718853, 0, 0.125}; +# Point(61) = {0.1228856729475325, 0.07542925321638272, 0, 0.125}; +# Point(62) = {0.1377080142575372, 0.06996375457378261, 0, 0.125}; +# Point(63) = {0.1523769050236616, 0.06442754707512513, 0, 0.125}; +# Point(64) = {0.1668783179480157, 0.05883028871526293, 0, 0.125}; +# Point(65) = {0.1811985070933818, 0.05318181683604975, 0, 0.125}; +# Point(66) = {0.1953240182159306, 0.04749213189240609, 0, 0.125}; +# Point(67) = {0.2092416986775084, 0.04177138144606024, 0, 0.125}; +# Point(68) = {0.2229387069452062, 0.03602984428372727, 0, 0.125}; +# Point(69) = {0.2364025216754475, 0.03027791454712048, 0, 0.125}; +# Point(70) = {0.2496209503696738, 0.02452608575629232, 0, 0.125}; +# Point(71) = {0.2625821375791982, 0.01878493460541621, 0, 0.125}; +# Point(72) = {0.2752745726282818, 0.01306510441121807, 0, 0.125}; +# Point(73) = {0.28768709681727, 0.007377288098728577, 0, 0.125}; +# Point(74) = {0.2998089100619555, 0.001732210616722449, 0, 0.125}; +# Point(75) = {0.3116295769214332, -0.003859389314124759, 0, 0.125}; +# Point(76) = {0.3231390319647309, -0.009386778203927332, 0, 0.125}; +# Point(77) = {0.3343275844265582, -0.01483924761490708, 0, 0.125}; +# Point(78) = {0.3451859221046181, -0.02020613485126957, 0, 0.125}; +# Point(79) = {0.3557051144551212, -0.02547684454806881, 0, 0.125}; +# Point(80) = {0.3658766148492779, -0.03064087116872238, 0, 0.125}; +# Point(81) = {0.3756922619615632, -0.0356878223992288, 0, 0.125}; +# Point(82) = {0.3851442802702071, -0.0406074434050937, 0, 0.125}; +# Point(83) = {0.394225279661484, -0.04538964189492445, 0, 0.125}; +# Point(84) = {0.4029282541416501, -0.05002451391298904, 0, 0.125}; +# Point(85) = {0.4112465796735204, -0.05450237026215737, 0, 0.125}; +# Point(86) = {0.4191740111683733, -0.05881376343890812, 0, 0.125}; +# Point(87) = {0.4267046786777481, -0.06294951494382847, 0, 0.125}; +# Point(88) = {0.4338330828434404, -0.06690074281456823, 0, 0.125}; +# Point(89) = {0.4405540896772232, -0.07065888921378868, 0, 0.125}; +# Point(90) = {0.4468629247542237, -0.07421574789251445, 0, 0.125}; +# Point(91) = {0.4527551669150955, -0.0775634913396257, 0, 0.125}; +# Point(92) = {0.4582267415819197, -0.08069469742118066, 0, 0.125}; +# Point(93) = {0.4632739138007936, -0.08360237530891265, 0, 0.125}; +# Point(94) = {0.4678932811302005, -0.08627999049569551, 0, 0.125}; +# Point(95) = {0.4720817664982195, -0.08872148869699745, 0, 
0.125}; +# Point(96) = {0.4758366111533843, -0.09092131844134463, 0, 0.125}; +# Point(97) = {0.4791553678333992, -0.09287445215953141, 0, 0.125}; +# Point(98) = {0.4820358942729613, -0.09457640559161551, 0, 0.125}; +# Point(99) = {0.4844763471666588, -0.09602325534252773, 0, 0.125}; +# Point(100) = {0.4864751766953637, -0.09721165443119822, 0, 0.125}; +# Point(101) = {0.4880311217148797, -0.09813884569428721, 0, 0.125}; +# Point(102) = {0.4891432056939881, -0.09880267292366274, 0, 0.125}; +# Point(103) = {0.4898107334756874, -0.09920158963645126, 0, 0.125}; +# Point(104) = {0.4900332889206208, -0.09933466539753058, 0, 0.125}; +# Point(105) = {0.4897824225031319, -0.09926905587549506, 0, 0.125}; +# Point(106) = {0.4890301110661922, -0.09907236506934192, 0, 0.125}; +# Point(107) = {0.4877772173496635, -0.09874500608402761, 0, 0.125}; +# Point(108) = {0.48602517690576, -0.09828766683852558, 0, 0.125}; +# Point(109) = {0.4837759946062035, -0.09770130916007558, 0, 0.125}; +# Point(110) = {0.4810322398085871, -0.09698716747297723, 0, 0.125}; +# Point(111) = {0.4777970402368822, -0.09614674703990023, 0, 0.125}; +# Point(112) = {0.4740740746447117, -0.09518182170326678, 0, 0.125}; +# Point(113) = {0.4698675643422793, -0.09409443106501386, 0, 0.125}; +# Point(114) = {0.4651822636784212, -0.09288687703518478, 0, 0.125}; +# Point(115) = {0.460023449577924, -0.09156171967354482, 0, 0.125}; +# Point(116) = {0.4543969102408585, -0.09012177224394632, 0, 0.125}; +# Point(117) = {0.4483089331151018, -0.08857009539864649, 0, 0.125}; +# Point(118) = {0.4417662922553667, -0.08690999040934186, 0, 0.125}; +# Point(119) = {0.4347762351819332, -0.0851449913634191, 0, 0.125}; +# Point(120) = {0.4273464693498908, -0.08327885624791403, 0, 0.125}; +# Point(121) = {0.419485148335155, -0.08131555684993674, 0, 0.125}; +# Point(122) = {0.411200857836944, -0.07925926741086739, 0, 0.125}; +# Point(123) = {0.4025026015879757, -0.07711435198240155, 0, 0.125}; +# Point(124) = {0.3933997872536054, -0.07488535044544484, 0, 0.125}; +# Point(125) = {0.3839022123897198, -0.07257696316779733, 0, 0.125}; +# Point(126) = {0.3740200505167618, -0.07019403429336624, 0, 0.125}; +# Point(127) = {0.3637638373540689, -0.06774153367408606, 0, 0.125}; +# Point(128) = {0.3531444572451353, -0.06522453747557577, 0, 0.125}; +# Point(129) = {0.3421731297908021, -0.06264820750853495, 0, 0.125}; +# Point(130) = {0.3308613966940724, -0.06001776935966011, 0, 0.125}; +# Point(131) = {0.3192211088076166, -0.05733848941811218, 0, 0.125}; +# Point(132) = {0.3072644133633567, -0.05461565091590426, 0, 0.125}; +# Point(133) = {0.2950037413531683, -0.05185452912263369, 0, 0.125}; +# Point(134) = {0.2824517950208982, -0.04906036585632723, 0, 0.125}; +# Point(135) = {0.2696215354188702, -0.04623834349241404, 0, 0.125}; +# Point(136) = {0.2565261699769623, -0.04339355867155523, 0, 0.125}; +# Point(137) = {0.2431791400293651, -0.04053099592384862, 0, 0.125}; +# Point(138) = {0.2295941082432855, -0.03765550144139543, 0, 0.125}; +# Point(139) = {0.2157849458952252, -0.03477175724299444, 0, 0.125}; +# Point(140) = {0.2017657199439165, -0.03188425598348005, 0, 0.125}; +# Point(141) = {0.187550679854507, -0.02899727666564914, 0, 0.125}; +# Point(142) = {0.1731542441359161, -0.02611486151457043, 0, 0.125}; +# Point(143) = {0.1585909865622793, -0.02324079427214604, 0, 0.125}; +# Point(144) = {0.1438756220597465, -0.02037858016395433, 0, 0.125}; +# Point(145) = {0.129022992251319, -0.0175314277805827, 0, 0.125}; +# Point(146) = {0.1140480506645569, -0.01470223310184333, 
0, 0.125}; +# Point(147) = {0.09896584761949168, -0.01189356587453844, 0, 0.125}; +# Point(148) = {0.08379151482656089, -0.009107658532933174, 0, 0.125}; +# Point(149) = {0.06854024973648176, -0.006346397826038436, 0, 0.125}; +# Point(150) = {0.05322729969528361, -0.003611319287478529, 0, 0.125}; +# Point(151) = {0.03786794596792287, -0.00090360465249055, 0, 0.125}; +# Point(152) = {0.0224774877026287, 0.00177591770710904, 0, 0.125}; +# Point(153) = {0.007071225915134205, 0.004426769294862437, 0, 0.125}; +# Point(154) = {-0.00833555242305456, 0.007048814950562587, 0, 0.125}; +# Point(155) = {-0.02372759010533726, 0.009642253300220296, 0, 0.125}; +# Point(156) = {-0.03908967513210498, 0.01220760427359278, 0, 0.125}; +# Point(157) = {-0.05440665578848514, 0.01474569380579989, 0, 0.125}; +# Point(158) = {-0.06966345527617318, 0.01725763587663899, 0, 0.125}; +# Point(159) = {-0.08484508582421563, 0.01974481207672138, 0, 0.125}; +# Point(160) = {-0.09987987792382108, 0.02219618763023203, 0, 0.125}; +# Point(161) = {-0.1145078729404739, 0.02450371976411331, 0, 0.125}; +# Point(162) = {-0.1290321771824579, 0.0267015185742735, 0, 0.125}; +# Point(163) = {-0.143440065923266, 0.02879471001709845, 0, 0.125}; +# Point(164) = {-0.1577189448447794, 0.03078883518202784, 0, 0.125}; +# Point(165) = {-0.1718563428491159, 0.03268980457290044, 0, 0.125}; +# Point(166) = {-0.1858399037768357, 0.03450385196323842, 0, 0.125}; +# Point(167) = {-0.1996573773370766, 0.03623748825421298, 0, 0.125}; +# Point(168) = {-0.2132966095779342, 0.03789745574015834, 0, 0.125}; +# Point(169) = {-0.2267455332406906, 0.0394906831577609, 0, 0.125}; +# Point(170) = {-0.2399921583489679, 0.04102424186233269, 0, 0.125}; +# Point(171) = {-0.2530245633834605, 0.04250530343879837, 0, 0.125}; +# Point(172) = {-0.2658308873846617, 0.04394109901707172, 0, 0.125}; +# Point(173) = {-0.2783993233102972, 0.04533888052223981, 0, 0.125}; +# Point(174) = {-0.2907181129514687, 0.04670588405019788, 0, 0.125}; +# Point(175) = {-0.3027755436824813, 0.0480492955198111, 0, 0.125}; +# Point(176) = {-0.3145599472847223, 0.04937621871394801, 0, 0.125}; +# Point(177) = {-0.3260597010456697, 0.05069364578437131, 0, 0.125}; +# Point(178) = {-0.337263231291058, 0.05200843025992359, 0, 0.125}; +# Point(179) = {-0.3481590194623916, 0.05332726256406103, 0, 0.125}; +# Point(180) = {-0.3587356108043638, 0.05465664801682354, 0, 0.125}; +# Point(181) = {-0.3689816256782782, 0.0560028872679817, 0, 0.125}; +# Point(182) = {-0.3788857734692287, 0.05737205908247899, 0, 0.125}; +# Point(183) = {-0.3884368690074614, 0.05877000537646382, 0, 0.125}; +# Point(184) = {-0.3976238513788748, 0.06020231838219783, 0, 0.125}; +# Point(185) = {-0.40643580495675, 0.06167432980291591, 0, 0.125}; +# Point(186) = {-0.4148619824472646, 0.06319110180426264, 0, 0.125}; +# Point(187) = {-0.4228918297057104, 0.06475741967717524, 0, 0.125}; +# Point(188) = {-0.43051501204915, 0.06637778599795482, 0, 0.125}; +# Point(189) = {-0.4377214417649294, 0.06805641610468524, 0, 0.125}; +# Point(190) = {-0.4445013064933708, 0.06979723470503821, 0, 0.125}; +# Point(191) = {-0.4508450981473512, 0.07160387342876083, 0, 0.125}; +# Point(192) = {-0.4567436420215075, 0.073479669138689, 0, 0.125}; +# Point(193) = {-0.4621881257395756, 0.07542766281688272, 0, 0.125}; +# Point(194) = {-0.4671701276898881, 0.07745059884734995, 0, 0.125}; +# Point(195) = {-0.471681644606229, 0.07955092452372269, 0, 0.125}; +# Point(196) = {-0.4757151179639407, 0.0817307896190848, 0, 0.125}; +# Point(197) = {-0.4792634588791559, 
0.0839920458658267, 0, 0.125}; +# Point(198) = {-0.4823200712220043, 0.08633624620581726, 0, 0.125}; +# Point(199) = {-0.4848788726822436, 0.08876464368523246, 0, 0.125}; +# Point(200) = {-0.4869343135575803, 0.09127818988394577, 0, 0.125}; +# Point(201) = {-0.4884813930704814, 0.09387753278635144, 0, 0.125}; +# Point(202) = {-0.4895156730580155, 0.09656301401871749, 0, 0.125}; +# +# // splines of the airfoil +# Spline(5) = {5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104}; +# Spline(6) = {104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,5}; +# +# // airfoil +# Line Loop(9) = {5, 6}; +# // complete domain +# Plane Surface(1) = {8, 9}; +# +# // labeling of the boundary parts +# Physical Line(1) = {4}; // inflow +# Physical Line(2) = {2}; // outflow +# Physical Line(3) = {1, 3}; // airfoil +# Physical Line(4) = {5, 6}; // upper/lower wall +# Physical Surface(1) = {10}; +# ``` +# From which we can construct a mesh like this: +# ![mesh_screenshot](https://github.com/trixi-framework/Trixi.jl/assets/75639095/67adfe3d-d403-4cd3-acaa-971a34df0709) +# +# The first four points define the bounding box = (near-field) domain: +# ```c++ +# // outer bounding box +# Point(1) = {-1.25, -0.5, 0, 1.0}; +# Point(2) = {1.25, -0.5, 0, 1.0}; +# Point(3) = {1.25, 0.5, 0, 1.0}; +# Point(4) = {-1.25, 0.5, 0, 1.0}; +# ``` +# which is constructed from connecting the points in lines: +# ```c++ +# // outer box +# Line(1) = {1, 2}; +# Line(2) = {2, 3}; +# Line(3) = {3, 4}; +# Line(4) = {4, 1}; +# // outer box +# Line Loop(8) = {1, 2, 3, 4}; +# ``` +# +# This is followed by a couple (in principle optional) settings where the most important one is +# ```c++ +# // Insist on quads instead of default triangles +# Mesh.RecombineAll = 1; +# ``` +# which forces `gmsh` to generate quadrilateral elements instead of the default triangles. +# This is strictly required to be able to use the mesh later with `p4est`, which supports only straight-sided quads, +# i.e., `C2D4, CPS4, S4` in 2D and `C3D` in 3D. +# See for more details the (short) [documentation](https://p4est.github.io/p4est-howto.pdf) on the interaction of `p4est` with `.inp` files. +# In principle, it should also be possible to use the `recombine` function of `gmsh` to convert the triangles to quads, +# but this is observed to be less robust than enforcing quads from the beginning. +# +# Then the airfoil is defined by a set of points: +# ```c++ +# // points of the airfoil contour +# Point(5) = {-0.4900332889206208, 0.09933466539753061, 0, 0.125}; +# Point(6) = {-0.4900274857651495, 0.1021542752054094, 0, 0.125}; +# ... +# ``` +# which are connected by splines for the upper and lower part of the airfoil: +# ```c++ +# // splines of the airfoil +# Spline(5) = {5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20, +# ... +# 96,97,98,99,100,101,102,103,104}; +# Spline(6) = {104,105,106,107,108,109,110,111,112,113,114,115, +# ... 
+# 200,201,202,5}; +# ``` +# which are then connected to form a single line loop for easy physical group assignment: +# ```c++ +# // airfoil +# Line Loop(9) = {5, 6}; +# ``` +# +# At the end of the file the physical groups are defined: +# ```c++ +# // labeling of the boundary parts +# Physical Line(1) = {4}; // Inflow. Label in Abaqus .inp file: PhysicalLine1 +# Physical Line(2) = {2}; // Outflow. Label in Abaqus .inp file: PhysicalLine2 +# Physical Line(3) = {1, 3}; // Airfoil. Label in Abaqus .inp file: PhysicalLine3 +# Physical Line(4) = {5, 6}; //Upper and lower wall/farfield/... Label in Abaqus .inp file: PhysicalLine4 +# ``` +# which are crucial for the correct assignment of boundary conditions in `Trixi.jl`. +# In particular, it is the responsibility of a user to keep track on the physical boundary names between the mesh generation and assignment of boundary condition functions in an elixir. +# +# After opening this file in `gmsh`, meshing the geometry and exporting to Abaqus `.inp` format, +# we can have a look at the input file: +# ``` +# *Heading +# +# *NODE +# 1, -1.25, -0.5, 0 +# 2, 1.25, -0.5, 0 +# 3, 1.25, 0.5, 0 +# 4, -1.25, 0.5, 0 +# ... +# ******* E L E M E N T S ************* +# *ELEMENT, type=T3D2, ELSET=Line1 +# 1, 1, 7 +# ... +# *ELEMENT, type=CPS4, ELSET=Surface1 +# 191, 272, 46, 263, 807 +# ... +# *NSET,NSET=PhysicalLine1 +# 1, 4, 52, 53, 54, 55, 56, 57, 58, +# *NSET,NSET=PhysicalLine2 +# 2, 3, 26, 27, 28, 29, 30, 31, 32, +# *NSET,NSET=PhysicalLine3 +# 1, 2, 3, 4, 7, 8, 9, 10, 11, 12, +# 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, +# 23, 24, 25, 33, 34, 35, 36, 37, 38, 39, +# 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, +# 50, 51, +# *NSET,NSET=PhysicalLine4 +# 5, 6, 59, 60, 61, 62, 63, 64, 65, 66, +# 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, +# 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, +# 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, +# 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, +# 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, +# 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, +# 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, +# 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, +# 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, +# 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, +# 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, +# 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, +# 187, 188, 189, 190, +# ``` +# +# First, the coordinates of the nodes are listed, followed by the elements. +# Note that `gmsh` exports also line elements of type `T3D2` which are ignored by `p4est`. +# The relevant elements in 2D which form the gridcells are of type `CPS4` which are defined by their four corner nodes. +# This is followed by the nodesets encoded via `*NSET` which are used to assign boundary conditions in Trixi.jl. +# Trixi.jl parses the `.inp` file and assigns the edges (in 2D, surfaces in 3D) of elements to the corresponding boundary condition based on +# the supplied `boundary_symbols` that have to be supplied to the `P4estMesh` constructor: +# ```julia +# # boundary symbols +# boundary_symbols = [:PhysicalLine1, :PhysicalLine2, :PhysicalLine3, :PhysicalLine4] +# mesh = P4estMesh{2}(mesh_file, polydeg = polydeg, boundary_symbols = boundary_symbols) +# ``` +# The same boundary symbols have then also be supplied to the semidiscretization alongside the +# corresponding physical boundary conditions: +# ```julia +# # Supersonic inflow boundary condition. 
+# # Calculate the boundary flux entirely from the external solution state, i.e., set +# # external solution state values for everything entering the domain. +# @inline function boundary_condition_supersonic_inflow(u_inner, +# normal_direction::AbstractVector, +# x, t, surface_flux_function, +# equations::CompressibleEulerEquations2D) +# u_boundary = initial_condition_mach2_flow(x, t, equations) +# flux = Trixi.flux(u_boundary, normal_direction, equations) +# +# return flux +# end +# +# # Supersonic outflow boundary condition. +# # Calculate the boundary flux entirely from the internal solution state. Analogous to supersonic inflow +# # except all the solution state values are set from the internal solution as everything leaves the domain +# @inline function boundary_condition_supersonic_outflow(u_inner, +# normal_direction::AbstractVector, x, +# t, +# surface_flux_function, +# equations::CompressibleEulerEquations2D) +# flux = Trixi.flux(u_inner, normal_direction, equations) +# +# boundary_conditions = Dict(:PhysicalLine1 => boundary_condition_supersonic_inflow, # Left boundary +# :PhysicalLine2 => boundary_condition_supersonic_outflow, # Right boundary +# :PhysicalLine3 => boundary_condition_slip_wall, # Airfoil +# :PhysicalLine4 => boundary_condition_supersonic_outflow) # Top and bottom boundary +# +# semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, +# boundary_conditions = boundary_conditions) +# ``` +# Note that you **have to** supply the `boundary_symbols` keyword to the `P4estMesh` constructor +# to select the boundaries from the available nodesets in the `.inp` file. +# If the `boundary_symbols` keyword is not supplied, all boundaries will be assigned to the default set `:all`. + +# ## Package versions + +# These results were obtained using the following versions. + +using InteractiveUtils +versioninfo() + +using Pkg +Pkg.status(["Trixi", "OrdinaryDiffEq", "Plots", "Download"], + mode=PKGMODE_MANIFEST) diff --git a/docs/make.jl b/docs/make.jl index df8ac04be12..dee87371bd1 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -65,6 +65,7 @@ files = [ "Adaptive mesh refinement" => "adaptive_mesh_refinement.jl", "Structured mesh with curvilinear mapping" => "structured_mesh_mapping.jl", "Unstructured meshes with HOHQMesh.jl" => "hohqmesh_tutorial.jl", + "P4est mesh from gmsh" => "p4est_from_gmsh.jl", # Topic: other stuff "Explicit time stepping" => "time_stepping.jl", "Differentiable programming" => "differentiable_programming.jl", diff --git a/docs/src/meshes/p4est_mesh.md b/docs/src/meshes/p4est_mesh.md index 1e5d782ebb6..db75346cab3 100644 --- a/docs/src/meshes/p4est_mesh.md +++ b/docs/src/meshes/p4est_mesh.md @@ -55,7 +55,7 @@ This heading is used to indicate to the mesh constructor which of the above mapp create a curvilinear mesh. If the Abaqus file header is **not** present then the `P4estMesh` is created with the first strategy above. -#### List of corner nodes +#### List of corner nodes Next, prefaced with `*NODE`, comes a list of the physical `(x,y,z)` coordinates of all the corners. The first integer in the list of the corners provides its id number. @@ -71,7 +71,7 @@ Thus, for the two-dimensional example mesh this block of corner information is 7, 3.0, -1.0, 0.0 ``` -#### List of elements +#### List of elements The element connectivity is given after the list of corners. 
The header for this information block is ``` @@ -98,7 +98,9 @@ The construction of the element neighbor ids and identifying physical boundary s directly from the [`p4est`](https://github.com/cburstedde/p4est) library. For example, the neighbor connectivity is created in the mesh constructor using the wrapper `read_inp_p4est` function. -#### HOHQMesh boundary information +#### Encoding of boundaries + +##### HOHQMesh boundary information If present, any additional information in the mesh file that was created by `HOHQMesh` is prefaced with `** ` to make it an Abaqus comment. @@ -230,8 +232,38 @@ For completeness, we provide the entire Abaqus mesh file for the example mesh in ** Bottom --- Right --- ``` +##### Standard Abaqus format boundary information + +As an alternative to an Abaqus mesh generated by `HOHQMesh`, `.inp` files with boundary information encoded as nodesets `*NSET,NSET=` can be used to construct a `p4est` mesh. +This is especially useful for usage of existing meshes (consisting of bilinear elements) which could stem from the popular [`gmsh`](https://gmsh.info/) meshing software. + +In addition to the list of [nodes](#nodes) and [elements](#elements) given above, there are nodesets of the form +``` +*NSET,NSET=PhysicalLine1 +1, 4, 52, 53, 54, 55, 56, 57, 58, +``` +present which are used to associate the edges defined through their corner nodes with a label. In this case it is called `PhysicalLine1`. +By looping over every element and its associated edges, consisting of two nodes, we query the read in `NSET`s if the current node pair is present. + +To prevent that every nodeset following `*NSET,NSET=` is treated as a boundary, the user must supply a `boundary_symbols` keyword to the [`P4estMesh`](@ref) constructor: + +```julia +boundary_symbols = [:PhysicalLine1] + +mesh = P4estMesh{2}(mesh_file, polydeg = polydeg, boundary_symbols = boundary_symbols) +``` +By doing so, only nodesets with a label present in `boundary_symbols` are treated as physical boundaries. +Other nodesets that could be used for diagnostics are not treated as external boundaries. +Note that there is a leading colon `:` compared to the label in the `.inp` mesh file. +This is required to turn the label into a [`Symbol`](https://docs.julialang.org/en/v1/manual/metaprogramming/#Symbols). + +A 2D example for this mesh, which is read-in for an unstructured mesh file created with `gmsh`, is presented in +`examples/p4est_2d_dgsem/elixir_euler_NACA6412airfoil_mach2.jl`. + ### Mesh in three spatial dimensions +#### `HOHQMesh`-Extended Abaqus format + The 3D Abaqus file format with high-order boundary information from `HOHQMesh` is very similar to the 2D version discussed above. There are only three changes: @@ -346,4 +378,222 @@ transfinite map of the straight sided hexahedral element to find \mathbf{X}(\boldsymbol{\xi}) = \boldsymbol\Sigma(\boldsymbol{\xi}) - \mathcal{C}_{\texttt{edge}}(\boldsymbol{\xi}) + \mathbf{X}_{linear}(\boldsymbol{\xi}). +``` + +#### Construction from standard Abaqus + +Also for a mesh in standard Abaqus format there are no qualitative changes when going from 2D to 3D. +The most notable difference is that boundaries are formed in 3D by faces defined by four nodes while in 2D boundaries are edges consisting of two elements. 
+A simple mesh file, which is used also in `examples/p4est_3d_dgsem/elixir_euler_free_stream_boundaries.jl`, is given below: +``` +*Heading + +*NODE +1, -2, 0, 0 +2, -1, 0, 0 +3, -1, 1, 0 +4, -2, 1, 0 +5, -2, 0, 1 +6, -1, 0, 1 +7, -1, 1, 1 +8, -2, 1, 1 +9, -1.75, 1, 0 +10, -1.5, 1, 0 +11, -1.25, 1, 0 +12, -1, 0.75000000000035, 0 +13, -1, 0.50000000000206, 0 +14, -1, 0.25000000000104, 0 +15, -1.25, 0, 0 +16, -1.5, 0, 0 +17, -1.75, 0, 0 +18, -2, 0.24999999999941, 0 +19, -2, 0.49999999999869, 0 +20, -2, 0.74999999999934, 0 +21, -1.75, 0, 1 +22, -1.5, 0, 1 +23, -1.25, 0, 1 +24, -1, 0.24999999999941, 1 +25, -1, 0.49999999999869, 1 +26, -1, 0.74999999999934, 1 +27, -1.25, 1, 1 +28, -1.5, 1, 1 +29, -1.75, 1, 1 +30, -2, 0.75000000000035, 1 +31, -2, 0.50000000000206, 1 +32, -2, 0.25000000000104, 1 +33, -2, 0, 0.24999999999941 +34, -2, 0, 0.49999999999869 +35, -2, 0, 0.74999999999934 +36, -2, 1, 0.24999999999941 +37, -2, 1, 0.49999999999869 +38, -2, 1, 0.74999999999934 +39, -1, 0, 0.24999999999941 +40, -1, 0, 0.49999999999869 +41, -1, 0, 0.74999999999934 +42, -1, 1, 0.24999999999941 +43, -1, 1, 0.49999999999869 +44, -1, 1, 0.74999999999934 +45, -1.25, 0.25000000000063, 0 +46, -1.25, 0.50000000000122, 0 +47, -1.25, 0.7500000000001, 0 +48, -1.5, 0.25000000000023, 0 +49, -1.5, 0.50000000000038, 0 +50, -1.5, 0.74999999999984, 0 +51, -1.75, 0.24999999999982, 0 +52, -1.75, 0.49999999999953, 0 +53, -1.75, 0.74999999999959, 0 +54, -1.75, 0.25000000000063, 1 +55, -1.75, 0.50000000000122, 1 +56, -1.75, 0.7500000000001, 1 +57, -1.5, 0.25000000000023, 1 +58, -1.5, 0.50000000000038, 1 +59, -1.5, 0.74999999999984, 1 +60, -1.25, 0.24999999999982, 1 +61, -1.25, 0.49999999999953, 1 +62, -1.25, 0.74999999999959, 1 +63, -2, 0.24999999999982, 0.24999999999941 +64, -2, 0.49999999999953, 0.24999999999941 +65, -2, 0.74999999999959, 0.24999999999941 +66, -2, 0.25000000000023, 0.49999999999869 +67, -2, 0.50000000000038, 0.49999999999869 +68, -2, 0.74999999999984, 0.49999999999869 +69, -2, 0.25000000000063, 0.74999999999934 +70, -2, 0.50000000000122, 0.74999999999934 +71, -2, 0.7500000000001, 0.74999999999934 +72, -1.25, 1, 0.74999999999934 +73, -1.25, 1, 0.49999999999869 +74, -1.25, 1, 0.24999999999941 +75, -1.5, 1, 0.74999999999934 +76, -1.5, 1, 0.49999999999869 +77, -1.5, 1, 0.24999999999941 +78, -1.75, 1, 0.74999999999934 +79, -1.75, 1, 0.49999999999869 +80, -1.75, 1, 0.24999999999941 +81, -1, 0.25000000000063, 0.24999999999941 +82, -1, 0.50000000000122, 0.24999999999941 +83, -1, 0.7500000000001, 0.24999999999941 +84, -1, 0.25000000000023, 0.49999999999869 +85, -1, 0.50000000000038, 0.49999999999869 +86, -1, 0.74999999999984, 0.49999999999869 +87, -1, 0.24999999999982, 0.74999999999934 +88, -1, 0.49999999999953, 0.74999999999934 +89, -1, 0.74999999999959, 0.74999999999934 +90, -1.75, 0, 0.74999999999934 +91, -1.75, 0, 0.49999999999869 +92, -1.75, 0, 0.24999999999941 +93, -1.5, 0, 0.74999999999934 +94, -1.5, 0, 0.49999999999869 +95, -1.5, 0, 0.24999999999941 +96, -1.25, 0, 0.74999999999934 +97, -1.25, 0, 0.49999999999869 +98, -1.25, 0, 0.24999999999941 +99, -1.75, 0.25000000000043, 0.74999999999934 +100, -1.75, 0.25000000000023, 0.49999999999869 +101, -1.75, 0.25000000000002, 0.24999999999941 +102, -1.75, 0.5000000000008, 0.74999999999934 +103, -1.75, 0.50000000000038, 0.49999999999869 +104, -1.75, 0.49999999999995, 0.24999999999941 +105, -1.75, 0.74999999999997, 0.74999999999934 +106, -1.75, 0.74999999999984, 0.49999999999869 +107, -1.75, 0.74999999999972, 0.24999999999941 +108, -1.5, 0.25000000000023, 0.74999999999934 
+109, -1.5, 0.25000000000023, 0.49999999999869 +110, -1.5, 0.25000000000023, 0.24999999999941 +111, -1.5, 0.50000000000038, 0.74999999999934 +112, -1.5, 0.50000000000038, 0.49999999999869 +113, -1.5, 0.50000000000038, 0.24999999999941 +114, -1.5, 0.74999999999984, 0.74999999999934 +115, -1.5, 0.74999999999984, 0.49999999999869 +116, -1.5, 0.74999999999984, 0.24999999999941 +117, -1.25, 0.25000000000002, 0.74999999999934 +118, -1.25, 0.25000000000023, 0.49999999999869 +119, -1.25, 0.25000000000043, 0.24999999999941 +120, -1.25, 0.49999999999995, 0.74999999999934 +121, -1.25, 0.50000000000038, 0.49999999999869 +122, -1.25, 0.5000000000008, 0.24999999999941 +123, -1.25, 0.74999999999972, 0.74999999999934 +124, -1.25, 0.74999999999984, 0.49999999999869 +125, -1.25, 0.74999999999997, 0.24999999999941 +******* E L E M E N T S ************* +*ELEMENT, type=C3D8, ELSET=Volume1 +153, 54, 21, 5, 32, 99, 90, 35, 69 +154, 99, 90, 35, 69, 100, 91, 34, 66 +155, 100, 91, 34, 66, 101, 92, 33, 63 +156, 101, 92, 33, 63, 51, 17, 1, 18 +157, 55, 54, 32, 31, 102, 99, 69, 70 +158, 102, 99, 69, 70, 103, 100, 66, 67 +159, 103, 100, 66, 67, 104, 101, 63, 64 +160, 104, 101, 63, 64, 52, 51, 18, 19 +161, 56, 55, 31, 30, 105, 102, 70, 71 +162, 105, 102, 70, 71, 106, 103, 67, 68 +163, 106, 103, 67, 68, 107, 104, 64, 65 +164, 107, 104, 64, 65, 53, 52, 19, 20 +165, 29, 56, 30, 8, 78, 105, 71, 38 +166, 78, 105, 71, 38, 79, 106, 68, 37 +167, 79, 106, 68, 37, 80, 107, 65, 36 +168, 80, 107, 65, 36, 9, 53, 20, 4 +169, 57, 22, 21, 54, 108, 93, 90, 99 +170, 108, 93, 90, 99, 109, 94, 91, 100 +171, 109, 94, 91, 100, 110, 95, 92, 101 +172, 110, 95, 92, 101, 48, 16, 17, 51 +173, 58, 57, 54, 55, 111, 108, 99, 102 +174, 111, 108, 99, 102, 112, 109, 100, 103 +175, 112, 109, 100, 103, 113, 110, 101, 104 +176, 113, 110, 101, 104, 49, 48, 51, 52 +177, 59, 58, 55, 56, 114, 111, 102, 105 +178, 114, 111, 102, 105, 115, 112, 103, 106 +179, 115, 112, 103, 106, 116, 113, 104, 107 +180, 116, 113, 104, 107, 50, 49, 52, 53 +181, 28, 59, 56, 29, 75, 114, 105, 78 +182, 75, 114, 105, 78, 76, 115, 106, 79 +183, 76, 115, 106, 79, 77, 116, 107, 80 +184, 77, 116, 107, 80, 10, 50, 53, 9 +185, 60, 23, 22, 57, 117, 96, 93, 108 +186, 117, 96, 93, 108, 118, 97, 94, 109 +187, 118, 97, 94, 109, 119, 98, 95, 110 +188, 119, 98, 95, 110, 45, 15, 16, 48 +189, 61, 60, 57, 58, 120, 117, 108, 111 +190, 120, 117, 108, 111, 121, 118, 109, 112 +191, 121, 118, 109, 112, 122, 119, 110, 113 +192, 122, 119, 110, 113, 46, 45, 48, 49 +193, 62, 61, 58, 59, 123, 120, 111, 114 +194, 123, 120, 111, 114, 124, 121, 112, 115 +195, 124, 121, 112, 115, 125, 122, 113, 116 +196, 125, 122, 113, 116, 47, 46, 49, 50 +197, 27, 62, 59, 28, 72, 123, 114, 75 +198, 72, 123, 114, 75, 73, 124, 115, 76 +199, 73, 124, 115, 76, 74, 125, 116, 77 +200, 74, 125, 116, 77, 11, 47, 50, 10 +201, 24, 6, 23, 60, 87, 41, 96, 117 +202, 87, 41, 96, 117, 84, 40, 97, 118 +203, 84, 40, 97, 118, 81, 39, 98, 119 +204, 81, 39, 98, 119, 14, 2, 15, 45 +205, 25, 24, 60, 61, 88, 87, 117, 120 +206, 88, 87, 117, 120, 85, 84, 118, 121 +207, 85, 84, 118, 121, 82, 81, 119, 122 +208, 82, 81, 119, 122, 13, 14, 45, 46 +209, 26, 25, 61, 62, 89, 88, 120, 123 +210, 89, 88, 120, 123, 86, 85, 121, 124 +211, 86, 85, 121, 124, 83, 82, 122, 125 +212, 83, 82, 122, 125, 12, 13, 46, 47 +213, 7, 26, 62, 27, 44, 89, 123, 72 +214, 44, 89, 123, 72, 43, 86, 124, 73 +215, 43, 86, 124, 73, 42, 83, 125, 74 +216, 42, 83, 125, 74, 3, 12, 47, 11 +*NSET,NSET=PhysicalSurface1 +1, 2, 3, 4, 5, 6, 7, 8, 9, 10, +11, 12, 13, 14, 15, 16, 17, 18, 19, 20, +21, 
22, 23, 24, 25, 26, 27, 28, 29, 30, +31, 32, 33, 34, 35, 36, 37, 38, 45, 46, +47, 48, 49, 50, 51, 52, 53, 54, 55, 56, +57, 58, 59, 60, 61, 62, 63, 64, 65, 66, +67, 68, 69, 70, 71, +*NSET,NSET=PhysicalSurface2 +1, 2, 3, 4, 5, 6, 7, 8, 9, 10, +11, 12, 13, 14, 15, 16, 17, 21, 22, 23, +24, 25, 26, 27, 28, 29, 33, 34, 35, 36, +37, 38, 39, 40, 41, 42, 43, 44, 72, 73, +74, 75, 76, 77, 78, 79, 80, 81, 82, 83, +84, 85, 86, 87, 88, 89, 90, 91, 92, 93, +94, 95, 96, 97, 98, ``` \ No newline at end of file diff --git a/examples/p4est_2d_dgsem/elixir_euler_NACA6412airfoil_mach2.jl b/examples/p4est_2d_dgsem/elixir_euler_NACA6412airfoil_mach2.jl new file mode 100644 index 00000000000..6673053d88f --- /dev/null +++ b/examples/p4est_2d_dgsem/elixir_euler_NACA6412airfoil_mach2.jl @@ -0,0 +1,108 @@ + +using Trixi +using OrdinaryDiffEq +using Downloads: download + +############################################################################### +# semidiscretization of the compressible Euler equations + +equations = CompressibleEulerEquations2D(1.4) + +@inline function initial_condition_mach2_flow(x, t, equations::CompressibleEulerEquations2D) + # set the freestream flow parameters + rho_freestream = 1.4 + v1 = 2.0 + v2 = 0.0 + p_freestream = 1.0 + + prim = SVector(rho_freestream, v1, v2, p_freestream) + return prim2cons(prim, equations) +end + +initial_condition = initial_condition_mach2_flow + +# Supersonic inflow boundary condition. +# Calculate the boundary flux entirely from the external solution state, i.e., set +# external solution state values for everything entering the domain. +@inline function boundary_condition_supersonic_inflow(u_inner, + normal_direction::AbstractVector, + x, t, surface_flux_function, + equations::CompressibleEulerEquations2D) + u_boundary = initial_condition_mach2_flow(x, t, equations) + flux = Trixi.flux(u_boundary, normal_direction, equations) + + return flux +end + +# Supersonic outflow boundary condition. +# Calculate the boundary flux entirely from the internal solution state. 
Analogous to supersonic inflow +# except all the solution state values are set from the internal solution as everything leaves the domain +@inline function boundary_condition_supersonic_outflow(u_inner, + normal_direction::AbstractVector, x, + t, + surface_flux_function, + equations::CompressibleEulerEquations2D) + flux = Trixi.flux(u_inner, normal_direction, equations) + + return flux +end + +polydeg = 3 + +surface_flux = flux_lax_friedrichs +volume_flux = flux_ranocha + +basis = LobattoLegendreBasis(polydeg) +shock_indicator = IndicatorHennemannGassner(equations, basis, + alpha_max = 0.5, + alpha_min = 0.001, + alpha_smooth = true, + variable = density_pressure) +volume_integral = VolumeIntegralShockCapturingHG(shock_indicator; + volume_flux_dg = volume_flux, + volume_flux_fv = surface_flux) + +# DG Solver +solver = DGSEM(polydeg = polydeg, surface_flux = surface_flux, + volume_integral = volume_integral) + +# Mesh generated from the following gmsh geometry input file: +# https://gist.githubusercontent.com/DanielDoehring/5ade6d93629f0d8c23a598812dbee2a9/raw/d2bc904fe92146eae1a36156e7f5c535dc1a80f1/NACA6412.geo +mesh_file = joinpath(@__DIR__, "mesh_NACA6412.inp") +isfile(mesh_file) || + download("https://gist.githubusercontent.com/DanielDoehring/e2a389f04f1e37b33819b9637e8ee4c3/raw/4bf7607a2ce4432fdb5cb87d5e264949b11bd5d7/mesh_NACA6412.inp", + mesh_file) + +boundary_symbols = [:PhysicalLine1, :PhysicalLine2, :PhysicalLine3, :PhysicalLine4] + +mesh = P4estMesh{2}(mesh_file, polydeg = polydeg, boundary_symbols = boundary_symbols) + +boundary_conditions = Dict(:PhysicalLine1 => boundary_condition_supersonic_inflow, # Left boundary + :PhysicalLine2 => boundary_condition_supersonic_outflow, # Right boundary + :PhysicalLine3 => boundary_condition_slip_wall, # Airfoil + :PhysicalLine4 => boundary_condition_supersonic_outflow) # Top and bottom boundary + +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + boundary_conditions = boundary_conditions) + +tspan = (0.0, 5.0) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 1000 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval) + +stepsize_callback = StepsizeCallback(cfl = 4.0) + +callbacks = CallbackSet(summary_callback, + analysis_callback, + stepsize_callback) + +# Run the simulation +############################################################################### +sol = solve(ode, SSPRK104(; thread = OrdinaryDiffEq.True()); + dt = 1.0, # overwritten by the `stepsize_callback` + callback = callbacks); + +summary_callback() # print the timer summary diff --git a/examples/p4est_3d_dgsem/elixir_euler_free_stream_boundaries.jl b/examples/p4est_3d_dgsem/elixir_euler_free_stream_boundaries.jl new file mode 100644 index 00000000000..bdc4da26c1f --- /dev/null +++ b/examples/p4est_3d_dgsem/elixir_euler_free_stream_boundaries.jl @@ -0,0 +1,60 @@ + +using Downloads: download +using OrdinaryDiffEq +using Trixi + +############################################################################### +# semidiscretization of the compressible Euler equations + +equations = CompressibleEulerEquations3D(1.4) + +initial_condition = initial_condition_constant + +polydeg = 3 +solver = DGSEM(polydeg = polydeg, surface_flux = flux_lax_friedrichs) + +############################################################################### +# Get the uncurved mesh from a file (downloads the file if not available locally) + +default_mesh_file = joinpath(@__DIR__, 
"mesh_cube_with_boundaries.inp") +isfile(default_mesh_file) || + download("https://gist.githubusercontent.com/DanielDoehring/710eab379fe3042dc08af6f2d1076e49/raw/38e9803bc0dab9b32a61d9542feac5343c3e6f4b/mesh_cube_with_boundaries.inp", + default_mesh_file) +mesh_file = default_mesh_file + +boundary_symbols = [:PhysicalSurface1, :PhysicalSurface2] + +mesh = P4estMesh{3}(mesh_file, polydeg = polydeg, boundary_symbols = boundary_symbols) + +boundary_conditions = Dict(:PhysicalSurface1 => BoundaryConditionDirichlet(initial_condition), + :PhysicalSurface2 => BoundaryConditionDirichlet(initial_condition)) + +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + boundary_conditions = boundary_conditions) + +############################################################################### +# ODE solvers, callbacks etc. + +tspan = (0.0, 1.0) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 100 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval) + +alive_callback = AliveCallback(analysis_interval = analysis_interval) + +stepsize_callback = StepsizeCallback(cfl = 1.5) + +callbacks = CallbackSet(summary_callback, + analysis_callback, alive_callback, + stepsize_callback) + +############################################################################### +# run the simulation + +sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), + dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep = false, callback = callbacks); +summary_callback() # print the timer summary diff --git a/src/meshes/p4est_mesh.jl b/src/meshes/p4est_mesh.jl index 60db285e04f..c5d39ef00c0 100644 --- a/src/meshes/p4est_mesh.jl +++ b/src/meshes/p4est_mesh.jl @@ -289,7 +289,8 @@ end P4estMesh{NDIMS}(meshfile::String; mapping=nothing, polydeg=1, RealT=Float64, initial_refinement_level=0, unsaved_changes=true, - p4est_partition_allow_for_coarsening=true) + p4est_partition_allow_for_coarsening=true, + boundary_symbols = nothing) Main mesh constructor for the `P4estMesh` that imports an unstructured, conforming mesh from an Abaqus mesh file (`.inp`). Each element of the conforming mesh parsed @@ -310,8 +311,9 @@ To create a curved unstructured mesh `P4estMesh` two strategies are available: straight-sided from the information parsed from the `meshfile`. If a mapping function is specified then it computes the mapped tree coordinates via polynomial interpolants with degree `polydeg`. The mesh created by this function will only - have one boundary `:all`, as distinguishing different physical boundaries is - non-trivial. + have one boundary `:all` if `boundary_symbols` is not specified. + If `boundary_symbols` is specified the mesh file will be parsed for nodesets defining + the boundary nodes from which boundary edges (2D) and faces (3D) will be assigned. Note that the `mapping` and `polydeg` keyword arguments are only used by the `p4est_mesh_from_standard_abaqus` function. The `p4est_mesh_from_hohqmesh_abaqus` function obtains the mesh `polydeg` directly from the `meshfile` @@ -345,11 +347,14 @@ For example, if a two-dimensional base mesh contains 25 elements then setting - `p4est_partition_allow_for_coarsening::Bool`: Must be `true` when using AMR to make mesh adaptivity independent of domain partitioning. Should be `false` for static meshes to permit more fine-grained partitioning. 
+- `boundary_symbols::Vector{Symbol}`: A vector of symbols that correspond to the boundary names in the `meshfile`. + If `nothing` is passed then all boundaries are named `:all`. """ function P4estMesh{NDIMS}(meshfile::String; mapping = nothing, polydeg = 1, RealT = Float64, initial_refinement_level = 0, unsaved_changes = true, - p4est_partition_allow_for_coarsening = true) where {NDIMS} + p4est_partition_allow_for_coarsening = true, + boundary_symbols = nothing) where {NDIMS} # Prevent `p4est` from crashing Julia if the file doesn't exist @assert isfile(meshfile) @@ -373,7 +378,8 @@ function P4estMesh{NDIMS}(meshfile::String; polydeg, initial_refinement_level, NDIMS, - RealT) + RealT, + boundary_symbols) end return P4estMesh{NDIMS}(p4est, tree_node_coordinates, nodes, @@ -444,7 +450,8 @@ end # the `mapping` passed to this function using polynomial interpolants of degree `polydeg`. All boundary # names are given the name `:all`. function p4est_mesh_from_standard_abaqus(meshfile, mapping, polydeg, - initial_refinement_level, n_dimensions, RealT) + initial_refinement_level, n_dimensions, RealT, + boundary_symbols) # Create the mesh connectivity using `p4est` connectivity = read_inp_p4est(meshfile, Val(n_dimensions)) connectivity_pw = PointerWrapper(connectivity) @@ -469,12 +476,215 @@ function p4est_mesh_from_standard_abaqus(meshfile, mapping, polydeg, p4est = new_p4est(connectivity, initial_refinement_level) - # There's no simple and generic way to distinguish boundaries. Name all of them :all. - boundary_names = fill(:all, 2 * n_dimensions, n_trees) + if boundary_symbols === nothing + # There's no simple and generic way to distinguish boundaries without any information given. + # Name all of them :all. + boundary_names = fill(:all, 2 * n_dimensions, n_trees) + else # Boundary information given + # Read in nodes belonging to boundaries + node_set_dict = parse_node_sets(meshfile, boundary_symbols) + # Read in all elements with associated nodes to specify the boundaries + element_node_matrix = parse_elements(meshfile, n_trees, n_dimensions) + + # Initialize boundary information matrix with symbol for no boundary / internal connection + boundary_names = fill(Symbol("---"), 2 * n_dimensions, n_trees) + + # Fill `boundary_names` such that it can be processed by p4est + assign_boundaries_standard_abaqus!(boundary_names, n_trees, + element_node_matrix, node_set_dict, + Val(n_dimensions)) + end return p4est, tree_node_coordinates, nodes, boundary_names end +function parse_elements(meshfile, n_trees, n_dims) + @assert n_dims in (2, 3) "Only 2D and 3D meshes are supported" + # Valid element types (that can be processed by p4est) based on dimension + element_types = n_dims == 2 ? + ["*ELEMENT, type=CPS4", "*ELEMENT, type=C2D4", + "*ELEMENT, type=S4"] : ["*ELEMENT, type=C3D8"] + # 2D quads: 4 nodes + element index, 3D hexes: 8 nodes + element index + expected_content_length = n_dims == 2 ? 5 : 9 + + element_node_matrix = Matrix{Int64}(undef, n_trees, expected_content_length - 1) + el_list_follows = false + tree_id = 1 + + open(meshfile, "r") do file + for line in eachline(file) + if any(startswith(line, el_type) for el_type in element_types) + el_list_follows = true + elseif el_list_follows + content = split(line, ",") + if length(content) == expected_content_length # Check that we still read in connectivity data + content_int = parse.(Int64, content) + # Add constituent nodes to the element_node_matrix. + # Important: Do not use index from the Abaqus file, but the one from p4est. 
+ element_node_matrix[tree_id, :] = content_int[2:end] # First entry is element id + tree_id += 1 + else # Processed all elements for this ELSET + el_list_follows = false + end + end + end + end + + return element_node_matrix +end + +function parse_node_sets(meshfile, boundary_symbols) + nodes_dict = Dict{Symbol, Vector{Int64}}() + current_symbol = nothing + current_nodes = Int64[] + + open(meshfile, "r") do file + for line in eachline(file) + # Check if the line contains nodes assembled in a special set, i.e., a physical boundary + if startswith(line, "*NSET,NSET=") + # Safe the previous nodeset + if current_symbol !== nothing + nodes_dict[current_symbol] = current_nodes + end + + current_symbol = Symbol(split(line, "=")[2]) + if current_symbol in boundary_symbols + # New nodeset + current_nodes = Int64[] + else # Read only boundary node sets + current_symbol = nothing + end + elseif current_symbol !== nothing # Read only if there was already a nodeset specified + try # Check if line contains nodes + # There is always a trailing comma, remove the corresponding empty string + append!(current_nodes, parse.(Int64, split(line, ",")[1:(end - 1)])) + catch # Something different, stop reading in nodes + # If parsing fails, set current_symbol to nothing + nodes_dict[current_symbol] = current_nodes + current_symbol = nothing + end + end + end + # Safe the previous nodeset + if current_symbol !== nothing + nodes_dict[current_symbol] = current_nodes + end + end + + for symbol in boundary_symbols + if !haskey(nodes_dict, symbol) + @warn "No nodes found for nodeset :" * "$symbol" * " !" + end + end + + return nodes_dict +end + +# This function assigns the edges of elements to boundaries by +# checking if the nodes that define the edges are part of nodesets which correspond to boundaries. +function assign_boundaries_standard_abaqus!(boundary_names, n_trees, + element_node_matrix, node_set_dict, + ::Val{2}) # 2D version + for tree in 1:n_trees + tree_nodes = element_node_matrix[tree, :] + # For node labeling, see + # https://docs.software.vt.edu/abaqusv2022/English/SIMACAEELMRefMap/simaelm-r-2delem.htm#simaelm-r-2delem-t-nodedef1 + # and search for "Node ordering and face numbering on elements" + for boundary in keys(node_set_dict) # Loop over specified boundaries + # Check bottom edge + if tree_nodes[1] in node_set_dict[boundary] && + tree_nodes[2] in node_set_dict[boundary] + # Bottom boundary is position 3 in p4est indexing + boundary_names[3, tree] = boundary + end + # Check right edge + if tree_nodes[2] in node_set_dict[boundary] && + tree_nodes[3] in node_set_dict[boundary] + # Right boundary is position 2 in p4est indexing + boundary_names[2, tree] = boundary + end + # Check top edge + if tree_nodes[3] in node_set_dict[boundary] && + tree_nodes[4] in node_set_dict[boundary] + # Top boundary is position 4 in p4est indexing + boundary_names[4, tree] = boundary + end + # Check left edge + if tree_nodes[4] in node_set_dict[boundary] && + tree_nodes[1] in node_set_dict[boundary] + # Left boundary is position 1 in p4est indexing + boundary_names[1, tree] = boundary + end + end + end + + return boundary_names +end + +# This function assigns the edges of elements to boundaries by +# checking if the nodes that define the faces are part of nodesets which correspond to boundaries. 
+function assign_boundaries_standard_abaqus!(boundary_names, n_trees, + element_node_matrix, node_set_dict, + ::Val{3}) # 3D version + for tree in 1:n_trees + tree_nodes = element_node_matrix[tree, :] + # For node labeling, see + # https://web.mit.edu/calculix_v2.7/CalculiX/ccx_2.7/doc/ccx/node26.html + for boundary in keys(node_set_dict) # Loop over specified boundaries + # Check "front face" (y_min) + if tree_nodes[1] in node_set_dict[boundary] && + tree_nodes[2] in node_set_dict[boundary] && + tree_nodes[5] in node_set_dict[boundary] && + tree_nodes[6] in node_set_dict[boundary] + # Front face is position 3 in p4est indexing + boundary_names[3, tree] = boundary + end + # Check "back face" (y_max) + if tree_nodes[3] in node_set_dict[boundary] && + tree_nodes[4] in node_set_dict[boundary] && + tree_nodes[7] in node_set_dict[boundary] && + tree_nodes[8] in node_set_dict[boundary] + # Front face is position 4 in p4est indexing + boundary_names[4, tree] = boundary + end + # Check "left face" (x_min) + if tree_nodes[1] in node_set_dict[boundary] && + tree_nodes[4] in node_set_dict[boundary] && + tree_nodes[5] in node_set_dict[boundary] && + tree_nodes[8] in node_set_dict[boundary] + # Left face is position 1 in p4est indexing + boundary_names[1, tree] = boundary + end + # Check "right face" (x_max) + if tree_nodes[2] in node_set_dict[boundary] && + tree_nodes[3] in node_set_dict[boundary] && + tree_nodes[6] in node_set_dict[boundary] && + tree_nodes[7] in node_set_dict[boundary] + # Right face is position 2 in p4est indexing + boundary_names[2, tree] = boundary + end + # Check "bottom face" (z_min) + if tree_nodes[1] in node_set_dict[boundary] && + tree_nodes[2] in node_set_dict[boundary] && + tree_nodes[3] in node_set_dict[boundary] && + tree_nodes[4] in node_set_dict[boundary] + # Bottom face is position 5 in p4est indexing + boundary_names[5, tree] = boundary + end + # Check "top face" (z_max) + if tree_nodes[5] in node_set_dict[boundary] && + tree_nodes[6] in node_set_dict[boundary] && + tree_nodes[7] in node_set_dict[boundary] && + tree_nodes[8] in node_set_dict[boundary] + # Top face is position 6 in p4est indexing + boundary_names[6, tree] = boundary + end + end + end + + return boundary_names +end + """ P4estMeshCubedSphere(trees_per_face_dimension, layers, inner_radius, thickness; polydeg, RealT=Float64, diff --git a/test/test_p4est_2d.jl b/test/test_p4est_2d.jl index cebc2917d52..b034091a175 100644 --- a/test/test_p4est_2d.jl +++ b/test/test_p4est_2d.jl @@ -391,6 +391,27 @@ end end end +@trixi_testset "elixir_euler_NACA6412airfoil_mach2.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_NACA6412airfoil_mach2.jl"), + l2=[ + 1.9752162683735258e-9, 3.150450205812513e-9, + 1.8885499402935914e-9, 7.273629602920966e-9, + ], + linf=[ + 6.007577890709825e-7, 1.005273289944597e-6, + 5.948514542597182e-7, 2.3111764217986774e-6, + ], + tspan=(0.0, 0.1)) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end +end + @trixi_testset "elixir_eulergravity_convergence.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_eulergravity_convergence.jl"), l2=[ diff --git a/test/test_p4est_3d.jl b/test/test_p4est_3d.jl index dc5d32b5a04..ea7d9193add 100644 --- a/test/test_p4est_3d.jl +++ b/test/test_p4est_3d.jl @@ -234,6 +234,29 @@ end end end +@trixi_testset "elixir_euler_free_stream_boundaries.jl" 
begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_euler_free_stream_boundaries.jl"), + l2=[ + 6.530157034651212e-16, 1.6057829680004379e-15, + 3.31107455378537e-15, 3.908829498281281e-15, + 5.048390610424672e-15, + ], + linf=[ + 4.884981308350689e-15, 1.1921019726912618e-14, + 1.5432100042289676e-14, 2.298161660974074e-14, + 6.039613253960852e-14, + ]) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end +end + @trixi_testset "elixir_euler_free_stream_extruded.jl with HLLC FLux" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_free_stream_extruded.jl"), l2=[ From c021c4816b8f0d8aa81c990439771f26684f93b0 Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Thu, 25 Jan 2024 14:34:19 +0100 Subject: [PATCH 04/22] set version to v0.6.7 --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index f246bdfdab4..8532e47f761 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "Trixi" uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb" authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "] -version = "0.6.7-pre" +version = "0.6.7" [deps] CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2" From 246dc5be204c8937f077a1d12b5603bb70138357 Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Thu, 25 Jan 2024 14:34:36 +0100 Subject: [PATCH 05/22] set development version to v0.6.8-pre --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index 8532e47f761..1a0aa0103dc 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "Trixi" uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb" authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "] -version = "0.6.7" +version = "0.6.8-pre" [deps] CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2" From 70d365f405c2494e314277f22e63dcc32d04f095 Mon Sep 17 00:00:00 2001 From: Daniel Doehring Date: Thu, 25 Jan 2024 16:09:57 +0100 Subject: [PATCH 06/22] Fix Docs rendering: Avoid Markdown hyperlink (#1814) * Avoid hyperlink in doc * Update docs/src/meshes/p4est_mesh.md Co-authored-by: Michael Schlottke-Lakemper * Apply suggestions from code review Co-authored-by: Michael Schlottke-Lakemper --------- Co-authored-by: Michael Schlottke-Lakemper --- docs/src/meshes/p4est_mesh.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/src/meshes/p4est_mesh.md b/docs/src/meshes/p4est_mesh.md index db75346cab3..3b35ffcad6f 100644 --- a/docs/src/meshes/p4est_mesh.md +++ b/docs/src/meshes/p4est_mesh.md @@ -55,7 +55,7 @@ This heading is used to indicate to the mesh constructor which of the above mapp create a curvilinear mesh. If the Abaqus file header is **not** present then the `P4estMesh` is created with the first strategy above. -#### List of corner nodes +#### [List of corner nodes](@id corner-node-list) Next, prefaced with `*NODE`, comes a list of the physical `(x,y,z)` coordinates of all the corners. The first integer in the list of the corners provides its id number. 
@@ -71,7 +71,7 @@ Thus, for the two-dimensional example mesh this block of corner information is 7, 3.0, -1.0, 0.0 ``` -#### List of elements +#### [List of elements](@id element-list) The element connectivity is given after the list of corners. The header for this information block is ``` @@ -237,7 +237,7 @@ For completeness, we provide the entire Abaqus mesh file for the example mesh in As an alternative to an Abaqus mesh generated by `HOHQMesh`, `.inp` files with boundary information encoded as nodesets `*NSET,NSET=` can be used to construct a `p4est` mesh. This is especially useful for usage of existing meshes (consisting of bilinear elements) which could stem from the popular [`gmsh`](https://gmsh.info/) meshing software. -In addition to the list of [nodes](#nodes) and [elements](#elements) given above, there are nodesets of the form +In addition to the list of [nodes](@ref corner-node-list) and [elements](@ref element-list) given above, there are nodesets of the form ``` *NSET,NSET=PhysicalLine1 1, 4, 52, 53, 54, 55, 56, 57, 58, From 367881bb713f8671acc1a42ae28a3d68f3000935 Mon Sep 17 00:00:00 2001 From: Daniel Doehring Date: Mon, 29 Jan 2024 09:29:02 +0100 Subject: [PATCH 07/22] Correct NACA6412 BC assignment (#1815) --- docs/literate/src/files/p4est_from_gmsh.jl | 8 ++++---- .../p4est_2d_dgsem/elixir_euler_NACA6412airfoil_mach2.jl | 4 ++-- test/test_p4est_2d.jl | 8 ++++---- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/docs/literate/src/files/p4est_from_gmsh.jl b/docs/literate/src/files/p4est_from_gmsh.jl index 356339cdd47..abfe70eebc4 100644 --- a/docs/literate/src/files/p4est_from_gmsh.jl +++ b/docs/literate/src/files/p4est_from_gmsh.jl @@ -347,8 +347,8 @@ end #hide #md # // labeling of the boundary parts # Physical Line(1) = {4}; // Inflow. Label in Abaqus .inp file: PhysicalLine1 # Physical Line(2) = {2}; // Outflow. Label in Abaqus .inp file: PhysicalLine2 -# Physical Line(3) = {1, 3}; // Airfoil. Label in Abaqus .inp file: PhysicalLine3 -# Physical Line(4) = {5, 6}; //Upper and lower wall/farfield/... Label in Abaqus .inp file: PhysicalLine4 +# Physical Line(3) = {1, 3}; // Upper and lower wall/farfield/... Label in Abaqus .inp file: PhysicalLine3 +# Physical Line(4) = {5, 6}; // Airfoil. Label in Abaqus .inp file: PhysicalLine4 # ``` # which are crucial for the correct assignment of boundary conditions in `Trixi.jl`. # In particular, it is the responsibility of a user to keep track on the physical boundary names between the mesh generation and assignment of boundary condition functions in an elixir. 
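The chain from mesh generation to the elixir is therefore: the `Physical Line` labels above become `*NSET` nodesets in the generated `.inp` file, the same symbols are handed to the mesh constructor, and every one of them must receive an entry in the boundary-condition dictionary. A condensed sketch, reusing the names from the NACA6412 elixir added in this patch (`mesh_file`, `polydeg`, and the boundary-condition functions are assumed to be defined as in that elixir):

```julia
# Symbols must match the *NSET,NSET=... labels written by gmsh into the .inp file
boundary_symbols = [:PhysicalLine1, :PhysicalLine2, :PhysicalLine3, :PhysicalLine4]

# The nodesets with these names are parsed from the mesh file to tag boundary edges
mesh = P4estMesh{2}(mesh_file, polydeg = polydeg, boundary_symbols = boundary_symbols)

# Each tagged boundary then needs a boundary condition in the elixir
boundary_conditions = Dict(:PhysicalLine1 => boundary_condition_supersonic_inflow,  # Left
                           :PhysicalLine2 => boundary_condition_supersonic_outflow, # Right
                           :PhysicalLine3 => boundary_condition_supersonic_outflow, # Top and bottom
                           :PhysicalLine4 => boundary_condition_slip_wall)          # Airfoil
```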
@@ -437,8 +437,8 @@ end #hide #md # # boundary_conditions = Dict(:PhysicalLine1 => boundary_condition_supersonic_inflow, # Left boundary # :PhysicalLine2 => boundary_condition_supersonic_outflow, # Right boundary -# :PhysicalLine3 => boundary_condition_slip_wall, # Airfoil -# :PhysicalLine4 => boundary_condition_supersonic_outflow) # Top and bottom boundary +# :PhysicalLine3 => boundary_condition_supersonic_outflow, # Top and bottom boundary +# :PhysicalLine4 => boundary_condition_slip_wall) # Airfoil # # semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, # boundary_conditions = boundary_conditions) diff --git a/examples/p4est_2d_dgsem/elixir_euler_NACA6412airfoil_mach2.jl b/examples/p4est_2d_dgsem/elixir_euler_NACA6412airfoil_mach2.jl index 6673053d88f..fcd2ca00e10 100644 --- a/examples/p4est_2d_dgsem/elixir_euler_NACA6412airfoil_mach2.jl +++ b/examples/p4est_2d_dgsem/elixir_euler_NACA6412airfoil_mach2.jl @@ -79,8 +79,8 @@ mesh = P4estMesh{2}(mesh_file, polydeg = polydeg, boundary_symbols = boundary_sy boundary_conditions = Dict(:PhysicalLine1 => boundary_condition_supersonic_inflow, # Left boundary :PhysicalLine2 => boundary_condition_supersonic_outflow, # Right boundary - :PhysicalLine3 => boundary_condition_slip_wall, # Airfoil - :PhysicalLine4 => boundary_condition_supersonic_outflow) # Top and bottom boundary + :PhysicalLine3 => boundary_condition_supersonic_outflow, # Top and bottom boundary + :PhysicalLine4 => boundary_condition_slip_wall) # Airfoil semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, boundary_conditions = boundary_conditions) diff --git a/test/test_p4est_2d.jl b/test/test_p4est_2d.jl index b034091a175..121001b35ff 100644 --- a/test/test_p4est_2d.jl +++ b/test/test_p4est_2d.jl @@ -394,12 +394,12 @@ end @trixi_testset "elixir_euler_NACA6412airfoil_mach2.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_NACA6412airfoil_mach2.jl"), l2=[ - 1.9752162683735258e-9, 3.150450205812513e-9, - 1.8885499402935914e-9, 7.273629602920966e-9, + 0.19107654776276498, 0.3545913719444839, + 0.18492730895077583, 0.817927213517244, ], linf=[ - 6.007577890709825e-7, 1.005273289944597e-6, - 5.948514542597182e-7, 2.3111764217986774e-6, + 2.5397624311491946, 2.7075156425517917, 2.200980534211764, + 9.031153939238115, ], tspan=(0.0, 0.1)) # Ensure that we do not have excessive memory allocations From 38100d0a855b0a370c0fd65fbe9bfbe301f5d096 Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Mon, 29 Jan 2024 09:44:45 +0100 Subject: [PATCH 08/22] set version to v0.6.8 --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index 1a0aa0103dc..29e36a46dc5 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "Trixi" uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb" authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. 
Winters ", "Jesse Chan "] -version = "0.6.8-pre" +version = "0.6.8" [deps] CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2" From fcf2652f8ce3f16ba640a66990134632c9f8d3d5 Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Mon, 29 Jan 2024 09:44:57 +0100 Subject: [PATCH 09/22] set development version to v0.6.9-pre --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index 29e36a46dc5..0bbdec206d8 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "Trixi" uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb" authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "] -version = "0.6.8" +version = "0.6.9-pre" [deps] CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2" From f4e6e494846784e513987a91d2248c04fd616029 Mon Sep 17 00:00:00 2001 From: Benjamin Bolm <74359358+bennibolm@users.noreply.github.com> Date: Tue, 30 Jan 2024 11:07:31 +0100 Subject: [PATCH 10/22] (Thread-)Parallelize bounds check routine for subcell IDP limiting (#1736) * Implement parallel bounds check for IDP limiting * Add missing warning as "experimental" from last PR * Updating `idp_bounds_delta_threaded` for all bounds at once * Revise parallel memory structure * Using maximum instead of reduce * Expand vector length to fix False Sharing problem * Generalize stride size in vector * Add suggested comment --- src/callbacks_stage/subcell_bounds_check.jl | 10 ++- .../subcell_bounds_check_2d.jl | 81 ++++++++++++------- .../dgsem_tree/dg_2d_subcell_limiters.jl | 3 + src/solvers/dgsem_tree/subcell_limiters_2d.jl | 23 ++++-- 4 files changed, 80 insertions(+), 37 deletions(-) diff --git a/src/callbacks_stage/subcell_bounds_check.jl b/src/callbacks_stage/subcell_bounds_check.jl index d7e30ab1621..9f34a6b3b4b 100644 --- a/src/callbacks_stage/subcell_bounds_check.jl +++ b/src/callbacks_stage/subcell_bounds_check.jl @@ -118,7 +118,7 @@ end @inline function finalize_callback(callback::BoundsCheckCallback, semi, limiter::SubcellLimiterIDP) (; local_minmax, positivity) = limiter - (; idp_bounds_delta) = limiter.cache + (; idp_bounds_delta_global) = limiter.cache variables = varnames(cons2cons, semi.equations) println("─"^100) @@ -128,8 +128,10 @@ end for v in limiter.local_minmax_variables_cons v_string = string(v) println("$(variables[v]):") - println("-lower bound: ", idp_bounds_delta[Symbol(v_string, "_min")][2]) - println("-upper bound: ", idp_bounds_delta[Symbol(v_string, "_max")][2]) + println("- lower bound: ", + idp_bounds_delta_global[Symbol(v_string, "_min")]) + println("- upper bound: ", + idp_bounds_delta_global[Symbol(v_string, "_max")]) end end if positivity @@ -138,7 +140,7 @@ end continue end println(string(variables[v]) * ":\n- positivity: ", - idp_bounds_delta[Symbol(string(v), "_min")][2]) + idp_bounds_delta_global[Symbol(string(v), "_min")]) end end println("─"^100 * "\n") diff --git a/src/callbacks_stage/subcell_bounds_check_2d.jl b/src/callbacks_stage/subcell_bounds_check_2d.jl index d52eb6edb9e..545d19b5136 100644 --- a/src/callbacks_stage/subcell_bounds_check_2d.jl +++ b/src/callbacks_stage/subcell_bounds_check_2d.jl @@ -10,26 +10,37 @@ time, iter, output_directory, save_errors) (; local_minmax, positivity) = solver.volume_integral.limiter (; variable_bounds) = limiter.cache.subcell_limiter_coefficients - (; idp_bounds_delta) = limiter.cache + (; idp_bounds_delta_local, idp_bounds_delta_global) = limiter.cache + + # Note: Accessing the threaded memory vector `idp_bounds_delta_local` 
with + # `deviation = idp_bounds_delta_local[key][Threads.threadid()]` causes critical performance + # issues due to False Sharing. + # Initializing a vector with n times the length and using every n-th entry fixes this + # problem and allows proper scaling: + # `deviation = idp_bounds_delta_local[key][n * Threads.threadid()]` + # Since there are no processors with caches over 128B, we use `n = 128B / size(uEltype)` + stride_size = div(128, sizeof(eltype(u))) # = n if local_minmax for v in limiter.local_minmax_variables_cons v_string = string(v) key_min = Symbol(v_string, "_min") key_max = Symbol(v_string, "_max") - deviation_min = idp_bounds_delta[key_min] - deviation_max = idp_bounds_delta[key_max] - for element in eachelement(solver, cache), j in eachnode(solver), - i in eachnode(solver) - - var = u[v, i, j, element] - deviation_min[1] = max(deviation_min[1], - variable_bounds[key_min][i, j, element] - var) - deviation_max[1] = max(deviation_max[1], - var - variable_bounds[key_max][i, j, element]) + deviation_min_threaded = idp_bounds_delta_local[key_min] + deviation_max_threaded = idp_bounds_delta_local[key_max] + @threaded for element in eachelement(solver, cache) + deviation_min = deviation_min_threaded[stride_size * Threads.threadid()] + deviation_max = deviation_max_threaded[stride_size * Threads.threadid()] + for j in eachnode(solver), i in eachnode(solver) + var = u[v, i, j, element] + deviation_min = max(deviation_min, + variable_bounds[key_min][i, j, element] - var) + deviation_max = max(deviation_max, + var - variable_bounds[key_max][i, j, element]) + end + deviation_min_threaded[stride_size * Threads.threadid()] = deviation_min + deviation_max_threaded[stride_size * Threads.threadid()] = deviation_max end - deviation_min[2] = max(deviation_min[2], deviation_min[1]) - deviation_max[2] = max(deviation_max[2], deviation_max[1]) end end if positivity @@ -38,17 +49,28 @@ continue end key = Symbol(string(v), "_min") - deviation = idp_bounds_delta[key] - for element in eachelement(solver, cache), j in eachnode(solver), - i in eachnode(solver) - - var = u[v, i, j, element] - deviation[1] = max(deviation[1], - variable_bounds[key][i, j, element] - var) + deviation_threaded = idp_bounds_delta_local[key] + @threaded for element in eachelement(solver, cache) + deviation = deviation_threaded[stride_size * Threads.threadid()] + for j in eachnode(solver), i in eachnode(solver) + var = u[v, i, j, element] + deviation = max(deviation, + variable_bounds[key][i, j, element] - var) + end + deviation_threaded[stride_size * Threads.threadid()] = deviation end - deviation[2] = max(deviation[2], deviation[1]) end end + + for (key, _) in idp_bounds_delta_local + # Calculate maximum deviations of all threads + idp_bounds_delta_local[key][stride_size] = maximum(idp_bounds_delta_local[key][stride_size * i] + for i in 1:Threads.nthreads()) + # Update global maximum deviations + idp_bounds_delta_global[key] = max(idp_bounds_delta_global[key], + idp_bounds_delta_local[key][stride_size]) + end + if save_errors # Print to output file open("$output_directory/deviations.txt", "a") do f @@ -56,8 +78,10 @@ if local_minmax for v in limiter.local_minmax_variables_cons v_string = string(v) - print(f, ", ", idp_bounds_delta[Symbol(v_string, "_min")][1], ", ", - idp_bounds_delta[Symbol(v_string, "_max")][1]) + print(f, ", ", + idp_bounds_delta_local[Symbol(v_string, "_min")][stride_size], + ", ", + idp_bounds_delta_local[Symbol(v_string, "_max")][stride_size]) end end if positivity @@ -65,14 +89,17 @@ if v in 
limiter.local_minmax_variables_cons continue end - print(f, ", ", idp_bounds_delta[Symbol(string(v), "_min")][1]) + print(f, ", ", + idp_bounds_delta_local[Symbol(string(v), "_min")][stride_size]) end end println(f) end - # Reset first entries of idp_bounds_delta - for (key, _) in idp_bounds_delta - idp_bounds_delta[key][1] = zero(eltype(idp_bounds_delta[key][1])) + # Reset local maximum deviations + for (key, _) in idp_bounds_delta_local + for i in 1:Threads.nthreads() + idp_bounds_delta_local[key][stride_size * i] = zero(eltype(idp_bounds_delta_local[key][stride_size])) + end end end diff --git a/src/solvers/dgsem_tree/dg_2d_subcell_limiters.jl b/src/solvers/dgsem_tree/dg_2d_subcell_limiters.jl index 2fc62f548d2..9af8b65b4cd 100644 --- a/src/solvers/dgsem_tree/dg_2d_subcell_limiters.jl +++ b/src/solvers/dgsem_tree/dg_2d_subcell_limiters.jl @@ -470,6 +470,9 @@ end For subcell limiting, the calculation of local bounds for non-periodic domains require the boundary outer state. This function returns the boundary value at time `t` and for node with spatial indices `indices`. + +!!! warning "Experimental implementation" + This is an experimental feature and may change in future releases. """ @inline function get_boundary_outer_state(boundary_condition::BoundaryConditionDirichlet, cache, t, equations, dg, indices...) diff --git a/src/solvers/dgsem_tree/subcell_limiters_2d.jl b/src/solvers/dgsem_tree/subcell_limiters_2d.jl index 384f4178bc9..3d272359fe4 100644 --- a/src/solvers/dgsem_tree/subcell_limiters_2d.jl +++ b/src/solvers/dgsem_tree/subcell_limiters_2d.jl @@ -13,21 +13,32 @@ function create_cache(limiter::Type{SubcellLimiterIDP}, equations::AbstractEquat bound_keys) # Memory for bounds checking routine with `BoundsCheckCallback`. - # The first entry of each vector contains the maximum deviation since the last export. - # The second one contains the total maximum deviation. - idp_bounds_delta = Dict{Symbol, Vector{real(basis)}}() + # Local variable contains the maximum deviation since the last export. + # Using a threaded vector to parallelize bounds check. + idp_bounds_delta_local = Dict{Symbol, Vector{real(basis)}}() + # Global variable contains the total maximum deviation. + idp_bounds_delta_global = Dict{Symbol, real(basis)}() + # Note: False sharing causes critical performance issues on multiple threads when using a vector + # of length `Threads.nthreads()`. Initializing a vector of length `n * Threads.nthreads()` + # and then only using every n-th entry, fixes the problem and allows proper scaling. + # Since there are no processors with caches over 128B, we use `n = 128B / size(uEltype)` + stride_size = div(128, sizeof(eltype(basis.nodes))) # = n for key in bound_keys - idp_bounds_delta[key] = zeros(real(basis), 2) + idp_bounds_delta_local[key] = [zero(real(basis)) + for _ in 1:(stride_size * Threads.nthreads())] + idp_bounds_delta_global[key] = zero(real(basis)) end - return (; subcell_limiter_coefficients, idp_bounds_delta) + return (; subcell_limiter_coefficients, idp_bounds_delta_local, + idp_bounds_delta_global) end function (limiter::SubcellLimiterIDP)(u::AbstractArray{<:Any, 4}, semi, dg::DGSEM, t, dt; kwargs...) 
@unpack alpha = limiter.cache.subcell_limiter_coefficients - alpha .= zero(eltype(alpha)) + # TODO: Do not abuse `reset_du!` but maybe implement a generic `set_zero!` + @trixi_timeit timer() "reset alpha" reset_du!(alpha, dg, semi.cache) if limiter.local_minmax @trixi_timeit timer() "local min/max limiting" idp_local_minmax!(alpha, limiter, From 4b07706f01cc17c7ef6215c21da312e221cb0c00 Mon Sep 17 00:00:00 2001 From: Daniel Doehring Date: Tue, 30 Jan 2024 20:32:24 +0100 Subject: [PATCH 11/22] Do not safe sol (#1820) --- examples/p4est_2d_dgsem/elixir_euler_NACA6412airfoil_mach2.jl | 1 + 1 file changed, 1 insertion(+) diff --git a/examples/p4est_2d_dgsem/elixir_euler_NACA6412airfoil_mach2.jl b/examples/p4est_2d_dgsem/elixir_euler_NACA6412airfoil_mach2.jl index fcd2ca00e10..7e55a259596 100644 --- a/examples/p4est_2d_dgsem/elixir_euler_NACA6412airfoil_mach2.jl +++ b/examples/p4est_2d_dgsem/elixir_euler_NACA6412airfoil_mach2.jl @@ -103,6 +103,7 @@ callbacks = CallbackSet(summary_callback, ############################################################################### sol = solve(ode, SSPRK104(; thread = OrdinaryDiffEq.True()); dt = 1.0, # overwritten by the `stepsize_callback` + save_everystep = false, callback = callbacks); summary_callback() # print the timer summary From 07990295b7ba155669ef4c2953809abe8e3c4880 Mon Sep 17 00:00:00 2001 From: Simon Candelaresi <10759273+SimonCan@users.noreply.github.com> Date: Wed, 31 Jan 2024 13:21:11 +0000 Subject: [PATCH 12/22] Sc/converters coupling (#1558) * Added coupling converters. * Added generic converter_function for structured 2d meshes. * Added example elixir for coupling converters. * Cleaned up converter coupling elixir. * Added equations in coupling converters. * Added converter functions. * Added identity converter function. * Autoformat for converter coupling implementation. * Added coupled converter elixir. * Corrected file name of coupled converters test. * Removed redundant doc string. * Added function signature in doc string. * Removed coverage_override in coupled tests. * Removed old commented code. * Update make.jl Added interface coupling docs to the main menu. * Update make.jl Moved converter coupling section. * Create coupling.md * Update coupling.md Added some documentation on coupling converters. * Removed troublesome AnalysisCallbackCoupled from test. * Chenged coupling converter function. * Changed coupling converter function and updated tests. * Sepcialized coupling function call. * Removed volume coupling from documentation to avoit confusion. * Update src/coupling_converters/coupling_converters.jl Co-authored-by: Hendrik Ranocha * Removed redundant converter function for coupling. * Removed redundant coupling converter file mentioned in some files. * Autoreformatted. * Removed old coupled elixir and replaced it with one using converter functions. * Updated errors for coupled tests. * Corrected test results for coupled equations. * Corrected comment. * Removed coupled test from special tests. * Removed coupled test from specials. * Chaned the coupling function to the identity. * Updated coupling tests. * Updated errors for coupled test. * Added advice about binary compatability for coupled equations in the documentation. * Typo. * Added numerical fluxes. * Corrected rs copy routine. Now loop over this semi's components. * Reformatted equations source file. * Removed problemating include of time_integration.jl. * Removed export of deleted methods. 
* Reverted to old version of compressible Euler multicomponent with no support for structured grid. * Renamed documentation file for multi-physics coupling. * Renamed doc reference. * Update src/semidiscretization/semidiscretization_coupled.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/src/multi-physics_coupling.md Co-authored-by: Michael Schlottke-Lakemper * Update docs/src/multi-physics_coupling.md Co-authored-by: Michael Schlottke-Lakemper * Update docs/src/multi-physics_coupling.md Co-authored-by: Michael Schlottke-Lakemper * Reinstated structured_2d_dgsem coupled in special tests. * Update examples/structured_2d_dgsem/elixir_advection_coupled.jl Co-authored-by: Michael Schlottke-Lakemper * Renamed CouplingFunction to CouplingConverter. * Update src/semidiscretization/semidiscretization_coupled.jl Co-authored-by: Michael Schlottke-Lakemper * Cleaned the copy of coupled boundary values. * Reduced time span for example coupling elixir. * Removed redundant loop. * Applied formatter. * Removed default coupling covnerter function. * Moved coupling converter function into elixir. * Apply suggestions from code review Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/make.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Removed coupling_converters.jl from the include. * Corrected introduced issue with coupling boundary copy. The latest change to clean up the boundary copying introduced a bug related to the determination of the wrong node indices. This is now corrected. * Corrected comment on final simulation time. * Updated errors for coupled test to reflect changed final simulation time. * Added miladd. * Corrected coordinate finding in semidiscretization_coupled. * Fixed issued related to memory allocation. * Corrected loop over semidiscretization. * Removed commented out code. * Fixed type instability with loops over semidiscretizations using lispy tuple programming. * Removed obsolete code. * Fixed another typa instability in coupled semidiscretization. * Cleaning up of the coupled semidiscretization. * Autoformatted coupled semidiscretization. * Fixed last type instability in coupling. * Autoformatter on semidiscretization. * Fixed bug in boundary values copy that arose when coupling multiple systems. * aplpied autoformatter on coupled semidiscretization. * Extended the structured 2d example elixir for the coupled advection to 4 semidiscretizations. This hase two purpuses: 1. Users are given an example fro 2d coupling avoiding common pitfalls. 2. This increases the code coverege for the test. * Updated test results for coupled advection in 2d to reflect the 4 semidiscretizations that are now used. * Added correct errors for tests for the coupled adveciton equations in structured 2d. 
* Update examples/structured_2d_dgsem/elixir_advection_coupled.jl Co-authored-by: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> * Update examples/structured_2d_dgsem/elixir_advection_coupled.jl Co-authored-by: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> * Update src/semidiscretization/semidiscretization_coupled.jl Co-authored-by: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> * Update src/semidiscretization/semidiscretization_coupled.jl Co-authored-by: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> * Update src/semidiscretization/semidiscretization_coupled.jl Co-authored-by: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> * Update src/semidiscretization/semidiscretization_coupled.jl Co-authored-by: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> * Update src/semidiscretization/semidiscretization_coupled.jl Co-authored-by: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> * Update src/semidiscretization/semidiscretization_coupled.jl Co-authored-by: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> * Update src/semidiscretization/semidiscretization_coupled.jl Co-authored-by: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> * Corrected foreach_enumerate implementation. * Fix closing parens * Remove unused recursive rhs! * Pass equations to converter function * Apply formatting * Reverted copy_to_coupled_boundary to previou version to avoid type instability. * Corrected computation of coupled semidiscretizations and fixed memory issue. * Removed redundant nelements function, as it is no longer used. * Applied autoformatter. * Improvements in style and added info about passing equations to coupling functions, as suggested by Andrew and Daniel. * Restored timings in semidiscretization coupled. --------- Co-authored-by: Michael Schlottke-Lakemper Co-authored-by: Hendrik Ranocha Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> Co-authored-by: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> --- docs/make.jl | 1 + docs/src/multi-physics_coupling.md | 46 +++++ .../elixir_advection_coupled.jl | 191 +++++++++++++----- .../semidiscretization_coupled.jl | 191 ++++++++++++------ test/test_structured_2d.jl | 28 ++- 5 files changed, 340 insertions(+), 117 deletions(-) create mode 100644 docs/src/multi-physics_coupling.md diff --git a/docs/make.jl b/docs/make.jl index dee87371bd1..7fce3b31e24 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -108,6 +108,7 @@ makedocs( ], "Time integration" => "time_integration.md", "Callbacks" => "callbacks.md", + "Coupling" => "multi-physics_coupling.md" ], "Advanced topics & developers" => [ "Conventions" =>"conventions.md", diff --git a/docs/src/multi-physics_coupling.md b/docs/src/multi-physics_coupling.md new file mode 100644 index 00000000000..eec92bc21de --- /dev/null +++ b/docs/src/multi-physics_coupling.md @@ -0,0 +1,46 @@ +# [Multi-physics coupling](@id multi-physics-coupling) +A complex simulation can consist of different spatial domains in which +different equations are being solved, different numerical methods being used +or the grid structure is different. +One example would be a fluid in a tank and an extended hot plate attached to it. +We would then like to solve the Navier-Stokes equations in the fluid domain +and the heat conduction equations in the plate. +The coupling would happen at the interface through the exchange of thermal energy. 
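For the common case in which both coupled systems solve the same equations, the converter function introduced in the next section is simply the identity, and the coupling itself is expressed through `BoundaryConditionCoupled`. The following abridged sketch shows only these two ingredients and is not a complete setup (meshes, solvers, and the remaining boundaries are omitted); for a full, runnable configuration see `examples/structured_2d_dgsem/elixir_advection_coupled.jl`:

```julia
# Identity converter: both systems use the same set of variables
coupling_function = (x, u, equations_other, equations_own) -> u

# Use the solution of system 2, sampled along its matching boundary, as boundary
# data for this system; system 2 receives an analogous condition pointing back
boundary_condition_coupled = BoundaryConditionCoupled(2, (:end, :i_forward), Float64,
                                                      coupling_function)

# One `SemidiscretizationHyperbolic` is built per domain with such boundary
# conditions; bundling them yields a single coupled system that is then passed
# to `semidiscretize` as usual
semi = SemidiscretizationCoupled(semi1, semi2)
```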
+ + +## Converter coupling +It may happen that the two systems to be coupled do not share any variables, but +share some of the physics. +In such a situation, the same physics is just represented in a different form and with +a different set of variables. +This is the case, for instance assuming two domains, if there is a fluid system in one domain +and a Vlasov system in the other domain. +In that case we would have variables representing distribution functions of +the Vlasov system on one side and variables representing the mechanical quantities, like density, +of the fluid system. +To translate the fields from one description to the other one needs to use +converter functions. +These functions need to be hand tailored by the user in the elixir file where each +pair of coupled systems requires two coupling functions, one for each direction. + +In the general case, we have a system $A$ with $m$ variables +$u_{A,i}, \: i = 1, \dots, m$ and another +system $B$ with $n$ variables $u_{B,j}, \: j = 1, \dots, n$. +We then define two coupling functions, one that transforms $u_A$ into $u_B$ +and one that goes the other way. + +In their minimal form they take the position vector $x$, state vector $u$ +and the equations of the two coupled systems +and return the transformed variables. +By passing the equations we can make use of their parameters, if they are required. +Examples can be seen in `examples/structured_2d_dgsem/elixir_advection_coupled.jl`. + + +## Warning about binary compatibility +Currently the coordinate values on the nodes can differ by machine precision when +simulating the mesh and when splitting the mesh in multiple domains. +This is an issue coming from the coordinate interpolation on the nodes. +As a result, running a simulation in a single system and in two coupled domains +may result in a difference of the order of the machine precision. +While this is not an issue for most practical problems, it is best to keep this in mind when comparing test runs. + diff --git a/examples/structured_2d_dgsem/elixir_advection_coupled.jl b/examples/structured_2d_dgsem/elixir_advection_coupled.jl index 2a56d23f4c0..43b68f21b03 100644 --- a/examples/structured_2d_dgsem/elixir_advection_coupled.jl +++ b/examples/structured_2d_dgsem/elixir_advection_coupled.jl @@ -2,31 +2,38 @@ using OrdinaryDiffEq using Trixi ############################################################################### -# Coupled semidiscretization of two linear advection systems, which are connected periodically +# Coupled semidiscretization of four linear advection systems using converter functions such that +# they are also coupled across the domain boundaries to generate a periodic system. # -# In this elixir, we have a square domain that is divided into a left half and a right half. On each -# half of the domain, a completely independent SemidiscretizationHyperbolic is created for the -# linear advection equations. The two systems are coupled in the x-direction and have periodic -# boundaries in the y-direction. For a high-level overview, see also the figure below: +# In this elixir, we have a square domain that is divided into a upper-left, lower-left, +# upper-right and lower-right quarter. On each quarter +# of the domain, a completely independent SemidiscretizationHyperbolic is created for the +# linear advection equations. The four systems are coupled in the x and y-direction. 
+# For a high-level overview, see also the figure below: # # (-1, 1) ( 1, 1) # ┌────────────────────┬────────────────────┐ -# │ ↑ periodic ↑ │ ↑ periodic ↑ │ -# │ │ │ +# │ ↑ coupled ↑ │ ↑ coupled ↑ │ # │ │ │ # │ ========= │ ========= │ # │ system #1 │ system #2 │ # │ ========= │ ========= │ # │ │ │ +# │<-- coupled │<-- coupled │ +# │ coupled -->│ coupled -->│ # │ │ │ +# │ ↓ coupled ↓ │ ↓ coupled ↓ │ +# ├────────────────────┼────────────────────┤ +# │ ↑ coupled ↑ │ ↑ coupled ↑ │ # │ │ │ +# │ ========= │ ========= │ +# │ system #3 │ system #4 │ +# │ ========= │ ========= │ # │ │ │ -# │ coupled -->│<-- coupled │ -# │ │ │ -# │<-- coupled │ coupled -->│ -# │ │ │ +# │<-- coupled │<-- coupled │ +# │ coupled -->│ coupled -->│ # │ │ │ -# │ ↓ periodic ↓ │ ↓ periodic ↓ │ +# │ ↓ coupled ↓ │ ↓ coupled ↓ │ # └────────────────────┴────────────────────┘ # (-1, -1) ( 1, -1) @@ -36,60 +43,135 @@ equations = LinearScalarAdvectionEquation2D(advection_velocity) # Create DG solver with polynomial degree = 3 and (local) Lax-Friedrichs/Rusanov flux as surface flux solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) -# First mesh is the left half of a [-1,1]^2 square -coordinates_min1 = (-1.0, -1.0) # minimum coordinates (min(x), min(y)) +# This will be the number of elements for each quarter/semidiscretization. +cells_per_dimension = (8, 8) + +########### +# system #1 +########### + +coordinates_min1 = (-1.0, 0.0) # minimum coordinates (min(x), min(y)) coordinates_max1 = (0.0, 1.0) # maximum coordinates (max(x), max(y)) -# Define identical resolution as a variable such that it is easier to change from `trixi_include` -cells_per_dimension = (8, 16) +mesh1 = StructuredMesh(cells_per_dimension, coordinates_min1, coordinates_max1) -cells_per_dimension1 = cells_per_dimension +# Define the coupling functions +coupling_function12 = (x, u, equations_other, equations_own) -> u +coupling_function13 = (x, u, equations_other, equations_own) -> u -mesh1 = StructuredMesh(cells_per_dimension1, coordinates_min1, coordinates_max1) +# Define the coupling boundary conditions and the system it is coupled to. 
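# The four arguments below are, in order: the index of the other semidiscretization
# inside the `SemidiscretizationCoupled`, the node/cell indices selecting the matching
# boundary of that other mesh, the element type used for the solution (here `Float64`),
# and the converter function defined above (see the `BoundaryConditionCoupled` docstring).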
+boundary_conditions_x_neg1 = BoundaryConditionCoupled(2, (:end, :i_forward), Float64, + coupling_function12) +boundary_conditions_x_pos1 = BoundaryConditionCoupled(2, (:begin, :i_forward), Float64, + coupling_function12) +boundary_conditions_y_neg1 = BoundaryConditionCoupled(3, (:i_forward, :end), Float64, + coupling_function13) +boundary_conditions_y_pos1 = BoundaryConditionCoupled(3, (:i_forward, :begin), Float64, + coupling_function13) # A semidiscretization collects data structures and functions for the spatial discretization semi1 = SemidiscretizationHyperbolic(mesh1, equations, initial_condition_convergence_test, solver, - boundary_conditions = ( - # Connect left boundary with right boundary of right mesh - x_neg = BoundaryConditionCoupled(2, - (:end, - :i_forward), - Float64), - # Connect right boundary with left boundary of right mesh - x_pos = BoundaryConditionCoupled(2, - (:begin, - :i_forward), - Float64), - y_neg = boundary_condition_periodic, - y_pos = boundary_condition_periodic)) - -# Second mesh is the right half of a [-1,1]^2 square -coordinates_min2 = (0.0, -1.0) # minimum coordinates (min(x), min(y)) + boundary_conditions = (x_neg = boundary_conditions_x_neg1, + x_pos = boundary_conditions_x_pos1, + y_neg = boundary_conditions_y_neg1, + y_pos = boundary_conditions_y_pos1)) + +########### +# system #2 +########### + +coordinates_min2 = (0.0, 0.0) # minimum coordinates (min(x), min(y)) coordinates_max2 = (1.0, 1.0) # maximum coordinates (max(x), max(y)) -cells_per_dimension2 = cells_per_dimension +mesh2 = StructuredMesh(cells_per_dimension, coordinates_min2, coordinates_max2) -mesh2 = StructuredMesh(cells_per_dimension2, coordinates_min2, coordinates_max2) +# Define the coupling functions +coupling_function21 = (x, u, equations_other, equations_own) -> u +coupling_function24 = (x, u, equations_other, equations_own) -> u +# Define the coupling boundary conditions and the system it is coupled to. 
+boundary_conditions_x_neg2 = BoundaryConditionCoupled(1, (:end, :i_forward), Float64, + coupling_function21) +boundary_conditions_x_pos2 = BoundaryConditionCoupled(1, (:begin, :i_forward), Float64, + coupling_function21) +boundary_conditions_y_neg2 = BoundaryConditionCoupled(4, (:i_forward, :end), Float64, + coupling_function24) +boundary_conditions_y_pos2 = BoundaryConditionCoupled(4, (:i_forward, :begin), Float64, + coupling_function24) + +# A semidiscretization collects data structures and functions for the spatial discretization semi2 = SemidiscretizationHyperbolic(mesh2, equations, initial_condition_convergence_test, solver, - boundary_conditions = ( - # Connect left boundary with right boundary of left mesh - x_neg = BoundaryConditionCoupled(1, - (:end, - :i_forward), - Float64), - # Connect right boundary with left boundary of left mesh - x_pos = BoundaryConditionCoupled(1, - (:begin, - :i_forward), - Float64), - y_neg = boundary_condition_periodic, - y_pos = boundary_condition_periodic)) - -# Create a semidiscretization that bundles semi1 and semi2 -semi = SemidiscretizationCoupled(semi1, semi2) + boundary_conditions = (x_neg = boundary_conditions_x_neg2, + x_pos = boundary_conditions_x_pos2, + y_neg = boundary_conditions_y_neg2, + y_pos = boundary_conditions_y_pos2)) + +########### +# system #3 +########### + +coordinates_min3 = (-1.0, -1.0) # minimum coordinates (min(x), min(y)) +coordinates_max3 = (0.0, 0.0) # maximum coordinates (max(x), max(y)) + +mesh3 = StructuredMesh(cells_per_dimension, coordinates_min3, coordinates_max3) + +# Define the coupling functions +coupling_function34 = (x, u, equations_other, equations_own) -> u +coupling_function31 = (x, u, equations_other, equations_own) -> u + +# Define the coupling boundary conditions and the system it is coupled to. +boundary_conditions_x_neg3 = BoundaryConditionCoupled(4, (:end, :i_forward), Float64, + coupling_function34) +boundary_conditions_x_pos3 = BoundaryConditionCoupled(4, (:begin, :i_forward), Float64, + coupling_function34) +boundary_conditions_y_neg3 = BoundaryConditionCoupled(1, (:i_forward, :end), Float64, + coupling_function31) +boundary_conditions_y_pos3 = BoundaryConditionCoupled(1, (:i_forward, :begin), Float64, + coupling_function31) + +# A semidiscretization collects data structures and functions for the spatial discretization +semi3 = SemidiscretizationHyperbolic(mesh3, equations, initial_condition_convergence_test, + solver, + boundary_conditions = (x_neg = boundary_conditions_x_neg3, + x_pos = boundary_conditions_x_pos3, + y_neg = boundary_conditions_y_neg3, + y_pos = boundary_conditions_y_pos3)) + +########### +# system #4 +########### + +coordinates_min4 = (0.0, -1.0) # minimum coordinates (min(x), min(y)) +coordinates_max4 = (1.0, 0.0) # maximum coordinates (max(x), max(y)) + +mesh4 = StructuredMesh(cells_per_dimension, coordinates_min4, coordinates_max4) + +# Define the coupling functions +coupling_function43 = (x, u, equations_other, equations_own) -> u +coupling_function42 = (x, u, equations_other, equations_own) -> u + +# Define the coupling boundary conditions and the system it is coupled to. 
+boundary_conditions_x_neg4 = BoundaryConditionCoupled(3, (:end, :i_forward), Float64, + coupling_function43) +boundary_conditions_x_pos4 = BoundaryConditionCoupled(3, (:begin, :i_forward), Float64, + coupling_function43) +boundary_conditions_y_neg4 = BoundaryConditionCoupled(2, (:i_forward, :end), Float64, + coupling_function42) +boundary_conditions_y_pos4 = BoundaryConditionCoupled(2, (:i_forward, :begin), Float64, + coupling_function42) + +# A semidiscretization collects data structures and functions for the spatial discretization +semi4 = SemidiscretizationHyperbolic(mesh4, equations, initial_condition_convergence_test, + solver, + boundary_conditions = (x_neg = boundary_conditions_x_neg4, + x_pos = boundary_conditions_x_pos4, + y_neg = boundary_conditions_y_neg4, + y_pos = boundary_conditions_y_pos4)) + +# Create a semidiscretization that bundles all the semidiscretizations. +semi = SemidiscretizationCoupled(semi1, semi2, semi3, semi4) ############################################################################### # ODE solvers, callbacks etc. @@ -104,7 +186,10 @@ summary_callback = SummaryCallback() # The AnalysisCallback allows to analyse the solution in regular intervals and prints the results analysis_callback1 = AnalysisCallback(semi1, interval = 100) analysis_callback2 = AnalysisCallback(semi2, interval = 100) -analysis_callback = AnalysisCallbackCoupled(semi, analysis_callback1, analysis_callback2) +analysis_callback3 = AnalysisCallback(semi3, interval = 100) +analysis_callback4 = AnalysisCallback(semi4, interval = 100) +analysis_callback = AnalysisCallbackCoupled(semi, analysis_callback1, analysis_callback2, + analysis_callback3, analysis_callback4) # The SaveSolutionCallback allows to save the solution to a file in regular intervals save_solution = SaveSolutionCallback(interval = 100, diff --git a/src/semidiscretization/semidiscretization_coupled.jl b/src/semidiscretization/semidiscretization_coupled.jl index 0941ae6a8ca..dc21dbe9a1e 100644 --- a/src/semidiscretization/semidiscretization_coupled.jl +++ b/src/semidiscretization/semidiscretization_coupled.jl @@ -1,3 +1,10 @@ +# By default, Julia/LLVM does not use fused multiply-add operations (FMAs). +# Since these FMAs can increase the performance of many numerical algorithms, +# we need to opt-in explicitly. +# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details. +@muladd begin +#! format: noindent + """ SemidiscretizationCoupled @@ -65,11 +72,13 @@ function Base.show(io::IO, ::MIME"text/plain", semi::SemidiscretizationCoupled) summary_line(io, "system", i) mesh, equations, solver, _ = mesh_equations_solver_cache(semi.semis[i]) summary_line(increment_indent(io), "mesh", mesh |> typeof |> nameof) - summary_line(increment_indent(io), "equations", equations |> typeof |> nameof) + summary_line(increment_indent(io), "equations", + equations |> typeof |> nameof) summary_line(increment_indent(io), "initial condition", semi.semis[i].initial_condition) # no boundary conditions since that could be too much - summary_line(increment_indent(io), "source terms", semi.semis[i].source_terms) + summary_line(increment_indent(io), "source terms", + semi.semis[i].source_terms) summary_line(increment_indent(io), "solver", solver |> typeof |> nameof) end summary_line(io, "total #DOFs per field", ndofs(semi)) @@ -106,20 +115,14 @@ end @inline Base.real(semi::SemidiscretizationCoupled) = promote_type(real.(semi.semis)...) -@inline Base.eltype(semi::SemidiscretizationCoupled) = promote_type(eltype.(semi.semis)...) 
+@inline function Base.eltype(semi::SemidiscretizationCoupled) + promote_type(eltype.(semi.semis)...) +end @inline function ndofs(semi::SemidiscretizationCoupled) sum(ndofs, semi.semis) end -@inline function nelements(semi::SemidiscretizationCoupled) - return sum(semi.semis) do semi_ - mesh, equations, solver, cache = mesh_equations_solver_cache(semi_) - - nelements(mesh, solver, cache) - end -end - function compute_coefficients(t, semi::SemidiscretizationCoupled) @unpack u_indices = semi @@ -137,23 +140,40 @@ end @view u_ode[semi.u_indices[index]] end +# Same as `foreach(enumerate(something))`, but without allocations. +# +# Note that compile times may increase if this is used with big tuples. +@inline foreach_enumerate(func, collection) = foreach_enumerate(func, collection, 1) +@inline foreach_enumerate(func, collection::Tuple{}, index) = nothing + +@inline function foreach_enumerate(func, collection, index) + element = first(collection) + remaining_collection = Base.tail(collection) + + func((index, element)) + + # Process remaining collection + foreach_enumerate(func, remaining_collection, index + 1) +end + function rhs!(du_ode, u_ode, semi::SemidiscretizationCoupled, t) @unpack u_indices = semi time_start = time_ns() @trixi_timeit timer() "copy to coupled boundaries" begin - for semi_ in semi.semis - copy_to_coupled_boundary!(semi_.boundary_conditions, u_ode, semi) + foreach(semi.semis) do semi_ + copy_to_coupled_boundary!(semi_.boundary_conditions, u_ode, semi, semi_) end end # Call rhs! for each semidiscretization - for i in eachsystem(semi) - u_loc = get_system_u_ode(u_ode, i, semi) - du_loc = get_system_u_ode(du_ode, i, semi) - - @trixi_timeit timer() "system #$i" rhs!(du_loc, u_loc, semi.semis[i], t) + @trixi_timeit timer() "copy to coupled boundaries" begin + foreach_enumerate(semi.semis) do (i, semi_) + u_loc = get_system_u_ode(u_ode, i, semi) + du_loc = get_system_u_ode(du_ode, i, semi) + rhs!(du_loc, u_loc, semi_, t) + end end runtime = time_ns() - time_start @@ -309,7 +329,8 @@ end for i in eachsystem(semi) u_ode_slice = get_system_u_ode(u_ode, i, semi) - save_solution_file(semis[i], u_ode_slice, solution_callback, integrator, system = i) + save_solution_file(semis[i], u_ode_slice, solution_callback, integrator, + system = i) end end @@ -332,7 +353,7 @@ end ################################################################################ """ - BoundaryConditionCoupled(other_semi_index, indices, uEltype) + BoundaryConditionCoupled(other_semi_index, indices, uEltype, coupling_converter) Boundary condition to glue two meshes together. Solution values at the boundary of another mesh will be used as boundary values. This requires the use @@ -348,32 +369,37 @@ This is currently only implemented for [`StructuredMesh`](@ref). - `indices::Tuple`: node/cell indices at the boundary of the mesh in the other semidiscretization. See examples below. 
- `uEltype::Type`: element type of solution +- `coupling_converter::CouplingConverter`: function to call for converting the solution + state of one system to the other system # Examples ```julia # Connect the left boundary of mesh 2 to our boundary such that our positive # boundary direction will match the positive y direction of the other boundary -BoundaryConditionCoupled(2, (:begin, :i), Float64) +BoundaryConditionCoupled(2, (:begin, :i), Float64, fun) # Connect the same two boundaries oppositely oriented -BoundaryConditionCoupled(2, (:begin, :i_backwards), Float64) +BoundaryConditionCoupled(2, (:begin, :i_backwards), Float64, fun) # Using this as y_neg boundary will connect `our_cells[i, 1, j]` to `other_cells[j, end-i, end]` -BoundaryConditionCoupled(2, (:j, :i_backwards, :end), Float64) +BoundaryConditionCoupled(2, (:j, :i_backwards, :end), Float64, fun) ``` !!! warning "Experimental code" This is an experimental feature and can change any time. """ -mutable struct BoundaryConditionCoupled{NDIMS, NDIMST2M1, uEltype <: Real, Indices} +mutable struct BoundaryConditionCoupled{NDIMS, NDIMST2M1, uEltype <: Real, Indices, + CouplingConverter} # NDIMST2M1 == NDIMS * 2 - 1 # Buffer for boundary values: [variable, nodes_i, nodes_j, cell_i, cell_j] - u_boundary :: Array{uEltype, NDIMST2M1} # NDIMS * 2 - 1 - other_semi_index :: Int - other_orientation :: Int - indices :: Indices - - function BoundaryConditionCoupled(other_semi_index, indices, uEltype) + u_boundary :: Array{uEltype, NDIMST2M1} # NDIMS * 2 - 1 + other_semi_index :: Int + other_orientation :: Int + indices :: Indices + coupling_converter :: CouplingConverter + + function BoundaryConditionCoupled(other_semi_index, indices, uEltype, + coupling_converter) NDIMS = length(indices) u_boundary = Array{uEltype, NDIMS * 2 - 1}(undef, ntuple(_ -> 0, NDIMS * 2 - 1)) @@ -385,8 +411,10 @@ mutable struct BoundaryConditionCoupled{NDIMS, NDIMST2M1, uEltype <: Real, Indic other_orientation = 3 end - new{NDIMS, NDIMS * 2 - 1, uEltype, typeof(indices)}(u_boundary, other_semi_index, - other_orientation, indices) + new{NDIMS, NDIMS * 2 - 1, uEltype, typeof(indices), + typeof(coupling_converter)}(u_boundary, + other_semi_index, other_orientation, + indices, coupling_converter) end end @@ -395,8 +423,10 @@ function Base.eltype(boundary_condition::BoundaryConditionCoupled) end function (boundary_condition::BoundaryConditionCoupled)(u_inner, orientation, direction, - cell_indices, surface_node_indices, - surface_flux_function, equations) + cell_indices, + surface_node_indices, + surface_flux_function, + equations) # get_node_vars(boundary_condition.u_boundary, equations, solver, surface_node_indices..., cell_indices...), # but we don't have a solver here u_boundary = SVector(ntuple(v -> boundary_condition.u_boundary[v, @@ -421,13 +451,15 @@ function allocate_coupled_boundary_conditions(semi::AbstractSemidiscretization) for direction in 1:n_boundaries boundary_condition = semi.boundary_conditions[direction] - allocate_coupled_boundary_condition(boundary_condition, direction, mesh, equations, + allocate_coupled_boundary_condition(boundary_condition, direction, mesh, + equations, solver) end end # Don't do anything for other BCs than BoundaryConditionCoupled -function allocate_coupled_boundary_condition(boundary_condition, direction, mesh, equations, +function allocate_coupled_boundary_condition(boundary_condition, direction, mesh, + equations, solver) return nothing end @@ -448,43 +480,69 @@ function 
allocate_coupled_boundary_condition(boundary_condition::BoundaryConditi end # Don't do anything for other BCs than BoundaryConditionCoupled -function copy_to_coupled_boundary!(boundary_condition, u_ode, semi) +function copy_to_coupled_boundary!(boundary_condition, u_ode, semi_coupled, semi) return nothing end +function copy_to_coupled_boundary!(u_ode, semi_coupled, semi, i, n_boundaries, + boundary_condition, boundary_conditions...) + copy_to_coupled_boundary!(boundary_condition, u_ode, semi_coupled, semi) + if i < n_boundaries + copy_to_coupled_boundary!(u_ode, semi_coupled, semi, i + 1, n_boundaries, + boundary_conditions...) + end +end + function copy_to_coupled_boundary!(boundary_conditions::Union{Tuple, NamedTuple}, u_ode, - semi) - for boundary_condition in boundary_conditions - copy_to_coupled_boundary!(boundary_condition, u_ode, semi) + semi_coupled, semi) + copy_to_coupled_boundary!(u_ode, semi_coupled, semi, 1, length(boundary_conditions), + boundary_conditions...) +end + +function mesh_equations_solver_cache(other_semi_index, i, semi_, semi_tuple...) + if i == other_semi_index + return mesh_equations_solver_cache(semi_) + else + # Walk through semidiscretizations until we find `i` + mesh_equations_solver_cache(other_semi_index, i + 1, semi_tuple...) end end # In 2D -function copy_to_coupled_boundary!(boundary_condition::BoundaryConditionCoupled{2}, u_ode, - semi) - @unpack u_indices = semi +function copy_to_coupled_boundary!(boundary_condition::BoundaryConditionCoupled{2}, + u_ode, + semi_coupled, semi) + @unpack u_indices = semi_coupled @unpack other_semi_index, other_orientation, indices = boundary_condition + @unpack coupling_converter, u_boundary = boundary_condition + + mesh_own, equations_own, solver_own, cache_own = mesh_equations_solver_cache(semi) + + mesh_other, equations_other, solver_other, cache_other = mesh_equations_solver_cache(other_semi_index, + 1, + semi_coupled.semis...) - mesh, equations, solver, cache = mesh_equations_solver_cache(semi.semis[other_semi_index]) - u = wrap_array(get_system_u_ode(u_ode, other_semi_index, semi), mesh, equations, solver, - cache) + node_coordinates_other = cache_other.elements.node_coordinates + u_ode_other = get_system_u_ode(u_ode, other_semi_index, semi_coupled) + u_other = wrap_array(u_ode_other, mesh_other, equations_other, solver_other, + cache_other) - linear_indices = LinearIndices(size(mesh)) + linear_indices = LinearIndices(size(mesh_other)) if other_orientation == 1 - cells = axes(mesh, 2) + cells = axes(mesh_other, 2) else # other_orientation == 2 - cells = axes(mesh, 1) + cells = axes(mesh_other, 1) end # Copy solution data to the coupled boundary using "delayed indexing" with # a start value and a step size to get the correct face and orientation. 
- node_index_range = eachnode(solver) + node_index_range = eachnode(solver_other) i_node_start, i_node_step = index_to_start_step_2d(indices[1], node_index_range) j_node_start, j_node_step = index_to_start_step_2d(indices[2], node_index_range) - i_cell_start, i_cell_step = index_to_start_step_2d(indices[1], axes(mesh, 1)) - j_cell_start, j_cell_step = index_to_start_step_2d(indices[2], axes(mesh, 2)) + i_cell_start, i_cell_step = index_to_start_step_2d(indices[1], axes(mesh_other, 1)) + j_cell_start, j_cell_step = index_to_start_step_2d(indices[2], axes(mesh_other, 2)) i_cell = i_cell_start j_cell = j_cell_start @@ -492,16 +550,26 @@ function copy_to_coupled_boundary!(boundary_condition::BoundaryConditionCoupled{ for cell in cells i_node = i_node_start j_node = j_node_start - - for i in eachnode(solver) - for v in 1:size(u, 1) - boundary_condition.u_boundary[v, i, cell] = u[v, i_node, j_node, - linear_indices[i_cell, - j_cell]] + element_id = linear_indices[i_cell, j_cell] + + for element_id in eachnode(solver_other) + x_other = get_node_coords(node_coordinates_other, equations_other, + solver_other, + i_node, j_node, linear_indices[i_cell, j_cell]) + u_node_other = get_node_vars(u_other, equations_other, solver_other, i_node, + j_node, linear_indices[i_cell, j_cell]) + u_node_converted = coupling_converter(x_other, u_node_other, + equations_other, + equations_own) + + for i in eachindex(u_node_converted) + u_boundary[i, element_id, cell] = u_node_converted[i] end + i_node += i_node_step j_node += j_node_step end + i_cell += i_cell_step j_cell += j_cell_step end @@ -511,7 +579,8 @@ end ### DGSEM/structured ################################################################################ -@inline function calc_boundary_flux_by_direction!(surface_flux_values, u, t, orientation, +@inline function calc_boundary_flux_by_direction!(surface_flux_values, u, t, + orientation, boundary_condition::BoundaryConditionCoupled, mesh::StructuredMesh, equations, surface_integral, dg::DG, cache, @@ -531,7 +600,8 @@ end sign_jacobian = sign(inverse_jacobian[node_indices..., element]) # Contravariant vector Ja^i is the normal vector - normal = sign_jacobian * get_contravariant_vector(orientation, contravariant_vectors, + normal = sign_jacobian * + get_contravariant_vector(orientation, contravariant_vectors, node_indices..., element) # If the mapping is orientation-reversing, the normal vector will be reversed (see above). 
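For orientation, here is a minimal two-system sketch of the extended four-argument constructor `BoundaryConditionCoupled(other_semi_index, indices, uEltype, coupling_converter)` introduced in this patch. It reproduces the removed two-mesh layout of the elixir above (left and right halves of [-1,1]^2, coupled in `x`, periodic in `y`) with the new signature; the advection equation, solver, initial condition, and `cells_per_dimension` value are assumptions standing in for the setup defined earlier in that elixir and are not part of this hunk.

```julia
using Trixi

# Assumed setup (not shown in this hunk): linear advection, DGSEM solver, resolution.
advection_velocity = (0.2, -0.7)
equations = LinearScalarAdvectionEquation2D(advection_velocity)
solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs)
cells_per_dimension = (8, 8)

# Identity converter: hand the neighboring state over unchanged.
coupling_function = (x, u, equations_other, equations_own) -> u

# Left and right halves of the square [-1, 1]^2, as in the removed two-system elixir.
mesh1 = StructuredMesh(cells_per_dimension, (-1.0, -1.0), (0.0, 1.0))
mesh2 = StructuredMesh(cells_per_dimension, (0.0, -1.0), (1.0, 1.0))

# Each x boundary of system 1 is glued to the opposite x face of system 2 and
# vice versa; the y boundaries stay periodic within each system.
semi1 = SemidiscretizationHyperbolic(mesh1, equations, initial_condition_convergence_test,
                                     solver,
                                     boundary_conditions = (x_neg = BoundaryConditionCoupled(2, (:end, :i_forward), Float64,
                                                                                             coupling_function),
                                                            x_pos = BoundaryConditionCoupled(2, (:begin, :i_forward), Float64,
                                                                                             coupling_function),
                                                            y_neg = boundary_condition_periodic,
                                                            y_pos = boundary_condition_periodic))

semi2 = SemidiscretizationHyperbolic(mesh2, equations, initial_condition_convergence_test,
                                     solver,
                                     boundary_conditions = (x_neg = BoundaryConditionCoupled(1, (:end, :i_forward), Float64,
                                                                                             coupling_function),
                                                            x_pos = BoundaryConditionCoupled(1, (:begin, :i_forward), Float64,
                                                                                             coupling_function),
                                                            y_neg = boundary_condition_periodic,
                                                            y_pos = boundary_condition_periodic))

# Bundle both systems into one coupled semidiscretization.
semi = SemidiscretizationCoupled(semi1, semi2)
```

The converter is invoked in `copy_to_coupled_boundary!` above via `coupling_converter(x_other, u_node_other, equations_other, equations_own)`, so systems with different equations or variable sets can be glued together by supplying a non-trivial conversion instead of the identity used in this sketch.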
@@ -608,3 +678,4 @@ function analyze_convergence(errors_coupled, iterations, return eoc_mean_values end +end # @muladd diff --git a/test/test_structured_2d.jl b/test/test_structured_2d.jl index e5d45ebcc07..522510a42e3 100644 --- a/test/test_structured_2d.jl +++ b/test/test_structured_2d.jl @@ -33,14 +33,34 @@ end @trixi_testset "elixir_advection_coupled.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_coupled.jl"), - l2=[7.816742843181738e-6, 7.816742843196112e-6], - linf=[6.314906965543265e-5, 6.314906965410039e-5], + l2=[ + 7.816742843336293e-6, + 7.816742843340186e-6, + 7.816742843025513e-6, + 7.816742843061526e-6, + ], + linf=[ + 6.314906965276812e-5, + 6.314906965187994e-5, + 6.31490696496595e-5, + 6.314906965032563e-5, + ], coverage_override=(maxiters = 10^5,)) @testset "analysis_callback(sol) for AnalysisCallbackCoupled" begin errors = analysis_callback(sol) - @test errors.l2≈[7.816742843181738e-6, 7.816742843196112e-6] rtol=1.0e-4 - @test errors.linf≈[6.314906965543265e-5, 6.314906965410039e-5] rtol=1.0e-4 + @test errors.l2≈[ + 7.816742843336293e-6, + 7.816742843340186e-6, + 7.816742843025513e-6, + 7.816742843061526e-6, + ] rtol=1.0e-4 + @test errors.linf≈[ + 6.314906965276812e-5, + 6.314906965187994e-5, + 6.31490696496595e-5, + 6.314906965032563e-5, + ] rtol=1.0e-4 # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let From d53fdb59fe71c1ba956fc758b5874c3914304ddf Mon Sep 17 00:00:00 2001 From: Johannes Markert <10619309+jmark@users.noreply.github.com> Date: Wed, 31 Jan 2024 16:49:35 +0100 Subject: [PATCH 13/22] Feature: T8codeMesh backend with MPI support (#1803) * Initial commit for the new feature using t8code as meshing backend. * Delete t8code_2d_dgsem * Added new examples and tests. Testing updates for T8code.jl. * Worked in the comments. * Fixed spelling. * Update src/auxiliary/auxiliary.jl Co-authored-by: Hendrik Ranocha * Added whitespace in Unions. * Adapted commented out code block reporting the no. of elements per level. * Added dummy save mesh support for . * Added test . * Added to method signature. * Deleted unnecessary comments. * Removed commented out tests. * Fixed Morton ordering bug in 2D at mortar interfaces. * Disabled `save_solution` callbacks and added more tests. * Added more tests. * Updated code according to the review. * Update src/auxiliary/t8code.jl Co-authored-by: Hendrik Ranocha * Update src/auxiliary/t8code.jl Co-authored-by: Hendrik Ranocha * Update src/auxiliary/t8code.jl Co-authored-by: Hendrik Ranocha * Update src/auxiliary/t8code.jl Co-authored-by: Hendrik Ranocha * Update src/meshes/t8code_mesh.jl Co-authored-by: Hendrik Ranocha * Update src/meshes/t8code_mesh.jl Co-authored-by: Hendrik Ranocha * Update src/meshes/t8code_mesh.jl Co-authored-by: Hendrik Ranocha * Update src/meshes/t8code_mesh.jl Co-authored-by: Hendrik Ranocha * Update src/meshes/t8code_mesh.jl Co-authored-by: Hendrik Ranocha * Update src/meshes/t8code_mesh.jl Co-authored-by: Hendrik Ranocha * Update src/solvers/dgsem_t8code/containers_2d.jl Co-authored-by: Hendrik Ranocha * Update src/meshes/t8code_mesh.jl Co-authored-by: Hendrik Ranocha * Code cleanup. * Updated to T8code@0.3.0 * Fixing minor issues. * Fixed typo. * Code cleanup. * Enabled `set_ghost` in examples. * Generalized type info in function signature. * Added namespace qualifier. * Updated comments. * Refactored code and deleted lots of it. * Removed a copy operation. * Initial commit. * Fxinig minor bugs. * Fixed minor typo. 
* Added first 3d example and fixed segfault. * Added many 3D examples and tests. * Backup. * Fixed merging issues. * Adding more tests. * Fixed some merging issues and formatting. * Fixed spelling. * Fixed spelling and changed assert macro. * Applied automatic formatting. * Applied automatic formatting. * Backup. * Removed superfluous outer constructor for T8codeMesh. * Added return statement for consistency. * Fixed wrong indentation by autoformatter. * Added comments. * Made sure an exception is thrown. * Changed flags for sc_init for t8code initialization. * Updated formatting. * Workaround for error about calling MPI routines after MPI has been finalized. * Upped to T8code v0.4.1. * Added mpi_finailize_hook for proper memory cleanup. * Added t8code to test_threaded.jl * Added a `save_mesh_file` call in order to satisfy code coverage. * Improved finalizer logic for T8coeMesh. * Refined code. * Restructured to do blocks. * Moved save_mesh_file call to test file. * Fixed spelling error. * Made sc_finalize optional. * Fixed spelling. * Cleaned up examples. * Updated and cleaned t8code solver codes. * Updated tests for t8code 3D code. * Fixed spelling. * Update elixir_euler_source_terms_nonconforming_unstructured_curved.jl * Update elixir_euler_source_terms_nonconforming_unstructured_curved.jl * Fixed indentation. * Update src/solvers/dgsem_structured/dg_3d.jl Co-authored-by: Hendrik Ranocha * Update src/solvers/dgsem_t8code/containers_3d.jl Co-authored-by: Andrew Winters * Update src/callbacks_step/amr_dg3d.jl Co-authored-by: Andrew Winters * Update examples/t8code_3d_dgsem/elixir_euler_ec.jl Co-authored-by: Andrew Winters * Update examples/t8code_3d_dgsem/elixir_advection_unstructured_curved.jl Co-authored-by: Andrew Winters * Update examples/t8code_3d_dgsem/elixir_advection_amr_unstructured_curved.jl Co-authored-by: Andrew Winters * Update src/solvers/dgsem_structured/dg_3d.jl Co-authored-by: Hendrik Ranocha * Update src/meshes/t8code_mesh.jl Co-authored-by: Hendrik Ranocha * Update src/callbacks_step/analysis_dg3d.jl Co-authored-by: Hendrik Ranocha * Update examples/t8code_3d_dgsem/elixir_euler_free_stream.jl Co-authored-by: Andrew Winters * Removed NDIMS from T8codeMesh construction in case of p4est/p8est connectivity input. * Aligned T8codeMesh constructur with other mesh constructors. * Update examples/t8code_3d_dgsem/elixir_euler_sedov.jl Co-authored-by: Andrew Winters * Update examples/t8code_3d_dgsem/elixir_euler_sedov.jl Co-authored-by: Andrew Winters * Cleanup up. * Added @allocated test. * Fixed formatting. * Applied formatter. * Added ParallelT8codeMesh to constructor. * Code cleanup. * Extended T8codeMesh backend to support MPI interfaces datastructures. 
* Update NEWS.md (#1780) * set version to v0.6.5 * set development version to v0.6.6-pre * hotfix: restrict DiffEqBase.jl to let CI pass (#1788) * hotfix: restrict DiffEqBase.jl to let CI pass * restrict DiffEqBase.jl in main Project.toml * Update Project.toml * Parabolic Mortar for AMR `P4estMesh{3}` (#1765) * Clean branch * Un-Comment * un-comment * test coarsen * remove redundancy * Remove support for passive terms * expand resize * comments * format * Avoid code duplication * Update src/callbacks_step/amr_dg1d.jl Co-authored-by: Michael Schlottke-Lakemper * comment * comment & format * Try to increase coverage * Slightly more expressive names * Apply suggestions from code review * add specifier for 1d * Structs for resizing parabolic helpers * check if mortars are present * reuse `reinitialize_containers!` * resize calls for parabolic helpers * update analysis callbacks * Velocities for compr euler * Init container * correct copy-paste error * resize each dim * add dispatch * Add AMR for shear layer * USe only amr shear layer * first steps towards p4est parabolic amr * Add tests * remove plots * Format * remove redundant line * platform independent tests * No need for different flux_viscous comps after adding container_viscous to p4est * Laplace 3d * Longer times to allow converage to hit coarsen! * Increase testing of Laplace 3D * Add tests for velocities * remove comment * add elixir for amr testing * adding commented out mortar routines in 2D * Adding Mortar to 2d dg parabolic term * remove testing snippet * fix comments * add more arguments for dispatch * add some temporary todo notes * some updates for AP and KS * specialize mortar_fluxes_to_elements * BUGFIX: apply_jacobian_parabolic! was incorrect for P4estMesh * fixed rhs_parabolic! for mortars * more changes to elixir * indexing bug * comments * Adding the example for nonperiodic BCs with amr * hopefully this fixes AMR boundaries for parabolic terms * add elixir * Example with non periodic bopundary conditions * remove cruft * 3D parabolic amr * TGV elixir * Creating test for AMR 3D parabolic * Formatting * test formatting * Update src/Trixi.jl * Update src/equations/compressible_euler_1d.jl * Update src/equations/compressible_euler_2d.jl * Update src/equations/compressible_euler_3d.jl * Update src/solvers/dgsem_tree/container_viscous_2d.jl * Update src/solvers/dgsem_tree/container_viscous_2d.jl * Update src/solvers/dgsem_tree/container_viscous_2d.jl * Update src/solvers/dgsem_tree/container_viscous_3d.jl * Update src/solvers/dgsem_tree/container_viscous_3d.jl * Update src/solvers/dgsem_tree/container_viscous_3d.jl * Update src/solvers/dgsem_p4est/dg_2d_parabolic.jl * Update src/solvers/dgsem_p4est/dg_2d_parabolic.jl * Update src/solvers/dgsem_tree/dg_2d_parabolic.jl * Update src/solvers/dgsem_tree/dg_2d_parabolic.jl * Update src/solvers/dgsem_tree/dg_2d_parabolic.jl * Update src/solvers/dgsem_tree/dg_2d_parabolic.jl * Update src/solvers/dgsem_tree/dg_2d_parabolic.jl * Update src/solvers/dgsem_tree/dg_2d_parabolic.jl * Update src/solvers/dgsem_tree/dg_2d_parabolic.jl * Update src/solvers/dgsem_tree/dg_2d_parabolic.jl * Update src/solvers/dgsem_tree/dg_2d_parabolic.jl * Update test/test_parabolic_3d.jl * Update src/solvers/dgsem_tree/dg_3d_parabolic.jl * Update src/solvers/dgsem_tree/dg_3d_parabolic.jl * Update src/solvers/dgsem_tree/dg_3d_parabolic.jl * Update src/solvers/dgsem_tree/dg_3d_parabolic.jl * Update src/solvers/dgsem_tree/dg_3d_parabolic.jl * Update src/solvers/dgsem_tree/dg_3d_parabolic.jl * Update 
src/solvers/dgsem_tree/dg_3d_parabolic.jl * Update src/solvers/dgsem_tree/dg_3d_parabolic.jl * Update src/solvers/dgsem_p4est/dg_3d_parabolic.jl * Update src/solvers/dgsem_p4est/dg_3d_parabolic.jl * Update src/solvers/dgsem_p4est/dg_3d_parabolic.jl * Update src/solvers/dgsem_p4est/dg_3d_parabolic.jl * Update src/solvers/dgsem_p4est/dg_2d_parabolic.jl * Update src/solvers/dgsem_p4est/dg_2d.jl * Update src/solvers/dgsem_p4est/dg_2d_parabolic.jl * Update src/solvers/dgsem_p4est/dg_2d_parabolic.jl * Update test/test_parabolic_3d.jl * Update test/test_parabolic_3d.jl * Update test/test_parabolic_3d.jl * Update examples/p4est_3d_dgsem/elixir_navierstokes_3d_blast_wave_amr.jl * Update examples/p4est_3d_dgsem/elixir_navierstokes_taylor_green_vortex_amr.jl * Update examples/p4est_3d_dgsem/elixir_navierstokes_3d_blast_wave_amr.jl * Update examples/p4est_3d_dgsem/elixir_navierstokes_taylor_green_vortex_amr.jl * Update examples/p4est_3d_dgsem/elixir_navierstokes_3d_blast_wave_amr.jl * Update examples/p4est_3d_dgsem/elixir_navierstokes_3d_blast_wave_amr.jl * Update examples/p4est_3d_dgsem/elixir_navierstokes_taylor_green_vortex_amr.jl * Update examples/p4est_3d_dgsem/elixir_navierstokes_3d_blast_wave_amr.jl * Update examples/p4est_3d_dgsem/elixir_navierstokes_taylor_green_vortex_amr.jl * Update examples/p4est_3d_dgsem/elixir_navierstokes_3d_blast_wave_amr.jl * Update examples/p4est_3d_dgsem/elixir_navierstokes_taylor_green_vortex_amr.jl * Update dg_3d_parabolic.jl * Update test_parabolic_3d.jl * Update elixir_navierstokes_3d_blast_wave_amr.jl * Update elixir_navierstokes_taylor_green_vortex_amr.jl * Update dg_3d_parabolic.jl * Update test_parabolic_3d.jl * Update test_parabolic_3d.jl * Update examples/p4est_3d_dgsem/elixir_navierstokes_3d_blast_wave_amr.jl * Update elixir_navierstokes_3d_blast_wave_amr.jl * Update elixir_navierstokes_taylor_green_vortex_amr.jl * Update dg_3d_parabolic.jl * Update test_parabolic_3d.jl * Delete examples/p4est_3d_dgsem/elixir_navierstokes_3d_blast_wave_amr.jl * Create elixir_navierstokes_blast_wave_amr.jl * Update test_parabolic_3d.jl * Update NEWS.md * Update NEWS.md * Update src/solvers/dgsem_p4est/dg_3d_parabolic.jl * Update src/solvers/dgsem_p4est/dg_3d_parabolic.jl * Update src/solvers/dgsem_p4est/dg_3d_parabolic.jl * Update src/solvers/dgsem_p4est/dg_3d_parabolic.jl --------- Co-authored-by: Daniel_Doehring Co-authored-by: Daniel Doehring Co-authored-by: Michael Schlottke-Lakemper Co-authored-by: Jesse Chan <1156048+jlchan@users.noreply.github.com> Co-authored-by: Jesse Chan * reset the timer also on non-root MPI processes (#1787) Co-authored-by: Michael Schlottke-Lakemper * Add HLLC flux for non-cartesian meshes to CompressibleEulerEquations{2,3}D (#1790) * add HLLC flux for non-cartesian meshes * add tests for HLLC flux * Add 2D test with HLLC * Update test_p4est_3d.jl * Update test_p4est_2d.jl * Update test_p4est_3d.jl * Update src/equations/compressible_euler_3d.jl Co-authored-by: Hendrik Ranocha * Update src/equations/compressible_euler_2d.jl Co-authored-by: Hendrik Ranocha * Update compressible_euler_2d.jl * Update compressible_euler_3d.jl * Update test_p4est_2d.jl * Update test_p4est_3d.jl * Update compressible_euler_2d.jl * Update compressible_euler_2d.jl --------- Co-authored-by: Daniel Doehring Co-authored-by: Hendrik Ranocha * Bump crate-ci/typos from 1.16.23 to 1.16.26 (#1793) Bumps [crate-ci/typos](https://github.com/crate-ci/typos) from 1.16.23 to 1.16.26. 
- [Release notes](https://github.com/crate-ci/typos/releases) - [Changelog](https://github.com/crate-ci/typos/blob/master/CHANGELOG.md) - [Commits](https://github.com/crate-ci/typos/compare/v1.16.23...v1.16.26) --- updated-dependencies: - dependency-name: crate-ci/typos dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Extend `CompressibleEulerQuasi1D` and `ShallowWaterQuasi1D` to `DGMulti` (#1797) * adding DGMulti versions of fluxes * remove incorrect factor of 2 * add example and test * formatting * add comment * revert removing factor of 2 * formatting * add SWE quasi-1D test d * enable quasi1D SWE for DGMulti * add docstrings * formatting * Update src/equations/compressible_euler_quasi_1d.jl Co-authored-by: Hendrik Ranocha * adding comments explaining why `normal_direction` is included in 1D * Apply suggestions from code review Co-authored-by: Daniel Doehring --------- Co-authored-by: Hendrik Ranocha Co-authored-by: Daniel Doehring * Fix boundary_condition_slip_wall for SWE (#1798) * fix_wall_bc * add test * apply formatter * adjust some comments * update elixir and test; add topography * comments explaining usage of `ForwardDiff.jacobian` (#1800) * Bump actions/upload-artifact from 3 to 4 (#1795) * Bump actions/upload-artifact from 3 to 4 Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 3 to 4. - [Release notes](https://github.com/actions/upload-artifact/releases) - [Commits](https://github.com/actions/upload-artifact/compare/v3...v4) --- updated-dependencies: - dependency-name: actions/upload-artifact dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] * bump download-artifact to v4 --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Hendrik Ranocha * Code cleanup. * Applied formatter. * Registered mpi t8code tests. * Added load-balancing feature to T8codeMesh. * Applied formatter. * Updated some test values. * Fixed typo. * Removed unused member variable. * Apply suggestions from code review Co-authored-by: Daniel Doehring * suggestions from review * fix format (strange?) * Added comments to help interpreting the source code. * Update src/callbacks_step/amr_dg3d.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Adhered to unified mesh constructor calling scheme. * Applied formatter. * Switched to Float64 instead of Cdouble. * Update src/meshes/t8code_mesh.jl Co-authored-by: Daniel Doehring * Refactored negative volume check. * Applied formatter. * Fixed typo resp. bug. * Apply suggestions from code review Co-authored-by: Hendrik Ranocha * add missing allocation checks * Some refactoring. * Deleted msh file. * Fixed a bug. * Code cleanup. * Ignore gmsh files. * Removed adapt! from global namespace. * Added documentation. * Added @test_warn to test. * Applied formatter. * Apply suggestions from code review Co-authored-by: Hendrik Ranocha Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Turned @warn to @info. * Code cleanup and added @deprecated routines in order to avoid breaking release. * Applied formatter. * Added formatter pragmas to avoid ugly formatting. * Applied formatter. * Code cleanup. Documenting. * Applied formatter. * Removed remnants of development. * Removed dubious commented out line. 
* minors in comments skip ci * Added `retrieve` function which downloads missing mesh file without race conditions in case of MPI environment. * Added Downloads as dependency. * Added missing namespace qualifier. * add compat bound * Apply suggestions from code review Co-authored-by: Daniel Doehring Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * use retrieve everywhere for downloading * try to improve formatting * and what julia formatter makes out of it * try to consolidate max_dt computation * format * add default allocation tests * remove left over vtk_write * Revert "try to consolidate max_dt computation" This reverts commit d0e9a2cb2882f426a3966aac03e6ce497e1b720f. * fix variable name * less consolidation * remove unused variables * Update src/auxiliary/auxiliary.jl Co-authored-by: Hendrik Ranocha * Update src/Trixi.jl Co-authored-by: Hendrik Ranocha * revert b068d1183 (manually) * use Trixi.download * Revert "use retrieve everywhere for downloading" This reverts commit a438547e23bc489f12beea625dcc0905a50d18ce. * change to new Trixi.download signature * format * remove merge leftover * format * Code refactor. Applied review comments. * Fixed bug. * Fixed typo. * Update src/auxiliary/auxiliary.jl Co-authored-by: Hendrik Ranocha * Update src/meshes/t8code_mesh.jl Co-authored-by: Hendrik Ranocha * Update src/meshes/t8code_mesh.jl Co-authored-by: Hendrik Ranocha * Applying review comments. * Added return early if there is nothing to do in adapt routines. --------- Signed-off-by: dependabot[bot] Co-authored-by: Johannes Markert Co-authored-by: Hendrik Ranocha Co-authored-by: Andrew Winters Co-authored-by: Jesse Chan <1156048+jlchan@users.noreply.github.com> Co-authored-by: Hendrik Ranocha Co-authored-by: Ahmad Peyvan <115842305+apey236@users.noreply.github.com> Co-authored-by: Daniel_Doehring Co-authored-by: Daniel Doehring Co-authored-by: Michael Schlottke-Lakemper Co-authored-by: Jesse Chan Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Patrick Ersing <114223904+patrickersing@users.noreply.github.com> Co-authored-by: Benedict Geihe Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> --- Project.toml | 2 + .../elixir_2d_euler_vortex_unstructured.jl | 9 +- examples/dgmulti_2d/elixir_euler_hohqmesh.jl | 9 +- .../elixir_advection_amr_unstructured_flag.jl | 7 +- .../elixir_advection_unstructured_flag.jl | 7 +- .../elixir_euler_blast_wave_amr.jl | 7 +- .../elixir_euler_double_mach_amr.jl | 8 +- .../elixir_euler_forward_step_amr.jl | 8 +- .../elixir_euler_free_stream.jl | 7 +- ...e_terms_nonconforming_unstructured_flag.jl | 7 +- .../elixir_euler_supersonic_cylinder.jl | 8 +- .../elixir_euler_wall_bc_amr.jl | 8 +- examples/p4est_2d_dgsem/elixir_mhd_rotor.jl | 7 +- ...lixir_advection_amr_unstructured_curved.jl | 7 +- .../elixir_advection_unstructured_curved.jl | 7 +- examples/p4est_3d_dgsem/elixir_euler_ec.jl | 7 +- .../elixir_euler_free_stream.jl | 7 +- .../elixir_euler_free_stream_extruded.jl | 7 +- ...terms_nonconforming_unstructured_curved.jl | 7 +- ...euler_source_terms_nonperiodic_hohqmesh.jl | 8 +- .../elixir_shallowwater_well_balanced.jl | 1 - .../elixir_advection_amr_unstructured_flag.jl | 7 +- .../elixir_advection_unstructured_flag.jl | 7 +- .../elixir_euler_free_stream.jl | 7 +- ...e_terms_nonconforming_unstructured_flag.jl | 7 +- examples/t8code_2d_dgsem/elixir_mhd_rotor.jl | 7 +- 
...lixir_advection_amr_unstructured_curved.jl | 9 +- .../elixir_advection_unstructured_curved.jl | 7 +- examples/t8code_3d_dgsem/elixir_euler_ec.jl | 7 +- .../elixir_euler_free_stream.jl | 7 +- .../elixir_euler_free_stream_extruded.jl | 7 +- ...terms_nonconforming_unstructured_curved.jl | 7 +- .../elixir_acoustics_gauss_wall.jl | 8 +- .../elixir_advection_basic.jl | 9 +- .../elixir_euler_basic.jl | 9 +- .../unstructured_2d_dgsem/elixir_euler_ec.jl | 9 +- .../elixir_euler_free_stream.jl | 9 +- .../elixir_euler_periodic.jl | 9 +- .../elixir_euler_sedov.jl | 7 +- .../elixir_euler_wall_bc.jl | 9 +- .../elixir_mhd_alfven_wave.jl | 8 +- .../unstructured_2d_dgsem/elixir_mhd_ec.jl | 8 +- .../elixir_shallowwater_dirichlet.jl | 8 +- .../elixir_shallowwater_ec.jl | 8 +- .../elixir_shallowwater_ec_shockcapturing.jl | 8 +- .../elixir_shallowwater_source_terms.jl | 8 +- ...ixir_shallowwater_three_mound_dam_break.jl | 13 +- ...lixir_shallowwater_twolayer_convergence.jl | 8 +- .../elixir_shallowwater_twolayer_dam_break.jl | 8 +- ...xir_shallowwater_twolayer_well_balanced.jl | 8 +- ...xir_shallowwater_wall_bc_shockcapturing.jl | 9 +- .../elixir_shallowwater_well_balanced.jl | 9 +- .../elixir_advection_basic.jl | 9 +- .../elixir_euler_free_stream.jl | 9 +- .../elixir_euler_source_terms.jl | 9 +- src/Trixi.jl | 1 + src/auxiliary/auxiliary.jl | 23 + src/auxiliary/t8code.jl | 235 +------ src/callbacks_step/amr.jl | 40 +- src/callbacks_step/amr_dg.jl | 11 +- src/callbacks_step/amr_dg2d.jl | 7 +- src/callbacks_step/amr_dg3d.jl | 7 +- src/callbacks_step/analysis_dg2d_parallel.jl | 6 +- src/callbacks_step/analysis_dg3d_parallel.jl | 6 +- src/callbacks_step/stepsize_dg2d.jl | 32 + src/callbacks_step/stepsize_dg3d.jl | 32 + src/meshes/p4est_mesh.jl | 4 + src/meshes/t8code_mesh.jl | 577 +++++++++++++++++- .../dgsem_p4est/containers_parallel.jl | 6 +- src/solvers/dgsem_p4est/dg_2d_parallel.jl | 21 +- src/solvers/dgsem_p4est/dg_3d_parallel.jl | 23 +- src/solvers/dgsem_p4est/dg_parallel.jl | 9 +- src/solvers/dgsem_t8code/containers.jl | 13 +- src/solvers/dgsem_t8code/containers_2d.jl | 3 +- src/solvers/dgsem_t8code/containers_3d.jl | 3 +- .../dgsem_t8code/containers_parallel.jl | 65 ++ src/solvers/dgsem_t8code/dg.jl | 7 +- src/solvers/dgsem_t8code/dg_parallel.jl | 135 ++++ src/solvers/dgsem_tree/dg_2d_parallel.jl | 3 +- test/test_mpi.jl | 4 +- test/test_mpi_p4est_2d.jl | 63 ++ test/test_mpi_p4est_3d.jl | 81 +++ test/test_mpi_t8code_2d.jl | 142 +++++ test/test_mpi_t8code_3d.jl | 180 ++++++ test/test_t8code_2d.jl | 7 +- 85 files changed, 1529 insertions(+), 640 deletions(-) create mode 100644 src/solvers/dgsem_t8code/containers_parallel.jl create mode 100644 src/solvers/dgsem_t8code/dg_parallel.jl create mode 100644 test/test_mpi_t8code_2d.jl create mode 100644 test/test_mpi_t8code_3d.jl diff --git a/Project.toml b/Project.toml index 0bbdec206d8..e99b08e0e81 100644 --- a/Project.toml +++ b/Project.toml @@ -9,6 +9,7 @@ ConstructionBase = "187b0558-2788-49d3-abe0-74a17ed4e7c9" DataStructures = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8" DiffEqBase = "2b5f629d-d688-5b77-993f-72d75c75574e" DiffEqCallbacks = "459566f4-90b8-5000-8ac3-15dfb0a30def" +Downloads = "f43a241f-c20a-4ad4-852c-f6b1247861c6" EllipsisNotation = "da5c29d0-fa7d-589e-88eb-ea29b0a81949" FillArrays = "1a297f60-69ca-5386-bcde-b61e274b549b" ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" @@ -57,6 +58,7 @@ ConstructionBase = "1.3" DataStructures = "0.18.15" DiffEqBase = "6 - 6.143" DiffEqCallbacks = "2.25" +Downloads = "1.6" EllipsisNotation = "1.0" FillArrays = 
"0.13.2, 1" ForwardDiff = "0.10.18" diff --git a/benchmark/elixir_2d_euler_vortex_unstructured.jl b/benchmark/elixir_2d_euler_vortex_unstructured.jl index 082b6648abf..43e4b6559de 100644 --- a/benchmark/elixir_2d_euler_vortex_unstructured.jl +++ b/benchmark/elixir_2d_euler_vortex_unstructured.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -49,11 +48,9 @@ end initial_condition = initial_condition_isentropic_vortex solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) -default_mesh_file = joinpath(@__DIR__, "mesh_uniform_cartesian.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/ranocha/f4ea19ba3b62348968c971db43d7798b/raw/a506abb9479c020920cf6068c142670fc1a9aadc/mesh_uniform_cartesian.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/ranocha/f4ea19ba3b62348968c971db43d7798b/raw/a506abb9479c020920cf6068c142670fc1a9aadc/mesh_uniform_cartesian.mesh", + joinpath(@__DIR__, "mesh_uniform_cartesian.mesh")) + mesh = UnstructuredMesh2D(mesh_file, periodicity = true) semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) diff --git a/examples/dgmulti_2d/elixir_euler_hohqmesh.jl b/examples/dgmulti_2d/elixir_euler_hohqmesh.jl index f534b5bc8ad..9b14a5c6827 100644 --- a/examples/dgmulti_2d/elixir_euler_hohqmesh.jl +++ b/examples/dgmulti_2d/elixir_euler_hohqmesh.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -30,12 +29,8 @@ dg = DGMulti(polydeg = 8, element_type = Quad(), approximation_type = SBP(), ############################################################################### # Get the curved quad mesh from a file (downloads the file if not available locally) - -default_mesh_file = joinpath(@__DIR__, "mesh_trixi_unstructured_mesh_docs.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/52056f1487853fab63b7f4ed7f171c80/raw/9d573387dfdbb8bce2a55db7246f4207663ac07f/mesh_trixi_unstructured_mesh_docs.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/52056f1487853fab63b7f4ed7f171c80/raw/9d573387dfdbb8bce2a55db7246f4207663ac07f/mesh_trixi_unstructured_mesh_docs.mesh", + joinpath(@__DIR__, "mesh_trixi_unstructured_mesh_docs.mesh")) mesh = DGMultiMesh(dg, mesh_file) diff --git a/examples/p4est_2d_dgsem/elixir_advection_amr_unstructured_flag.jl b/examples/p4est_2d_dgsem/elixir_advection_amr_unstructured_flag.jl index 0a50b3644f0..4bfb2d3e375 100644 --- a/examples/p4est_2d_dgsem/elixir_advection_amr_unstructured_flag.jl +++ b/examples/p4est_2d_dgsem/elixir_advection_amr_unstructured_flag.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -31,10 +30,8 @@ Trixi.validate_faces(faces) mapping_flag = Trixi.transfinite_mapping(faces) # Unstructured mesh with 24 cells of the square domain [-1, 1]^n -mesh_file = joinpath(@__DIR__, "square_unstructured_2.inp") -isfile(mesh_file) || - download("https://gist.githubusercontent.com/efaulhaber/63ff2ea224409e55ee8423b3a33e316a/raw/7db58af7446d1479753ae718930741c47a3b79b7/square_unstructured_2.inp", - mesh_file) +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/63ff2ea224409e55ee8423b3a33e316a/raw/7db58af7446d1479753ae718930741c47a3b79b7/square_unstructured_2.inp", + joinpath(@__DIR__, "square_unstructured_2.inp")) mesh = P4estMesh{2}(mesh_file, polydeg = 3, mapping = 
mapping_flag, diff --git a/examples/p4est_2d_dgsem/elixir_advection_unstructured_flag.jl b/examples/p4est_2d_dgsem/elixir_advection_unstructured_flag.jl index 37fcc547f60..1ab96925fe6 100644 --- a/examples/p4est_2d_dgsem/elixir_advection_unstructured_flag.jl +++ b/examples/p4est_2d_dgsem/elixir_advection_unstructured_flag.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -29,10 +28,8 @@ Trixi.validate_faces(faces) mapping_flag = Trixi.transfinite_mapping(faces) # Unstructured mesh with 24 cells of the square domain [-1, 1]^n -mesh_file = joinpath(@__DIR__, "square_unstructured_2.inp") -isfile(mesh_file) || - download("https://gist.githubusercontent.com/efaulhaber/63ff2ea224409e55ee8423b3a33e316a/raw/7db58af7446d1479753ae718930741c47a3b79b7/square_unstructured_2.inp", - mesh_file) +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/63ff2ea224409e55ee8423b3a33e316a/raw/7db58af7446d1479753ae718930741c47a3b79b7/square_unstructured_2.inp", + joinpath(@__DIR__, "square_unstructured_2.inp")) mesh = P4estMesh{2}(mesh_file, polydeg = 3, mapping = mapping_flag, diff --git a/examples/p4est_2d_dgsem/elixir_euler_blast_wave_amr.jl b/examples/p4est_2d_dgsem/elixir_euler_blast_wave_amr.jl index 0ca4fdc2eb7..5db5f74a686 100644 --- a/examples/p4est_2d_dgsem/elixir_euler_blast_wave_amr.jl +++ b/examples/p4est_2d_dgsem/elixir_euler_blast_wave_amr.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -50,10 +49,8 @@ volume_integral = VolumeIntegralShockCapturingHG(indicator_sc; solver = DGSEM(basis, surface_flux, volume_integral) # Unstructured mesh with 48 cells of the square domain [-1, 1]^n -mesh_file = joinpath(@__DIR__, "square_unstructured_1.inp") -isfile(mesh_file) || - download("https://gist.githubusercontent.com/efaulhaber/a075f8ec39a67fa9fad8f6f84342cbca/raw/a7206a02ed3a5d3cadacd8d9694ac154f9151db7/square_unstructured_1.inp", - mesh_file) +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/a075f8ec39a67fa9fad8f6f84342cbca/raw/a7206a02ed3a5d3cadacd8d9694ac154f9151db7/square_unstructured_1.inp", + joinpath(@__DIR__, "square_unstructured_1.inp")) mesh = P4estMesh{2}(mesh_file, polydeg = 3, initial_refinement_level = 1) diff --git a/examples/p4est_2d_dgsem/elixir_euler_double_mach_amr.jl b/examples/p4est_2d_dgsem/elixir_euler_double_mach_amr.jl index 92928146d7b..fbc11e89185 100644 --- a/examples/p4est_2d_dgsem/elixir_euler_double_mach_amr.jl +++ b/examples/p4est_2d_dgsem/elixir_euler_double_mach_amr.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -99,11 +98,8 @@ solver = DGSEM(polydeg = polydeg, surface_flux = surface_flux, volume_integral = volume_integral) # Get the unstructured quad mesh from a file (downloads the file if not available locally) -default_mesh_file = joinpath(@__DIR__, "abaqus_double_mach.inp") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/a0806ef0d03cf5ea221af523167b6e32/raw/61ed0eb017eb432d996ed119a52fb041fe363e8c/abaqus_double_mach.inp", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/a0806ef0d03cf5ea221af523167b6e32/raw/61ed0eb017eb432d996ed119a52fb041fe363e8c/abaqus_double_mach.inp", + joinpath(@__DIR__, "abaqus_double_mach.inp")) mesh = P4estMesh{2}(mesh_file) diff --git a/examples/p4est_2d_dgsem/elixir_euler_forward_step_amr.jl b/examples/p4est_2d_dgsem/elixir_euler_forward_step_amr.jl index 
0ec9fc222f2..654efd5e209 100644 --- a/examples/p4est_2d_dgsem/elixir_euler_forward_step_amr.jl +++ b/examples/p4est_2d_dgsem/elixir_euler_forward_step_amr.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -104,11 +103,8 @@ solver = DGSEM(polydeg = polydeg, surface_flux = surface_flux, volume_integral = volume_integral) # Get the unstructured quad mesh from a file (downloads the file if not available locally) -default_mesh_file = joinpath(@__DIR__, "abaqus_forward_step.inp") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/b346ee6aa5446687f128eab8b37d52a7/raw/cd1e1d43bebd8d2631a07caec45585ec8456ca4c/abaqus_forward_step.inp", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/b346ee6aa5446687f128eab8b37d52a7/raw/cd1e1d43bebd8d2631a07caec45585ec8456ca4c/abaqus_forward_step.inp", + joinpath(@__DIR__, "abaqus_forward_step.inp")) mesh = P4estMesh{2}(mesh_file) diff --git a/examples/p4est_2d_dgsem/elixir_euler_free_stream.jl b/examples/p4est_2d_dgsem/elixir_euler_free_stream.jl index 38307a7d781..ab11dc11567 100644 --- a/examples/p4est_2d_dgsem/elixir_euler_free_stream.jl +++ b/examples/p4est_2d_dgsem/elixir_euler_free_stream.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -31,10 +30,8 @@ end # Get the uncurved mesh from a file (downloads the file if not available locally) # Unstructured mesh with 48 cells of the square domain [-1, 1]^n -mesh_file = joinpath(@__DIR__, "square_unstructured_1.inp") -isfile(mesh_file) || - download("https://gist.githubusercontent.com/efaulhaber/a075f8ec39a67fa9fad8f6f84342cbca/raw/a7206a02ed3a5d3cadacd8d9694ac154f9151db7/square_unstructured_1.inp", - mesh_file) +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/a075f8ec39a67fa9fad8f6f84342cbca/raw/a7206a02ed3a5d3cadacd8d9694ac154f9151db7/square_unstructured_1.inp", + joinpath(@__DIR__, "square_unstructured_1.inp")) # Map the unstructured mesh with the mapping above mesh = P4estMesh{2}(mesh_file, polydeg = 3, mapping = mapping, initial_refinement_level = 1) diff --git a/examples/p4est_2d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_flag.jl b/examples/p4est_2d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_flag.jl index 09d018309a6..084fd699b8e 100644 --- a/examples/p4est_2d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_flag.jl +++ b/examples/p4est_2d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_flag.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -31,10 +30,8 @@ mapping_flag = Trixi.transfinite_mapping(faces) # Get the uncurved mesh from a file (downloads the file if not available locally) # Unstructured mesh with 24 cells of the square domain [-1, 1]^n -mesh_file = joinpath(@__DIR__, "square_unstructured_2.inp") -isfile(mesh_file) || - download("https://gist.githubusercontent.com/efaulhaber/63ff2ea224409e55ee8423b3a33e316a/raw/7db58af7446d1479753ae718930741c47a3b79b7/square_unstructured_2.inp", - mesh_file) +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/63ff2ea224409e55ee8423b3a33e316a/raw/7db58af7446d1479753ae718930741c47a3b79b7/square_unstructured_2.inp", + joinpath(@__DIR__, "square_unstructured_2.inp")) mesh = P4estMesh{2}(mesh_file, polydeg = 3, mapping = mapping_flag, diff --git a/examples/p4est_2d_dgsem/elixir_euler_supersonic_cylinder.jl 
b/examples/p4est_2d_dgsem/elixir_euler_supersonic_cylinder.jl index 36c5624ba97..76ee96d4766 100644 --- a/examples/p4est_2d_dgsem/elixir_euler_supersonic_cylinder.jl +++ b/examples/p4est_2d_dgsem/elixir_euler_supersonic_cylinder.jl @@ -13,7 +13,6 @@ # # Keywords: supersonic flow, shock capturing, AMR, unstructured curved mesh, positivity preservation, compressible Euler, 2D -using Downloads: download using OrdinaryDiffEq using Trixi @@ -82,11 +81,8 @@ solver = DGSEM(polydeg = polydeg, surface_flux = surface_flux, volume_integral = volume_integral) # Get the unstructured quad mesh from a file (downloads the file if not available locally) -default_mesh_file = joinpath(@__DIR__, "abaqus_cylinder_in_channel.inp") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/a08f78f6b185b63c3baeff911a63f628/raw/addac716ea0541f588b9d2bd3f92f643eb27b88f/abaqus_cylinder_in_channel.inp", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/a08f78f6b185b63c3baeff911a63f628/raw/addac716ea0541f588b9d2bd3f92f643eb27b88f/abaqus_cylinder_in_channel.inp", + joinpath(@__DIR__, "abaqus_cylinder_in_channel.inp")) mesh = P4estMesh{2}(mesh_file) diff --git a/examples/p4est_2d_dgsem/elixir_euler_wall_bc_amr.jl b/examples/p4est_2d_dgsem/elixir_euler_wall_bc_amr.jl index 8b8d05bade8..75e60d0c78b 100644 --- a/examples/p4est_2d_dgsem/elixir_euler_wall_bc_amr.jl +++ b/examples/p4est_2d_dgsem/elixir_euler_wall_bc_amr.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -39,11 +38,8 @@ solver = DGSEM(polydeg = 5, surface_flux = flux_lax_friedrichs, volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) # Get the unstructured quad mesh from a file (downloads the file if not available locally) -default_mesh_file = joinpath(@__DIR__, "abaqus_gingerbread_man.inp") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/0e9e990a04b5105d1d2e3096a6e41272/raw/0d924b1d7e7d3cc1070a6cc22fe1d501687aa6dd/abaqus_gingerbread_man.inp", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/0e9e990a04b5105d1d2e3096a6e41272/raw/0d924b1d7e7d3cc1070a6cc22fe1d501687aa6dd/abaqus_gingerbread_man.inp", + joinpath(@__DIR__, "abaqus_gingerbread_man.inp")) mesh = P4estMesh{2}(mesh_file) diff --git a/examples/p4est_2d_dgsem/elixir_mhd_rotor.jl b/examples/p4est_2d_dgsem/elixir_mhd_rotor.jl index 380db487356..089e82580c9 100644 --- a/examples/p4est_2d_dgsem/elixir_mhd_rotor.jl +++ b/examples/p4est_2d_dgsem/elixir_mhd_rotor.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -69,10 +68,8 @@ function mapping_twist(xi, eta) return SVector(x, y) end -mesh_file = joinpath(@__DIR__, "square_unstructured_2.inp") -isfile(mesh_file) || - download("https://gist.githubusercontent.com/efaulhaber/63ff2ea224409e55ee8423b3a33e316a/raw/7db58af7446d1479753ae718930741c47a3b79b7/square_unstructured_2.inp", - mesh_file) +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/63ff2ea224409e55ee8423b3a33e316a/raw/7db58af7446d1479753ae718930741c47a3b79b7/square_unstructured_2.inp", + joinpath(@__DIR__, "square_unstructured_2.inp")) mesh = P4estMesh{2}(mesh_file, polydeg = 4, diff --git a/examples/p4est_3d_dgsem/elixir_advection_amr_unstructured_curved.jl b/examples/p4est_3d_dgsem/elixir_advection_amr_unstructured_curved.jl index cd280cf5bf6..33afd2e030e 
100644 --- a/examples/p4est_3d_dgsem/elixir_advection_amr_unstructured_curved.jl +++ b/examples/p4est_3d_dgsem/elixir_advection_amr_unstructured_curved.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -49,10 +48,8 @@ function mapping(xi, eta, zeta) end # Unstructured mesh with 48 cells of the cube domain [-1, 1]^3 -mesh_file = joinpath(@__DIR__, "cube_unstructured_2.inp") -isfile(mesh_file) || - download("https://gist.githubusercontent.com/efaulhaber/b8df0033798e4926dec515fc045e8c2c/raw/b9254cde1d1fb64b6acc8416bc5ccdd77a240227/cube_unstructured_2.inp", - mesh_file) +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/b8df0033798e4926dec515fc045e8c2c/raw/b9254cde1d1fb64b6acc8416bc5ccdd77a240227/cube_unstructured_2.inp", + joinpath(@__DIR__, "cube_unstructured_2.inp")) # Mesh polydeg of 2 (half the solver polydeg) to ensure FSP (see above). mesh = P4estMesh{3}(mesh_file, polydeg = 2, diff --git a/examples/p4est_3d_dgsem/elixir_advection_unstructured_curved.jl b/examples/p4est_3d_dgsem/elixir_advection_unstructured_curved.jl index 6df9ac0b16a..83adcbf6a63 100644 --- a/examples/p4est_3d_dgsem/elixir_advection_unstructured_curved.jl +++ b/examples/p4est_3d_dgsem/elixir_advection_unstructured_curved.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -46,10 +45,8 @@ function mapping(xi, eta, zeta) end # Unstructured mesh with 68 cells of the cube domain [-1, 1]^3 -mesh_file = joinpath(@__DIR__, "cube_unstructured_1.inp") -isfile(mesh_file) || - download("https://gist.githubusercontent.com/efaulhaber/d45c8ac1e248618885fa7cc31a50ab40/raw/37fba24890ab37cfa49c39eae98b44faf4502882/cube_unstructured_1.inp", - mesh_file) +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/d45c8ac1e248618885fa7cc31a50ab40/raw/37fba24890ab37cfa49c39eae98b44faf4502882/cube_unstructured_1.inp", + joinpath(@__DIR__, "cube_unstructured_1.inp")) mesh = P4estMesh{3}(mesh_file, polydeg = 3, mapping = mapping, diff --git a/examples/p4est_3d_dgsem/elixir_euler_ec.jl b/examples/p4est_3d_dgsem/elixir_euler_ec.jl index d9d774a7ffc..91698545052 100644 --- a/examples/p4est_3d_dgsem/elixir_euler_ec.jl +++ b/examples/p4est_3d_dgsem/elixir_euler_ec.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -46,10 +45,8 @@ function mapping(xi_, eta_, zeta_) end # Unstructured mesh with 48 cells of the cube domain [-1, 1]^3 -mesh_file = joinpath(@__DIR__, "cube_unstructured_2.inp") -isfile(mesh_file) || - download("https://gist.githubusercontent.com/efaulhaber/b8df0033798e4926dec515fc045e8c2c/raw/b9254cde1d1fb64b6acc8416bc5ccdd77a240227/cube_unstructured_2.inp", - mesh_file) +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/b8df0033798e4926dec515fc045e8c2c/raw/b9254cde1d1fb64b6acc8416bc5ccdd77a240227/cube_unstructured_2.inp", + joinpath(@__DIR__, "cube_unstructured_2.inp")) mesh = P4estMesh{3}(mesh_file, polydeg = 5, mapping = mapping, diff --git a/examples/p4est_3d_dgsem/elixir_euler_free_stream.jl b/examples/p4est_3d_dgsem/elixir_euler_free_stream.jl index 24a781ca59e..6406a38186b 100644 --- a/examples/p4est_3d_dgsem/elixir_euler_free_stream.jl +++ b/examples/p4est_3d_dgsem/elixir_euler_free_stream.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -47,10 +46,8 @@ function mapping(xi_, eta_, zeta_) end # Unstructured mesh with 68 cells of the cube domain [-1, 1]^3 -mesh_file = joinpath(@__DIR__, "cube_unstructured_1.inp") -isfile(mesh_file) || - 
download("https://gist.githubusercontent.com/efaulhaber/d45c8ac1e248618885fa7cc31a50ab40/raw/37fba24890ab37cfa49c39eae98b44faf4502882/cube_unstructured_1.inp", - mesh_file) +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/d45c8ac1e248618885fa7cc31a50ab40/raw/37fba24890ab37cfa49c39eae98b44faf4502882/cube_unstructured_1.inp", + joinpath(@__DIR__, "cube_unstructured_1.inp")) # Mesh polydeg of 2 (half the solver polydeg) to ensure FSP (see above). mesh = P4estMesh{3}(mesh_file, polydeg = 2, diff --git a/examples/p4est_3d_dgsem/elixir_euler_free_stream_extruded.jl b/examples/p4est_3d_dgsem/elixir_euler_free_stream_extruded.jl index f56fe3a429d..08307a449a7 100644 --- a/examples/p4est_3d_dgsem/elixir_euler_free_stream_extruded.jl +++ b/examples/p4est_3d_dgsem/elixir_euler_free_stream_extruded.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -36,10 +35,8 @@ function mapping(xi, eta_, zeta_) end # Unstructured mesh with 48 cells of the cube domain [-1, 1]^3 -mesh_file = joinpath(@__DIR__, "cube_unstructured_2.inp") -isfile(mesh_file) || - download("https://gist.githubusercontent.com/efaulhaber/b8df0033798e4926dec515fc045e8c2c/raw/b9254cde1d1fb64b6acc8416bc5ccdd77a240227/cube_unstructured_2.inp", - mesh_file) +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/b8df0033798e4926dec515fc045e8c2c/raw/b9254cde1d1fb64b6acc8416bc5ccdd77a240227/cube_unstructured_2.inp", + joinpath(@__DIR__, "cube_unstructured_2.inp")) mesh = P4estMesh{3}(mesh_file, polydeg = 3, mapping = mapping, diff --git a/examples/p4est_3d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_curved.jl b/examples/p4est_3d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_curved.jl index 0de22eaea40..e7ca0cad4ba 100644 --- a/examples/p4est_3d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_curved.jl +++ b/examples/p4est_3d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_curved.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -49,10 +48,8 @@ function mapping(xi, eta, zeta) end # Unstructured mesh with 68 cells of the cube domain [-1, 1]^3 -mesh_file = joinpath(@__DIR__, "cube_unstructured_1.inp") -isfile(mesh_file) || - download("https://gist.githubusercontent.com/efaulhaber/d45c8ac1e248618885fa7cc31a50ab40/raw/37fba24890ab37cfa49c39eae98b44faf4502882/cube_unstructured_1.inp", - mesh_file) +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/d45c8ac1e248618885fa7cc31a50ab40/raw/37fba24890ab37cfa49c39eae98b44faf4502882/cube_unstructured_1.inp", + joinpath(@__DIR__, "cube_unstructured_1.inp")) # Mesh polydeg of 2 (half the solver polydeg) to ensure FSP (see above). 
mesh = P4estMesh{3}(mesh_file, polydeg = 2, diff --git a/examples/p4est_3d_dgsem/elixir_euler_source_terms_nonperiodic_hohqmesh.jl b/examples/p4est_3d_dgsem/elixir_euler_source_terms_nonperiodic_hohqmesh.jl index 0fa3a28fe8b..7d81d6739bf 100644 --- a/examples/p4est_3d_dgsem/elixir_euler_source_terms_nonperiodic_hohqmesh.jl +++ b/examples/p4est_3d_dgsem/elixir_euler_source_terms_nonperiodic_hohqmesh.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -19,11 +18,8 @@ boundary_conditions = Dict(:Bottom => boundary_condition, solver = DGSEM(polydeg = 4, surface_flux = flux_lax_friedrichs) # Unstructured 3D half circle mesh from HOHQMesh -default_mesh_file = joinpath(@__DIR__, "abaqus_half_circle_3d.inp") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/11461efbfb02c42e06aca338b3d0b645/raw/81deeb1ebc4945952c30af5bb75fe222a18d975c/abaqus_half_circle_3d.inp", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/11461efbfb02c42e06aca338b3d0b645/raw/81deeb1ebc4945952c30af5bb75fe222a18d975c/abaqus_half_circle_3d.inp", + joinpath(@__DIR__, "abaqus_half_circle_3d.inp")) mesh = P4estMesh{3}(mesh_file, initial_refinement_level = 0) diff --git a/examples/structured_2d_dgsem/elixir_shallowwater_well_balanced.jl b/examples/structured_2d_dgsem/elixir_shallowwater_well_balanced.jl index 61dd252fd83..a6a56aa807c 100644 --- a/examples/structured_2d_dgsem/elixir_shallowwater_well_balanced.jl +++ b/examples/structured_2d_dgsem/elixir_shallowwater_well_balanced.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi diff --git a/examples/t8code_2d_dgsem/elixir_advection_amr_unstructured_flag.jl b/examples/t8code_2d_dgsem/elixir_advection_amr_unstructured_flag.jl index f285d24fc6c..0923e328487 100644 --- a/examples/t8code_2d_dgsem/elixir_advection_amr_unstructured_flag.jl +++ b/examples/t8code_2d_dgsem/elixir_advection_amr_unstructured_flag.jl @@ -1,4 +1,3 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -31,10 +30,8 @@ Trixi.validate_faces(faces) mapping_flag = Trixi.transfinite_mapping(faces) # Unstructured mesh with 24 cells of the square domain [-1, 1]^n -mesh_file = joinpath(@__DIR__, "square_unstructured_2.inp") -isfile(mesh_file) || - download("https://gist.githubusercontent.com/efaulhaber/63ff2ea224409e55ee8423b3a33e316a/raw/7db58af7446d1479753ae718930741c47a3b79b7/square_unstructured_2.inp", - mesh_file) +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/63ff2ea224409e55ee8423b3a33e316a/raw/7db58af7446d1479753ae718930741c47a3b79b7/square_unstructured_2.inp", + joinpath(@__DIR__, "square_unstructured_2.inp")) # INP mesh files are only support by p4est. Hence, we # create a p4est connecvity object first from which diff --git a/examples/t8code_2d_dgsem/elixir_advection_unstructured_flag.jl b/examples/t8code_2d_dgsem/elixir_advection_unstructured_flag.jl index 5ba1ab15489..ba8f1b59b80 100644 --- a/examples/t8code_2d_dgsem/elixir_advection_unstructured_flag.jl +++ b/examples/t8code_2d_dgsem/elixir_advection_unstructured_flag.jl @@ -1,4 +1,3 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -28,10 +27,8 @@ Trixi.validate_faces(faces) mapping_flag = Trixi.transfinite_mapping(faces) # Unstructured mesh with 24 cells of the square domain [-1, 1]^n. 
-mesh_file = joinpath(@__DIR__, "square_unstructured_2.inp") -isfile(mesh_file) || - download("https://gist.githubusercontent.com/efaulhaber/63ff2ea224409e55ee8423b3a33e316a/raw/7db58af7446d1479753ae718930741c47a3b79b7/square_unstructured_2.inp", - mesh_file) +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/63ff2ea224409e55ee8423b3a33e316a/raw/7db58af7446d1479753ae718930741c47a3b79b7/square_unstructured_2.inp", + joinpath(@__DIR__, "square_unstructured_2.inp")) # INP mesh files are only support by p4est. Hence, we # create a p4est connecvity object first from which diff --git a/examples/t8code_2d_dgsem/elixir_euler_free_stream.jl b/examples/t8code_2d_dgsem/elixir_euler_free_stream.jl index 37d15f38566..5e6c4193c50 100644 --- a/examples/t8code_2d_dgsem/elixir_euler_free_stream.jl +++ b/examples/t8code_2d_dgsem/elixir_euler_free_stream.jl @@ -1,4 +1,3 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -30,10 +29,8 @@ end # Get the uncurved mesh from a file (downloads the file if not available locally) # Unstructured mesh with 48 cells of the square domain [-1, 1]^n -mesh_file = joinpath(@__DIR__, "square_unstructured_1.inp") -isfile(mesh_file) || - download("https://gist.githubusercontent.com/efaulhaber/a075f8ec39a67fa9fad8f6f84342cbca/raw/a7206a02ed3a5d3cadacd8d9694ac154f9151db7/square_unstructured_1.inp", - mesh_file) +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/a075f8ec39a67fa9fad8f6f84342cbca/raw/a7206a02ed3a5d3cadacd8d9694ac154f9151db7/square_unstructured_1.inp", + joinpath(@__DIR__, "square_unstructured_1.inp")) # INP mesh files are only support by p4est. Hence, we # create a p4est connecvity object first from which diff --git a/examples/t8code_2d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_flag.jl b/examples/t8code_2d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_flag.jl index bcc1abc560e..e496eb76729 100644 --- a/examples/t8code_2d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_flag.jl +++ b/examples/t8code_2d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_flag.jl @@ -1,4 +1,3 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -30,10 +29,8 @@ mapping_flag = Trixi.transfinite_mapping(faces) # Get the uncurved mesh from a file (downloads the file if not available locally) # Unstructured mesh with 24 cells of the square domain [-1, 1]^n -mesh_file = joinpath(@__DIR__, "square_unstructured_2.inp") -isfile(mesh_file) || - download("https://gist.githubusercontent.com/efaulhaber/63ff2ea224409e55ee8423b3a33e316a/raw/7db58af7446d1479753ae718930741c47a3b79b7/square_unstructured_2.inp", - mesh_file) +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/63ff2ea224409e55ee8423b3a33e316a/raw/7db58af7446d1479753ae718930741c47a3b79b7/square_unstructured_2.inp", + joinpath(@__DIR__, "square_unstructured_2.inp")) # INP mesh files are only support by p4est. 
Hence, we # create a p4est connecvity object first from which diff --git a/examples/t8code_2d_dgsem/elixir_mhd_rotor.jl b/examples/t8code_2d_dgsem/elixir_mhd_rotor.jl index adb154948fb..ff2e40ae607 100644 --- a/examples/t8code_2d_dgsem/elixir_mhd_rotor.jl +++ b/examples/t8code_2d_dgsem/elixir_mhd_rotor.jl @@ -1,4 +1,3 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -68,10 +67,8 @@ function mapping_twist(xi, eta) return SVector(x, y) end -mesh_file = joinpath(@__DIR__, "square_unstructured_2.inp") -isfile(mesh_file) || - download("https://gist.githubusercontent.com/efaulhaber/63ff2ea224409e55ee8423b3a33e316a/raw/7db58af7446d1479753ae718930741c47a3b79b7/square_unstructured_2.inp", - mesh_file) +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/63ff2ea224409e55ee8423b3a33e316a/raw/7db58af7446d1479753ae718930741c47a3b79b7/square_unstructured_2.inp", + joinpath(@__DIR__, "square_unstructured_2.inp")) # INP mesh files are only support by p4est. Hence, we # create a p4est connecvity object first from which diff --git a/examples/t8code_3d_dgsem/elixir_advection_amr_unstructured_curved.jl b/examples/t8code_3d_dgsem/elixir_advection_amr_unstructured_curved.jl index 617736afbdd..e7c0f4b7318 100644 --- a/examples/t8code_3d_dgsem/elixir_advection_amr_unstructured_curved.jl +++ b/examples/t8code_3d_dgsem/elixir_advection_amr_unstructured_curved.jl @@ -1,4 +1,3 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -47,11 +46,9 @@ function mapping(xi, eta, zeta) return SVector(5 * x, 5 * y, 5 * z) end -# Unstructured mesh with 48 cells of the cube domain [-1, 1]^3 -mesh_file = joinpath(@__DIR__, "cube_unstructured_2.inp") -isfile(mesh_file) || - download("https://gist.githubusercontent.com/efaulhaber/b8df0033798e4926dec515fc045e8c2c/raw/b9254cde1d1fb64b6acc8416bc5ccdd77a240227/cube_unstructured_2.inp", - mesh_file) +# Unstructured mesh with 48 cells of the cube domain [-1, 1]^3. +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/b8df0033798e4926dec515fc045e8c2c/raw/b9254cde1d1fb64b6acc8416bc5ccdd77a240227/cube_unstructured_2.inp", + joinpath(@__DIR__, "cube_unstructured_2.inp")) # INP mesh files are only support by p4est. Hence, we # create a p4est connectivity object first from which diff --git a/examples/t8code_3d_dgsem/elixir_advection_unstructured_curved.jl b/examples/t8code_3d_dgsem/elixir_advection_unstructured_curved.jl index df358435c9a..ee27ee117fe 100644 --- a/examples/t8code_3d_dgsem/elixir_advection_unstructured_curved.jl +++ b/examples/t8code_3d_dgsem/elixir_advection_unstructured_curved.jl @@ -1,4 +1,3 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -45,10 +44,8 @@ function mapping(xi, eta, zeta) end # Unstructured mesh with 68 cells of the cube domain [-1, 1]^3 -mesh_file = joinpath(@__DIR__, "cube_unstructured_1.inp") -isfile(mesh_file) || - download("https://gist.githubusercontent.com/efaulhaber/d45c8ac1e248618885fa7cc31a50ab40/raw/37fba24890ab37cfa49c39eae98b44faf4502882/cube_unstructured_1.inp", - mesh_file) +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/d45c8ac1e248618885fa7cc31a50ab40/raw/37fba24890ab37cfa49c39eae98b44faf4502882/cube_unstructured_1.inp", + joinpath(@__DIR__, "cube_unstructured_1.inp")) # INP mesh files are only support by p4est. 
Hence, we # create a p4est connectivity object first from which diff --git a/examples/t8code_3d_dgsem/elixir_euler_ec.jl b/examples/t8code_3d_dgsem/elixir_euler_ec.jl index 07745c3ac56..b720bfcd375 100644 --- a/examples/t8code_3d_dgsem/elixir_euler_ec.jl +++ b/examples/t8code_3d_dgsem/elixir_euler_ec.jl @@ -1,4 +1,3 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -45,10 +44,8 @@ function mapping(xi_, eta_, zeta_) end # Unstructured mesh with 48 cells of the cube domain [-1, 1]^3 -mesh_file = joinpath(@__DIR__, "cube_unstructured_2.inp") -isfile(mesh_file) || - download("https://gist.githubusercontent.com/efaulhaber/b8df0033798e4926dec515fc045e8c2c/raw/b9254cde1d1fb64b6acc8416bc5ccdd77a240227/cube_unstructured_2.inp", - mesh_file) +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/b8df0033798e4926dec515fc045e8c2c/raw/b9254cde1d1fb64b6acc8416bc5ccdd77a240227/cube_unstructured_2.inp", + joinpath(@__DIR__, "cube_unstructured_2.inp")) # INP mesh files are only support by p4est. Hence, we # create a p4est connectivity object first from which diff --git a/examples/t8code_3d_dgsem/elixir_euler_free_stream.jl b/examples/t8code_3d_dgsem/elixir_euler_free_stream.jl index e135d464810..b70a6091adf 100644 --- a/examples/t8code_3d_dgsem/elixir_euler_free_stream.jl +++ b/examples/t8code_3d_dgsem/elixir_euler_free_stream.jl @@ -1,4 +1,3 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -46,10 +45,8 @@ function mapping(xi_, eta_, zeta_) end # Unstructured mesh with 68 cells of the cube domain [-1, 1]^3 -mesh_file = joinpath(@__DIR__, "cube_unstructured_1.inp") -isfile(mesh_file) || - download("https://gist.githubusercontent.com/efaulhaber/d45c8ac1e248618885fa7cc31a50ab40/raw/37fba24890ab37cfa49c39eae98b44faf4502882/cube_unstructured_1.inp", - mesh_file) +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/d45c8ac1e248618885fa7cc31a50ab40/raw/37fba24890ab37cfa49c39eae98b44faf4502882/cube_unstructured_1.inp", + joinpath(@__DIR__, "cube_unstructured_1.inp")) # INP mesh files are only support by p4est. Hence, we # create a p4est connectivity object first from which diff --git a/examples/t8code_3d_dgsem/elixir_euler_free_stream_extruded.jl b/examples/t8code_3d_dgsem/elixir_euler_free_stream_extruded.jl index d129b59826e..6ae38d20b5a 100644 --- a/examples/t8code_3d_dgsem/elixir_euler_free_stream_extruded.jl +++ b/examples/t8code_3d_dgsem/elixir_euler_free_stream_extruded.jl @@ -1,4 +1,3 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -35,10 +34,8 @@ function mapping(xi, eta_, zeta_) end # Unstructured mesh with 48 cells of the cube domain [-1, 1]^3 -mesh_file = joinpath(@__DIR__, "cube_unstructured_2.inp") -isfile(mesh_file) || - download("https://gist.githubusercontent.com/efaulhaber/b8df0033798e4926dec515fc045e8c2c/raw/b9254cde1d1fb64b6acc8416bc5ccdd77a240227/cube_unstructured_2.inp", - mesh_file) +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/b8df0033798e4926dec515fc045e8c2c/raw/b9254cde1d1fb64b6acc8416bc5ccdd77a240227/cube_unstructured_2.inp", + joinpath(@__DIR__, "cube_unstructured_2.inp")) # INP mesh files are only support by p4est. 
Hence, we # create a p4est connecvity object first from which diff --git a/examples/t8code_3d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_curved.jl b/examples/t8code_3d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_curved.jl index d4664522bea..6856be36ea1 100644 --- a/examples/t8code_3d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_curved.jl +++ b/examples/t8code_3d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_curved.jl @@ -1,4 +1,3 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -48,10 +47,8 @@ function mapping(xi, eta, zeta) end # Unstructured mesh with 68 cells of the cube domain [-1, 1]^3 -mesh_file = joinpath(@__DIR__, "cube_unstructured_1.inp") -isfile(mesh_file) || - download("https://gist.githubusercontent.com/efaulhaber/d45c8ac1e248618885fa7cc31a50ab40/raw/37fba24890ab37cfa49c39eae98b44faf4502882/cube_unstructured_1.inp", - mesh_file) +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/d45c8ac1e248618885fa7cc31a50ab40/raw/37fba24890ab37cfa49c39eae98b44faf4502882/cube_unstructured_1.inp", + joinpath(@__DIR__, "cube_unstructured_1.inp")) # INP mesh files are only support by p4est. Hence, we # create a p4est connecvity object first from which diff --git a/examples/unstructured_2d_dgsem/elixir_acoustics_gauss_wall.jl b/examples/unstructured_2d_dgsem/elixir_acoustics_gauss_wall.jl index 8f8e867dca8..9741430d11c 100644 --- a/examples/unstructured_2d_dgsem/elixir_acoustics_gauss_wall.jl +++ b/examples/unstructured_2d_dgsem/elixir_acoustics_gauss_wall.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -14,11 +13,8 @@ equations = AcousticPerturbationEquations2D(v_mean_global = (0.0, -0.5), solver = DGSEM(polydeg = 4, surface_flux = flux_lax_friedrichs) # Create unstructured quadrilateral mesh from a file -default_mesh_file = joinpath(@__DIR__, "mesh_five_circles_in_circle.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/3c79baad6b4d73bb26ec6420b5d16f45/raw/22aefc4ec2107cf0bffc40e81dfbc52240c625b1/mesh_five_circles_in_circle.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/3c79baad6b4d73bb26ec6420b5d16f45/raw/22aefc4ec2107cf0bffc40e81dfbc52240c625b1/mesh_five_circles_in_circle.mesh", + joinpath(@__DIR__, "mesh_five_circles_in_circle.mesh")) mesh = UnstructuredMesh2D(mesh_file) diff --git a/examples/unstructured_2d_dgsem/elixir_advection_basic.jl b/examples/unstructured_2d_dgsem/elixir_advection_basic.jl index afef6c2c38f..c0ee453344d 100644 --- a/examples/unstructured_2d_dgsem/elixir_advection_basic.jl +++ b/examples/unstructured_2d_dgsem/elixir_advection_basic.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -16,12 +15,8 @@ solver = DGSEM(polydeg = 6, surface_flux = flux_lax_friedrichs) ############################################################################### # Get the curved quad mesh from a file (downloads the file if not available locally) - -default_mesh_file = joinpath(@__DIR__, "mesh_periodic_square_with_twist.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/12ce661d7c354c3d94c74b964b0f1c96/raw/8275b9a60c6e7ebbdea5fc4b4f091c47af3d5273/mesh_periodic_square_with_twist.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = 
Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/12ce661d7c354c3d94c74b964b0f1c96/raw/8275b9a60c6e7ebbdea5fc4b4f091c47af3d5273/mesh_periodic_square_with_twist.mesh", + joinpath(@__DIR__, "mesh_periodic_square_with_twist.mesh")) mesh = UnstructuredMesh2D(mesh_file, periodicity = true) diff --git a/examples/unstructured_2d_dgsem/elixir_euler_basic.jl b/examples/unstructured_2d_dgsem/elixir_euler_basic.jl index cd6a1995757..f8976120d53 100644 --- a/examples/unstructured_2d_dgsem/elixir_euler_basic.jl +++ b/examples/unstructured_2d_dgsem/elixir_euler_basic.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -25,12 +24,8 @@ solver = DGSEM(polydeg = 8, surface_flux = flux_lax_friedrichs) ############################################################################### # Get the curved quad mesh from a file (downloads the file if not available locally) - -default_mesh_file = joinpath(@__DIR__, "mesh_trixi_unstructured_mesh_docs.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/52056f1487853fab63b7f4ed7f171c80/raw/9d573387dfdbb8bce2a55db7246f4207663ac07f/mesh_trixi_unstructured_mesh_docs.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/52056f1487853fab63b7f4ed7f171c80/raw/9d573387dfdbb8bce2a55db7246f4207663ac07f/mesh_trixi_unstructured_mesh_docs.mesh", + joinpath(@__DIR__, "mesh_trixi_unstructured_mesh_docs.mesh")) mesh = UnstructuredMesh2D(mesh_file) diff --git a/examples/unstructured_2d_dgsem/elixir_euler_ec.jl b/examples/unstructured_2d_dgsem/elixir_euler_ec.jl index 0f53aa62a18..58b4d9a1dd2 100644 --- a/examples/unstructured_2d_dgsem/elixir_euler_ec.jl +++ b/examples/unstructured_2d_dgsem/elixir_euler_ec.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -19,12 +18,8 @@ solver = DGSEM(polydeg = 6, surface_flux = flux_ranocha, ############################################################################### # Get the curved quad mesh from a file - -default_mesh_file = joinpath(@__DIR__, "mesh_periodic_square_with_twist.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/12ce661d7c354c3d94c74b964b0f1c96/raw/8275b9a60c6e7ebbdea5fc4b4f091c47af3d5273/mesh_periodic_square_with_twist.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/12ce661d7c354c3d94c74b964b0f1c96/raw/8275b9a60c6e7ebbdea5fc4b4f091c47af3d5273/mesh_periodic_square_with_twist.mesh", + joinpath(@__DIR__, "mesh_periodic_square_with_twist.mesh")) mesh = UnstructuredMesh2D(mesh_file, periodicity = true) diff --git a/examples/unstructured_2d_dgsem/elixir_euler_free_stream.jl b/examples/unstructured_2d_dgsem/elixir_euler_free_stream.jl index a2fec1a320a..f266a3de0b2 100644 --- a/examples/unstructured_2d_dgsem/elixir_euler_free_stream.jl +++ b/examples/unstructured_2d_dgsem/elixir_euler_free_stream.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -26,12 +25,8 @@ solver = DGSEM(polydeg = 6, surface_flux = flux_hll) ############################################################################### # Get the curved quad mesh from a file (downloads the file if not available locally) - -default_mesh_file = joinpath(@__DIR__, "mesh_gingerbread_man.mesh") -isfile(default_mesh_file) || - 
download("https://gist.githubusercontent.com/andrewwinters5000/2c6440b5f8a57db131061ad7aa78ee2b/raw/1f89fdf2c874ff678c78afb6fe8dc784bdfd421f/mesh_gingerbread_man.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/2c6440b5f8a57db131061ad7aa78ee2b/raw/1f89fdf2c874ff678c78afb6fe8dc784bdfd421f/mesh_gingerbread_man.mesh", + joinpath(@__DIR__, "mesh_gingerbread_man.mesh")) mesh = UnstructuredMesh2D(mesh_file) diff --git a/examples/unstructured_2d_dgsem/elixir_euler_periodic.jl b/examples/unstructured_2d_dgsem/elixir_euler_periodic.jl index afd177f0740..e640001ad7f 100644 --- a/examples/unstructured_2d_dgsem/elixir_euler_periodic.jl +++ b/examples/unstructured_2d_dgsem/elixir_euler_periodic.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -19,12 +18,8 @@ solver = DGSEM(polydeg = 6, surface_flux = FluxRotated(flux_hll)) ############################################################################### # Get the curved quad mesh from a file (downloads the file if not available locally) - -default_mesh_file = joinpath(@__DIR__, "mesh_periodic_square_with_twist.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/12ce661d7c354c3d94c74b964b0f1c96/raw/8275b9a60c6e7ebbdea5fc4b4f091c47af3d5273/mesh_periodic_square_with_twist.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/12ce661d7c354c3d94c74b964b0f1c96/raw/8275b9a60c6e7ebbdea5fc4b4f091c47af3d5273/mesh_periodic_square_with_twist.mesh", + joinpath(@__DIR__, "mesh_periodic_square_with_twist.mesh")) mesh = UnstructuredMesh2D(mesh_file, periodicity = true) diff --git a/examples/unstructured_2d_dgsem/elixir_euler_sedov.jl b/examples/unstructured_2d_dgsem/elixir_euler_sedov.jl index e1cc0932969..06053273b74 100644 --- a/examples/unstructured_2d_dgsem/elixir_euler_sedov.jl +++ b/examples/unstructured_2d_dgsem/elixir_euler_sedov.jl @@ -55,11 +55,8 @@ solver = DGSEM(polydeg = polydeg, surface_flux = surface_flux, volume_integral = volume_integral) # Get the curved quad mesh from a file -default_mesh_file = joinpath(@__DIR__, "mesh_periodic_square_with_twist.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/12ce661d7c354c3d94c74b964b0f1c96/raw/8275b9a60c6e7ebbdea5fc4b4f091c47af3d5273/mesh_periodic_square_with_twist.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/12ce661d7c354c3d94c74b964b0f1c96/raw/8275b9a60c6e7ebbdea5fc4b4f091c47af3d5273/mesh_periodic_square_with_twist.mesh", + joinpath(@__DIR__, "mesh_periodic_square_with_twist.mesh")) mesh = UnstructuredMesh2D(mesh_file, periodicity = true) diff --git a/examples/unstructured_2d_dgsem/elixir_euler_wall_bc.jl b/examples/unstructured_2d_dgsem/elixir_euler_wall_bc.jl index b2abefe7eeb..65e5eb51ce6 100644 --- a/examples/unstructured_2d_dgsem/elixir_euler_wall_bc.jl +++ b/examples/unstructured_2d_dgsem/elixir_euler_wall_bc.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -40,12 +39,8 @@ solver = DGSEM(polydeg = 4, surface_flux = flux_hll) ############################################################################### # Get the curved quad mesh from a file - -default_mesh_file = joinpath(@__DIR__, "mesh_box_around_circle.mesh") -isfile(default_mesh_file) || - 
download("https://gist.githubusercontent.com/andrewwinters5000/8b9b11a1eedfa54b215c122c3d17b271/raw/0d2b5d98c87e67a6f384693a8b8e54b4c9fcbf3d/mesh_box_around_circle.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/8b9b11a1eedfa54b215c122c3d17b271/raw/0d2b5d98c87e67a6f384693a8b8e54b4c9fcbf3d/mesh_box_around_circle.mesh", + joinpath(@__DIR__, "mesh_box_around_circle.mesh")) mesh = UnstructuredMesh2D(mesh_file) diff --git a/examples/unstructured_2d_dgsem/elixir_mhd_alfven_wave.jl b/examples/unstructured_2d_dgsem/elixir_mhd_alfven_wave.jl index 3ed3e828ca8..0c7152a6ea0 100644 --- a/examples/unstructured_2d_dgsem/elixir_mhd_alfven_wave.jl +++ b/examples/unstructured_2d_dgsem/elixir_mhd_alfven_wave.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -17,12 +16,9 @@ solver = DGSEM(polydeg = 7, volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) # Get the unstructured quad mesh from a file (downloads the file if not available locally) -default_mesh_file = joinpath(@__DIR__, "mesh_alfven_wave_with_twist_and_flip.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/8f8cd23df27fcd494553f2a89f3c1ba4/raw/85e3c8d976bbe57ca3d559d653087b0889535295/mesh_alfven_wave_with_twist_and_flip.mesh", - default_mesh_file) +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/8f8cd23df27fcd494553f2a89f3c1ba4/raw/85e3c8d976bbe57ca3d559d653087b0889535295/mesh_alfven_wave_with_twist_and_flip.mesh", + joinpath(@__DIR__, "mesh_alfven_wave_with_twist_and_flip.mesh")) -mesh_file = default_mesh_file mesh = UnstructuredMesh2D(mesh_file, periodicity = true) semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) diff --git a/examples/unstructured_2d_dgsem/elixir_mhd_ec.jl b/examples/unstructured_2d_dgsem/elixir_mhd_ec.jl index a40f92cac02..805934e305d 100644 --- a/examples/unstructured_2d_dgsem/elixir_mhd_ec.jl +++ b/examples/unstructured_2d_dgsem/elixir_mhd_ec.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -35,11 +34,8 @@ solver = DGSEM(polydeg = 6, volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) # Get the unstructured quad mesh from a file (downloads the file if not available locally) -default_mesh_file = joinpath(@__DIR__, "mesh_alfven_wave_with_twist_and_flip.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/8f8cd23df27fcd494553f2a89f3c1ba4/raw/85e3c8d976bbe57ca3d559d653087b0889535295/mesh_alfven_wave_with_twist_and_flip.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/8f8cd23df27fcd494553f2a89f3c1ba4/raw/85e3c8d976bbe57ca3d559d653087b0889535295/mesh_alfven_wave_with_twist_and_flip.mesh", + joinpath(@__DIR__, "mesh_alfven_wave_with_twist_and_flip.mesh")) mesh = UnstructuredMesh2D(mesh_file, periodicity = true) diff --git a/examples/unstructured_2d_dgsem/elixir_shallowwater_dirichlet.jl b/examples/unstructured_2d_dgsem/elixir_shallowwater_dirichlet.jl index 1148f25fae3..df1a69192ce 100644 --- a/examples/unstructured_2d_dgsem/elixir_shallowwater_dirichlet.jl +++ b/examples/unstructured_2d_dgsem/elixir_shallowwater_dirichlet.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -38,11 +37,8 @@ solver = DGSEM(polydeg = 4, surface_flux = (flux_hll, flux_nonconservative_fjord # This 
setup is for the curved, split form well-balancedness testing # Get the unstructured quad mesh from a file (downloads the file if not available locally) -default_mesh_file = joinpath(@__DIR__, "mesh_outer_circle.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/9beddd9cd00e2a0a15865129eeb24928/raw/be71e67fa48bc4e1e97f5f6cd77c3ed34c6ba9be/mesh_outer_circle.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/9beddd9cd00e2a0a15865129eeb24928/raw/be71e67fa48bc4e1e97f5f6cd77c3ed34c6ba9be/mesh_outer_circle.mesh", + joinpath(@__DIR__, "mesh_outer_circle.mesh")) mesh = UnstructuredMesh2D(mesh_file) diff --git a/examples/unstructured_2d_dgsem/elixir_shallowwater_ec.jl b/examples/unstructured_2d_dgsem/elixir_shallowwater_ec.jl index 8e9d396d826..9122fb8287d 100644 --- a/examples/unstructured_2d_dgsem/elixir_shallowwater_ec.jl +++ b/examples/unstructured_2d_dgsem/elixir_shallowwater_ec.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -25,11 +24,8 @@ solver = DGSEM(polydeg = 6, # This setup is for the curved, split form entropy conservation testing (needs periodic BCs) # Get the unstructured quad mesh from a file (downloads the file if not available locally) -default_mesh_file = joinpath(@__DIR__, "mesh_alfven_wave_with_twist_and_flip.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/8f8cd23df27fcd494553f2a89f3c1ba4/raw/85e3c8d976bbe57ca3d559d653087b0889535295/mesh_alfven_wave_with_twist_and_flip.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/8f8cd23df27fcd494553f2a89f3c1ba4/raw/85e3c8d976bbe57ca3d559d653087b0889535295/mesh_alfven_wave_with_twist_and_flip.mesh", + joinpath(@__DIR__, "mesh_alfven_wave_with_twist_and_flip.mesh")) mesh = UnstructuredMesh2D(mesh_file, periodicity = true) diff --git a/examples/unstructured_2d_dgsem/elixir_shallowwater_ec_shockcapturing.jl b/examples/unstructured_2d_dgsem/elixir_shallowwater_ec_shockcapturing.jl index 94202b81df0..98408db5a78 100644 --- a/examples/unstructured_2d_dgsem/elixir_shallowwater_ec_shockcapturing.jl +++ b/examples/unstructured_2d_dgsem/elixir_shallowwater_ec_shockcapturing.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -34,11 +33,8 @@ solver = DGSEM(basis, surface_flux, volume_integral) # This setup is for the curved, split form entropy conservation testing (needs periodic BCs) # Get the unstructured quad mesh from a file (downloads the file if not available locally) -default_mesh_file = joinpath(@__DIR__, "mesh_alfven_wave_with_twist_and_flip.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/8f8cd23df27fcd494553f2a89f3c1ba4/raw/85e3c8d976bbe57ca3d559d653087b0889535295/mesh_alfven_wave_with_twist_and_flip.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/8f8cd23df27fcd494553f2a89f3c1ba4/raw/85e3c8d976bbe57ca3d559d653087b0889535295/mesh_alfven_wave_with_twist_and_flip.mesh", + joinpath(@__DIR__, "mesh_alfven_wave_with_twist_and_flip.mesh")) mesh = UnstructuredMesh2D(mesh_file, periodicity = true) diff --git a/examples/unstructured_2d_dgsem/elixir_shallowwater_source_terms.jl b/examples/unstructured_2d_dgsem/elixir_shallowwater_source_terms.jl index 
07668688406..a7aa5808955 100644 --- a/examples/unstructured_2d_dgsem/elixir_shallowwater_source_terms.jl +++ b/examples/unstructured_2d_dgsem/elixir_shallowwater_source_terms.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -23,11 +22,8 @@ solver = DGSEM(polydeg = 6, surface_flux = surface_flux, # This setup is for the curved, split form convergence test on a periodic domain # Get the unstructured quad mesh from a file (downloads the file if not available locally) -default_mesh_file = joinpath(@__DIR__, "mesh_alfven_wave_with_twist_and_flip.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/8f8cd23df27fcd494553f2a89f3c1ba4/raw/85e3c8d976bbe57ca3d559d653087b0889535295/mesh_alfven_wave_with_twist_and_flip.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/8f8cd23df27fcd494553f2a89f3c1ba4/raw/85e3c8d976bbe57ca3d559d653087b0889535295/mesh_alfven_wave_with_twist_and_flip.mesh", + joinpath(@__DIR__, "mesh_alfven_wave_with_twist_and_flip.mesh")) mesh = UnstructuredMesh2D(mesh_file, periodicity = true) diff --git a/examples/unstructured_2d_dgsem/elixir_shallowwater_three_mound_dam_break.jl b/examples/unstructured_2d_dgsem/elixir_shallowwater_three_mound_dam_break.jl index 6164f9d4a55..df321aad267 100644 --- a/examples/unstructured_2d_dgsem/elixir_shallowwater_three_mound_dam_break.jl +++ b/examples/unstructured_2d_dgsem/elixir_shallowwater_three_mound_dam_break.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -93,16 +92,10 @@ solver = DGSEM(basis, surface_flux, volume_integral) ############################################################################### # Get the unstructured quad mesh from a file (downloads the file if not available locally) +mesh_file = Trixi.download("https://gist.githubusercontent.com/svengoldberg/c3c87fecb3fc6e46be7f0d1c7cb35f83/raw/e817ecd9e6c4686581d63c46128f9b6468d396d3/mesh_three_mound.mesh", + joinpath(@__DIR__, "mesh_three_mound.mesh")) -default_meshfile = joinpath(@__DIR__, "mesh_three_mound.mesh") - -isfile(default_meshfile) || - download("https://gist.githubusercontent.com/svengoldberg/c3c87fecb3fc6e46be7f0d1c7cb35f83/raw/e817ecd9e6c4686581d63c46128f9b6468d396d3/mesh_three_mound.mesh", - default_meshfile) - -meshfile = default_meshfile - -mesh = UnstructuredMesh2D(meshfile) +mesh = UnstructuredMesh2D(mesh_file) # Create the semi discretization object semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver; diff --git a/examples/unstructured_2d_dgsem/elixir_shallowwater_twolayer_convergence.jl b/examples/unstructured_2d_dgsem/elixir_shallowwater_twolayer_convergence.jl index 0b86095663a..fcc08b6f991 100644 --- a/examples/unstructured_2d_dgsem/elixir_shallowwater_twolayer_convergence.jl +++ b/examples/unstructured_2d_dgsem/elixir_shallowwater_twolayer_convergence.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -24,11 +23,8 @@ solver = DGSEM(polydeg = 6, surface_flux = surface_flux, # This setup is for the curved, split form convergence test on a periodic domain # Get the unstructured quad mesh from a file (downloads the file if not available locally) -default_mesh_file = joinpath(@__DIR__, "mesh_alfven_wave_with_twist_and_flip.mesh") -isfile(default_mesh_file) || - 
download("https://gist.githubusercontent.com/andrewwinters5000/8f8cd23df27fcd494553f2a89f3c1ba4/raw/85e3c8d976bbe57ca3d559d653087b0889535295/mesh_alfven_wave_with_twist_and_flip.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/8f8cd23df27fcd494553f2a89f3c1ba4/raw/85e3c8d976bbe57ca3d559d653087b0889535295/mesh_alfven_wave_with_twist_and_flip.mesh", + joinpath(@__DIR__, "mesh_alfven_wave_with_twist_and_flip.mesh")) mesh = UnstructuredMesh2D(mesh_file, periodicity = true) diff --git a/examples/unstructured_2d_dgsem/elixir_shallowwater_twolayer_dam_break.jl b/examples/unstructured_2d_dgsem/elixir_shallowwater_twolayer_dam_break.jl index 4ad5f7e3201..821f31c52ac 100644 --- a/examples/unstructured_2d_dgsem/elixir_shallowwater_twolayer_dam_break.jl +++ b/examples/unstructured_2d_dgsem/elixir_shallowwater_twolayer_dam_break.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -48,11 +47,8 @@ solver = DGSEM(polydeg = 6, surface_flux = surface_flux, ############################################################################### # Get the unstructured quad mesh from a file (downloads the file if not available locally) -default_mesh_file = joinpath(@__DIR__, "mesh_alfven_wave_with_twist_and_flip.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/8f8cd23df27fcd494553f2a89f3c1ba4/raw/85e3c8d976bbe57ca3d559d653087b0889535295/mesh_alfven_wave_with_twist_and_flip.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/8f8cd23df27fcd494553f2a89f3c1ba4/raw/85e3c8d976bbe57ca3d559d653087b0889535295/mesh_alfven_wave_with_twist_and_flip.mesh", + joinpath(@__DIR__, "mesh_alfven_wave_with_twist_and_flip.mesh")) mesh = UnstructuredMesh2D(mesh_file, periodicity = false) diff --git a/examples/unstructured_2d_dgsem/elixir_shallowwater_twolayer_well_balanced.jl b/examples/unstructured_2d_dgsem/elixir_shallowwater_twolayer_well_balanced.jl index 6a727df2502..ca1f54595bb 100644 --- a/examples/unstructured_2d_dgsem/elixir_shallowwater_twolayer_well_balanced.jl +++ b/examples/unstructured_2d_dgsem/elixir_shallowwater_twolayer_well_balanced.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -42,11 +41,8 @@ solver = DGSEM(polydeg = 6, surface_flux = surface_flux, # This setup is for the curved, split form well-balancedness testing # Get the unstructured quad mesh from a file (downloads the file if not available locally) -default_mesh_file = joinpath(@__DIR__, "mesh_alfven_wave_with_twist_and_flip.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/8f8cd23df27fcd494553f2a89f3c1ba4/raw/85e3c8d976bbe57ca3d559d653087b0889535295/mesh_alfven_wave_with_twist_and_flip.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/8f8cd23df27fcd494553f2a89f3c1ba4/raw/85e3c8d976bbe57ca3d559d653087b0889535295/mesh_alfven_wave_with_twist_and_flip.mesh", + joinpath(@__DIR__, "mesh_alfven_wave_with_twist_and_flip.mesh")) mesh = UnstructuredMesh2D(mesh_file, periodicity = true) diff --git a/examples/unstructured_2d_dgsem/elixir_shallowwater_wall_bc_shockcapturing.jl b/examples/unstructured_2d_dgsem/elixir_shallowwater_wall_bc_shockcapturing.jl index 76b9642d595..f115113ed27 100644 --- 
a/examples/unstructured_2d_dgsem/elixir_shallowwater_wall_bc_shockcapturing.jl +++ b/examples/unstructured_2d_dgsem/elixir_shallowwater_wall_bc_shockcapturing.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -55,12 +54,8 @@ solver = DGSEM(polydeg = polydeg, surface_flux = surface_flux, ############################################################################### # Get the unstructured quad mesh from a file (downloads the file if not available locally) - -default_mesh_file = joinpath(@__DIR__, "mesh_outer_circle.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/9beddd9cd00e2a0a15865129eeb24928/raw/be71e67fa48bc4e1e97f5f6cd77c3ed34c6ba9be/mesh_outer_circle.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/9beddd9cd00e2a0a15865129eeb24928/raw/be71e67fa48bc4e1e97f5f6cd77c3ed34c6ba9be/mesh_outer_circle.mesh", + joinpath(@__DIR__, "mesh_outer_circle.mesh")) mesh = UnstructuredMesh2D(mesh_file) diff --git a/examples/unstructured_2d_dgsem/elixir_shallowwater_well_balanced.jl b/examples/unstructured_2d_dgsem/elixir_shallowwater_well_balanced.jl index bf4d0be682a..6bad3a77f03 100644 --- a/examples/unstructured_2d_dgsem/elixir_shallowwater_well_balanced.jl +++ b/examples/unstructured_2d_dgsem/elixir_shallowwater_well_balanced.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -36,13 +35,9 @@ solver = DGSEM(polydeg = 6, surface_flux = surface_flux, ############################################################################### # This setup is for the curved, split form well-balancedness testing - # Get the unstructured quad mesh from a file (downloads the file if not available locally) -default_mesh_file = joinpath(@__DIR__, "mesh_alfven_wave_with_twist_and_flip.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/8f8cd23df27fcd494553f2a89f3c1ba4/raw/85e3c8d976bbe57ca3d559d653087b0889535295/mesh_alfven_wave_with_twist_and_flip.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/8f8cd23df27fcd494553f2a89f3c1ba4/raw/85e3c8d976bbe57ca3d559d653087b0889535295/mesh_alfven_wave_with_twist_and_flip.mesh", + joinpath(@__DIR__, "mesh_alfven_wave_with_twist_and_flip.mesh")) mesh = UnstructuredMesh2D(mesh_file, periodicity = true) diff --git a/examples/unstructured_2d_fdsbp/elixir_advection_basic.jl b/examples/unstructured_2d_fdsbp/elixir_advection_basic.jl index c181203e7a4..fe7e708f3b3 100644 --- a/examples/unstructured_2d_fdsbp/elixir_advection_basic.jl +++ b/examples/unstructured_2d_fdsbp/elixir_advection_basic.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -21,12 +20,8 @@ solver = FDSBP(D_SBP, ############################################################################### # Get the curved quad mesh from a file (downloads the file if not available locally) - -default_mesh_file = joinpath(@__DIR__, "mesh_periodic_square_with_twist.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/12ce661d7c354c3d94c74b964b0f1c96/raw/8275b9a60c6e7ebbdea5fc4b4f091c47af3d5273/mesh_periodic_square_with_twist.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = 
Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/12ce661d7c354c3d94c74b964b0f1c96/raw/8275b9a60c6e7ebbdea5fc4b4f091c47af3d5273/mesh_periodic_square_with_twist.mesh", + joinpath(@__DIR__, "mesh_periodic_square_with_twist.mesh")) mesh = UnstructuredMesh2D(mesh_file, periodicity = true) diff --git a/examples/unstructured_2d_fdsbp/elixir_euler_free_stream.jl b/examples/unstructured_2d_fdsbp/elixir_euler_free_stream.jl index 7ada50c0c65..25a81c16bf9 100644 --- a/examples/unstructured_2d_fdsbp/elixir_euler_free_stream.jl +++ b/examples/unstructured_2d_fdsbp/elixir_euler_free_stream.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -33,12 +32,8 @@ solver = FDSBP(D_SBP, ############################################################################### # Get the curved quad mesh from a file (downloads the file if not available locally) - -default_mesh_file = joinpath(@__DIR__, "mesh_gingerbread_man.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/2c6440b5f8a57db131061ad7aa78ee2b/raw/1f89fdf2c874ff678c78afb6fe8dc784bdfd421f/mesh_gingerbread_man.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/2c6440b5f8a57db131061ad7aa78ee2b/raw/1f89fdf2c874ff678c78afb6fe8dc784bdfd421f/mesh_gingerbread_man.mesh", + joinpath(@__DIR__, "mesh_gingerbread_man.mesh")) mesh = UnstructuredMesh2D(mesh_file) diff --git a/examples/unstructured_2d_fdsbp/elixir_euler_source_terms.jl b/examples/unstructured_2d_fdsbp/elixir_euler_source_terms.jl index edcd221bf59..5f11d41ad5c 100644 --- a/examples/unstructured_2d_fdsbp/elixir_euler_source_terms.jl +++ b/examples/unstructured_2d_fdsbp/elixir_euler_source_terms.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -22,12 +21,8 @@ solver = FDSBP(D_SBP, ############################################################################### # Get the curved quad mesh from a file (downloads the file if not available locally) - -default_mesh_file = joinpath(@__DIR__, "mesh_periodic_square_with_twist.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/12ce661d7c354c3d94c74b964b0f1c96/raw/8275b9a60c6e7ebbdea5fc4b4f091c47af3d5273/mesh_periodic_square_with_twist.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/12ce661d7c354c3d94c74b964b0f1c96/raw/8275b9a60c6e7ebbdea5fc4b4f091c47af3d5273/mesh_periodic_square_with_twist.mesh", + joinpath(@__DIR__, "mesh_periodic_square_with_twist.mesh")) mesh = UnstructuredMesh2D(mesh_file, periodicity = true) diff --git a/src/Trixi.jl b/src/Trixi.jl index e18b2f6415c..8d74fbc9736 100644 --- a/src/Trixi.jl +++ b/src/Trixi.jl @@ -39,6 +39,7 @@ import SciMLBase: get_du, get_tmp_cache, u_modified!, get_proposed_dt, set_proposed_dt!, terminate!, remake, add_tstop!, has_tstop, first_tstop +using Downloads: Downloads using CodeTracking: CodeTracking using ConstructionBase: ConstructionBase using DiffEqCallbacks: PeriodicCallback, PeriodicCallbackAffect diff --git a/src/auxiliary/auxiliary.jl b/src/auxiliary/auxiliary.jl index 1f7d30d6aa8..92da9a5ba8b 100644 --- a/src/auxiliary/auxiliary.jl +++ b/src/auxiliary/auxiliary.jl @@ -345,4 +345,27 @@ function register_error_hints() return nothing end + +""" + Trixi.download(src_url, file_path) + +Download a file from given `src_url` to given `file_path` if +`file_path` is 
not already a file. This function just returns +`file_path`. +This is a small wrapper of `Downloads.download(src_url, file_path)` +that avoids race conditions when multiple MPI ranks are used. +""" +function download(src_url, file_path) + # Note that `mpi_isroot()` is also `true` if running + # in serial (without MPI). + if mpi_isroot() + isfile(file_path) || Downloads.download(src_url, file_path) + end + + if mpi_isparallel() + MPI.Barrier(mpi_comm()) + end + + return file_path +end end # @muladd diff --git a/src/auxiliary/t8code.jl b/src/auxiliary/t8code.jl index db01476bb86..7c1399fc803 100644 --- a/src/auxiliary/t8code.jl +++ b/src/auxiliary/t8code.jl @@ -46,230 +46,6 @@ function init_t8code() return nothing end -function trixi_t8_unref_forest(forest) - t8_forest_unref(Ref(forest)) -end - -function t8_free(ptr) - T8code.Libt8.sc_free(t8_get_package_id(), ptr) -end - -function trixi_t8_count_interfaces(forest) - # Check that forest is a committed, that is valid and usable, forest. - @assert t8_forest_is_committed(forest) != 0 - - # Get the number of local elements of forest. - num_local_elements = t8_forest_get_local_num_elements(forest) - # Get the number of ghost elements of forest. - num_ghost_elements = t8_forest_get_num_ghosts(forest) - # Get the number of trees that have elements of this process. - num_local_trees = t8_forest_get_num_local_trees(forest) - - current_index = t8_locidx_t(0) - - local_num_conform = 0 - local_num_mortars = 0 - local_num_boundary = 0 - - for itree in 0:(num_local_trees - 1) - tree_class = t8_forest_get_tree_class(forest, itree) - eclass_scheme = t8_forest_get_eclass_scheme(forest, tree_class) - - # Get the number of elements of this tree. - num_elements_in_tree = t8_forest_get_tree_num_elements(forest, itree) - - for ielement in 0:(num_elements_in_tree - 1) - element = t8_forest_get_element_in_tree(forest, itree, ielement) - - level = t8_element_level(eclass_scheme, element) - - num_faces = t8_element_num_faces(eclass_scheme, element) - - for iface in 0:(num_faces - 1) - pelement_indices_ref = Ref{Ptr{t8_locidx_t}}() - pneighbor_leafs_ref = Ref{Ptr{Ptr{t8_element}}}() - pneigh_scheme_ref = Ref{Ptr{t8_eclass_scheme}}() - - dual_faces_ref = Ref{Ptr{Cint}}() - num_neighbors_ref = Ref{Cint}() - - forest_is_balanced = Cint(1) - - t8_forest_leaf_face_neighbors(forest, itree, element, - pneighbor_leafs_ref, iface, dual_faces_ref, - num_neighbors_ref, - pelement_indices_ref, pneigh_scheme_ref, - forest_is_balanced) - - num_neighbors = num_neighbors_ref[] - neighbor_ielements = unsafe_wrap(Array, pelement_indices_ref[], - num_neighbors) - neighbor_leafs = unsafe_wrap(Array, pneighbor_leafs_ref[], num_neighbors) - neighbor_scheme = pneigh_scheme_ref[] - - if num_neighbors > 0 - neighbor_level = t8_element_level(neighbor_scheme, neighbor_leafs[1]) - - # Conforming interface: The second condition ensures we only visit the interface once. 
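For reference, a minimal usage sketch of the new `Trixi.download` helper as it is used by the updated example elixirs above. The URL and file name below are placeholders, not files referenced by this patch:

```julia
using Trixi

# Fetch the mesh file only if it is not already present locally; in parallel
# runs only the MPI root rank downloads, the other ranks wait at a barrier.
mesh_file = Trixi.download("https://example.com/meshes/some_mesh.mesh", # placeholder URL
                           joinpath(@__DIR__, "some_mesh.mesh"))        # placeholder file name

mesh = UnstructuredMesh2D(mesh_file)
```

Compared with the previous `isfile(mesh_file) || download(url, mesh_file)` idiom, this avoids the race condition of several MPI ranks writing the same file concurrently and always returns the local file path.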
- if level == neighbor_level && current_index <= neighbor_ielements[1] - local_num_conform += 1 - elseif level < neighbor_level - local_num_mortars += 1 - end - else - local_num_boundary += 1 - end - - t8_free(dual_faces_ref[]) - t8_free(pneighbor_leafs_ref[]) - t8_free(pelement_indices_ref[]) - end # for - - current_index += 1 - end # for - end # for - - return (interfaces = local_num_conform, - mortars = local_num_mortars, - boundaries = local_num_boundary) -end - -function trixi_t8_fill_mesh_info(forest, elements, interfaces, mortars, boundaries, - boundary_names) - # Check that forest is a committed, that is valid and usable, forest. - @assert t8_forest_is_committed(forest) != 0 - - # Get the number of local elements of forest. - num_local_elements = t8_forest_get_local_num_elements(forest) - # Get the number of ghost elements of forest. - num_ghost_elements = t8_forest_get_num_ghosts(forest) - # Get the number of trees that have elements of this process. - num_local_trees = t8_forest_get_num_local_trees(forest) - - current_index = t8_locidx_t(0) - - local_num_conform = 0 - local_num_mortars = 0 - local_num_boundary = 0 - - for itree in 0:(num_local_trees - 1) - tree_class = t8_forest_get_tree_class(forest, itree) - eclass_scheme = t8_forest_get_eclass_scheme(forest, tree_class) - - # Get the number of elements of this tree. - num_elements_in_tree = t8_forest_get_tree_num_elements(forest, itree) - - for ielement in 0:(num_elements_in_tree - 1) - element = t8_forest_get_element_in_tree(forest, itree, ielement) - - level = t8_element_level(eclass_scheme, element) - - num_faces = t8_element_num_faces(eclass_scheme, element) - - for iface in 0:(num_faces - 1) - - # Compute the `orientation` of the touching faces. - if t8_element_is_root_boundary(eclass_scheme, element, iface) == 1 - cmesh = t8_forest_get_cmesh(forest) - itree_in_cmesh = t8_forest_ltreeid_to_cmesh_ltreeid(forest, itree) - iface_in_tree = t8_element_tree_face(eclass_scheme, element, iface) - orientation_ref = Ref{Cint}() - - t8_cmesh_get_face_neighbor(cmesh, itree_in_cmesh, iface_in_tree, C_NULL, - orientation_ref) - orientation = orientation_ref[] - else - orientation = zero(Cint) - end - - pelement_indices_ref = Ref{Ptr{t8_locidx_t}}() - pneighbor_leafs_ref = Ref{Ptr{Ptr{t8_element}}}() - pneigh_scheme_ref = Ref{Ptr{t8_eclass_scheme}}() - - dual_faces_ref = Ref{Ptr{Cint}}() - num_neighbors_ref = Ref{Cint}() - - forest_is_balanced = Cint(1) - - t8_forest_leaf_face_neighbors(forest, itree, element, - pneighbor_leafs_ref, iface, dual_faces_ref, - num_neighbors_ref, - pelement_indices_ref, pneigh_scheme_ref, - forest_is_balanced) - - num_neighbors = num_neighbors_ref[] - dual_faces = unsafe_wrap(Array, dual_faces_ref[], num_neighbors) - neighbor_ielements = unsafe_wrap(Array, pelement_indices_ref[], - num_neighbors) - neighbor_leafs = unsafe_wrap(Array, pneighbor_leafs_ref[], num_neighbors) - neighbor_scheme = pneigh_scheme_ref[] - - if num_neighbors > 0 - neighbor_level = t8_element_level(neighbor_scheme, neighbor_leafs[1]) - - # Conforming interface: The second condition ensures we only visit the interface once. - if level == neighbor_level && current_index <= neighbor_ielements[1] - local_num_conform += 1 - - faces = (iface, dual_faces[1]) - interface_id = local_num_conform - - # Write data to interfaces container. - interfaces.neighbor_ids[1, interface_id] = current_index + 1 - interfaces.neighbor_ids[2, interface_id] = neighbor_ielements[1] + 1 - - # Save interfaces.node_indices dimension specific in containers_3d.jl. 
- init_interface_node_indices!(interfaces, faces, orientation, - interface_id) - # Non-conforming interface. - elseif level < neighbor_level - local_num_mortars += 1 - - faces = (dual_faces[1], iface) - - mortar_id = local_num_mortars - - # Last entry is the large element. - mortars.neighbor_ids[end, mortar_id] = current_index + 1 - - # Fill in the `mortars.neighbor_ids` array and reorder if necessary. - init_mortar_neighbor_ids!(mortars, faces[2], faces[1], - orientation, neighbor_ielements, - mortar_id) - - # Fill in the `mortars.node_indices` array. - init_mortar_node_indices!(mortars, faces, orientation, mortar_id) - - # else: "level > neighbor_level" is skipped since we visit the mortar interface only once. - end - - # Domain boundary. - else - local_num_boundary += 1 - boundary_id = local_num_boundary - - boundaries.neighbor_ids[boundary_id] = current_index + 1 - - init_boundary_node_indices!(boundaries, iface, boundary_id) - - # One-based indexing. - boundaries.name[boundary_id] = boundary_names[iface + 1, itree + 1] - end - - t8_free(dual_faces_ref[]) - t8_free(pneighbor_leafs_ref[]) - t8_free(pelement_indices_ref[]) - end # for iface = ... - - current_index += 1 - end # for - end # for - - return (interfaces = local_num_conform, - mortars = local_num_mortars, - boundaries = local_num_boundary) -end - function trixi_t8_get_local_element_levels(forest) # Check that forest is a committed, that is valid and usable, forest. @assert t8_forest_is_committed(forest) != 0 @@ -341,23 +117,16 @@ function adapt_callback(forest, end function trixi_t8_adapt_new(old_forest, indicators) - # Check that forest is a committed, that is valid and usable, forest. - @assert t8_forest_is_committed(old_forest) != 0 - - # Init new forest. new_forest_ref = Ref{t8_forest_t}() t8_forest_init(new_forest_ref) new_forest = new_forest_ref[] - let set_from = C_NULL, recursive = 0, set_for_coarsening = 0, no_repartition = 0, - do_ghost = 1 - + let set_from = C_NULL, recursive = 0, no_repartition = 1, do_ghost = 1 t8_forest_set_user_data(new_forest, pointer(indicators)) t8_forest_set_adapt(new_forest, old_forest, @t8_adapt_callback(adapt_callback), recursive) t8_forest_set_balance(new_forest, set_from, no_repartition) - t8_forest_set_partition(new_forest, set_from, set_for_coarsening) - t8_forest_set_ghost(new_forest, do_ghost, T8_GHOST_FACES) # Note: MPI support not available yet so it is a dummy call. + t8_forest_set_ghost(new_forest, do_ghost, T8_GHOST_FACES) t8_forest_commit(new_forest) end diff --git a/src/callbacks_step/amr.jl b/src/callbacks_step/amr.jl index 5854c8617c3..6f57d6647fc 100644 --- a/src/callbacks_step/amr.jl +++ b/src/callbacks_step/amr.jl @@ -726,7 +726,7 @@ function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::P4estMesh, return has_changed end -function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::SerialT8codeMesh, +function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::T8codeMesh, equations, dg::DG, cache, semi, t, iter; only_refine = false, only_coarsen = false, @@ -754,29 +754,29 @@ function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::SerialT8codeMe @trixi_timeit timer() "adapt" begin difference = @trixi_timeit timer() "mesh" trixi_t8_adapt!(mesh, indicators) - @trixi_timeit timer() "solver" adapt!(u_ode, adaptor, mesh, equations, dg, - cache, difference) - end + # Store whether there were any cells coarsened or refined and perform load balancing. 
+ has_changed = any(difference .!= 0) - # Store whether there were any cells coarsened or refined and perform load balancing. - has_changed = any(difference .!= 0) + # Check if mesh changed on other processes + if mpi_isparallel() + has_changed = MPI.Allreduce!(Ref(has_changed), |, mpi_comm())[] + end - # TODO: T8codeMesh for MPI not implemented yet. - # Check if mesh changed on other processes - # if mpi_isparallel() - # has_changed = MPI.Allreduce!(Ref(has_changed), |, mpi_comm())[] - # end + if has_changed + @trixi_timeit timer() "solver" adapt!(u_ode, adaptor, mesh, equations, dg, + cache, difference) + end + end if has_changed - # TODO: T8codeMesh for MPI not implemented yet. - # if mpi_isparallel() && amr_callback.dynamic_load_balancing - # @trixi_timeit timer() "dynamic load balancing" begin - # global_first_quadrant = unsafe_wrap(Array, mesh.p4est.global_first_quadrant, mpi_nranks() + 1) - # old_global_first_quadrant = copy(global_first_quadrant) - # partition!(mesh) - # rebalance_solver!(u_ode, mesh, equations, dg, cache, old_global_first_quadrant) - # end - # end + if mpi_isparallel() && amr_callback.dynamic_load_balancing + @trixi_timeit timer() "dynamic load balancing" begin + old_global_first_element_ids = get_global_first_element_ids(mesh) + partition!(mesh) + rebalance_solver!(u_ode, mesh, equations, dg, cache, + old_global_first_element_ids) + end + end reinitialize_boundaries!(semi.boundary_conditions, cache) end diff --git a/src/callbacks_step/amr_dg.jl b/src/callbacks_step/amr_dg.jl index 1dcfdccdea8..0a7055af409 100644 --- a/src/callbacks_step/amr_dg.jl +++ b/src/callbacks_step/amr_dg.jl @@ -6,11 +6,14 @@ #! format: noindent # Redistribute data for load balancing after partitioning the mesh -function rebalance_solver!(u_ode::AbstractVector, mesh::ParallelP4estMesh, equations, +function rebalance_solver!(u_ode::AbstractVector, + mesh::Union{ParallelP4estMesh, ParallelT8codeMesh}, + equations, dg::DGSEM, cache, old_global_first_quadrant) - # mpi ranks are 0-based, this array uses 1-based indices - global_first_quadrant = unsafe_wrap(Array, mesh.p4est.global_first_quadrant, - mpi_nranks() + 1) + + # MPI ranks are 0-based. This array uses 1-based indices. + global_first_quadrant = get_global_first_element_ids(mesh) + if global_first_quadrant[mpi_rank() + 1] == old_global_first_quadrant[mpi_rank() + 1] && global_first_quadrant[mpi_rank() + 2] == diff --git a/src/callbacks_step/amr_dg2d.jl b/src/callbacks_step/amr_dg2d.jl index b816bc06e65..94524b23a3a 100644 --- a/src/callbacks_step/amr_dg2d.jl +++ b/src/callbacks_step/amr_dg2d.jl @@ -385,7 +385,12 @@ function adapt!(u_ode::AbstractVector, adaptor, mesh::T8codeMesh{2}, equations, # Return early if there is nothing to do. if !any(difference .!= 0) - return nothing + if mpi_isparallel() + # MPICache init uses all-to-all communication -> reinitialize even if there is nothing to do + # locally (there still might be other MPI ranks that have refined elements) + reinitialize_containers!(mesh, equations, dg, cache) + end + return end # Number of (local) cells/elements. diff --git a/src/callbacks_step/amr_dg3d.jl b/src/callbacks_step/amr_dg3d.jl index 392cbba9e28..3f67951bafe 100644 --- a/src/callbacks_step/amr_dg3d.jl +++ b/src/callbacks_step/amr_dg3d.jl @@ -316,7 +316,12 @@ function adapt!(u_ode::AbstractVector, adaptor, mesh::T8codeMesh{3}, equations, # Return early if there is nothing to do. 
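A stripped-down sketch (not part of the patch) of how the per-rank adaptation result becomes a rank-global decision in the AMR callback above; `difference` stands for the per-element refine/coarsen indicator, everything else is illustrative:

```julia
using MPI

# Combine the local "mesh changed?" flags of all ranks with a logical OR,
# so that afterwards every rank takes the same branch.
function mesh_changed_globally(difference, comm)
    has_changed = any(difference .!= 0)  # result on this rank only
    if MPI.Comm_size(comm) > 1
        has_changed = MPI.Allreduce!(Ref(has_changed), |, comm)[]
    end
    return has_changed
end
```

Only if this global flag is set does the callback adapt the solver data and, with dynamic load balancing enabled, repartition the mesh and rebalance the solution.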
if !any(difference .!= 0) - return nothing + if mpi_isparallel() + # MPICache init uses all-to-all communication -> reinitialize even if there is nothing to do + # locally (there still might be other MPI ranks that have refined elements) + reinitialize_containers!(mesh, equations, dg, cache) + end + return end # Number of (local) cells/elements. diff --git a/src/callbacks_step/analysis_dg2d_parallel.jl b/src/callbacks_step/analysis_dg2d_parallel.jl index a04bf732604..000daa015dc 100644 --- a/src/callbacks_step/analysis_dg2d_parallel.jl +++ b/src/callbacks_step/analysis_dg2d_parallel.jl @@ -91,7 +91,8 @@ function calc_error_norms_per_element(func, u, t, analyzer, end function calc_error_norms(func, u, t, analyzer, - mesh::ParallelP4estMesh{2}, equations, + mesh::Union{ParallelP4estMesh{2}, ParallelT8codeMesh{2}}, + equations, initial_condition, dg::DGSEM, cache, cache_analysis) @unpack vandermonde, weights = analyzer @unpack node_coordinates, inverse_jacobian = cache.elements @@ -171,7 +172,8 @@ function integrate_via_indices(func::Func, u, end function integrate_via_indices(func::Func, u, - mesh::ParallelP4estMesh{2}, equations, + mesh::Union{ParallelP4estMesh{2}, ParallelT8codeMesh{2}}, + equations, dg::DGSEM, cache, args...; normalize = true) where {Func} @unpack weights = dg.basis diff --git a/src/callbacks_step/analysis_dg3d_parallel.jl b/src/callbacks_step/analysis_dg3d_parallel.jl index d8756d91c9d..de777be406d 100644 --- a/src/callbacks_step/analysis_dg3d_parallel.jl +++ b/src/callbacks_step/analysis_dg3d_parallel.jl @@ -6,7 +6,8 @@ #! format: noindent function calc_error_norms(func, u, t, analyzer, - mesh::ParallelP4estMesh{3}, equations, + mesh::Union{ParallelP4estMesh{3}, ParallelT8codeMesh{3}}, + equations, initial_condition, dg::DGSEM, cache, cache_analysis) @unpack vandermonde, weights = analyzer @unpack node_coordinates, inverse_jacobian = cache.elements @@ -64,7 +65,8 @@ function calc_error_norms(func, u, t, analyzer, end function integrate_via_indices(func::Func, u, - mesh::ParallelP4estMesh{3}, equations, + mesh::Union{ParallelP4estMesh{3}, ParallelT8codeMesh{3}}, + equations, dg::DGSEM, cache, args...; normalize = true) where {Func} @unpack weights = dg.basis diff --git a/src/callbacks_step/stepsize_dg2d.jl b/src/callbacks_step/stepsize_dg2d.jl index 673c3ba6aa6..c6d32c0f6dc 100644 --- a/src/callbacks_step/stepsize_dg2d.jl +++ b/src/callbacks_step/stepsize_dg2d.jl @@ -174,4 +174,36 @@ function max_dt(u, t, mesh::ParallelP4estMesh{2}, return dt end + +function max_dt(u, t, mesh::ParallelT8codeMesh{2}, + constant_speed::False, equations, dg::DG, cache) + # call the method accepting a general `mesh::T8codeMesh{2}` + # TODO: MPI, we should improve this; maybe we should dispatch on `u` + # and create some MPI array type, overloading broadcasting and mapreduce etc. + # Then, this specific array type should also work well with DiffEq etc. + dt = invoke(max_dt, + Tuple{typeof(u), typeof(t), T8codeMesh{2}, + typeof(constant_speed), typeof(equations), typeof(dg), + typeof(cache)}, + u, t, mesh, constant_speed, equations, dg, cache) + dt = MPI.Allreduce!(Ref(dt), min, mpi_comm())[] + + return dt +end + +function max_dt(u, t, mesh::ParallelT8codeMesh{2}, + constant_speed::True, equations, dg::DG, cache) + # call the method accepting a general `mesh::T8codeMesh{2}` + # TODO: MPI, we should improve this; maybe we should dispatch on `u` + # and create some MPI array type, overloading broadcasting and mapreduce etc. 
+ # Then, this specific array type should also work well with DiffEq etc. + dt = invoke(max_dt, + Tuple{typeof(u), typeof(t), T8codeMesh{2}, + typeof(constant_speed), typeof(equations), typeof(dg), + typeof(cache)}, + u, t, mesh, constant_speed, equations, dg, cache) + dt = MPI.Allreduce!(Ref(dt), min, mpi_comm())[] + + return dt +end end # @muladd diff --git a/src/callbacks_step/stepsize_dg3d.jl b/src/callbacks_step/stepsize_dg3d.jl index 822ab2f87ec..664596f989e 100644 --- a/src/callbacks_step/stepsize_dg3d.jl +++ b/src/callbacks_step/stepsize_dg3d.jl @@ -150,4 +150,36 @@ function max_dt(u, t, mesh::ParallelP4estMesh{3}, return dt end + +function max_dt(u, t, mesh::ParallelT8codeMesh{3}, + constant_speed::False, equations, dg::DG, cache) + # call the method accepting a general `mesh::T8codeMesh{3}` + # TODO: MPI, we should improve this; maybe we should dispatch on `u` + # and create some MPI array type, overloading broadcasting and mapreduce etc. + # Then, this specific array type should also work well with DiffEq etc. + dt = invoke(max_dt, + Tuple{typeof(u), typeof(t), T8codeMesh{3}, + typeof(constant_speed), typeof(equations), typeof(dg), + typeof(cache)}, + u, t, mesh, constant_speed, equations, dg, cache) + dt = MPI.Allreduce!(Ref(dt), min, mpi_comm())[] + + return dt +end + +function max_dt(u, t, mesh::ParallelT8codeMesh{3}, + constant_speed::True, equations, dg::DG, cache) + # call the method accepting a general `mesh::T8codeMesh{3}` + # TODO: MPI, we should improve this; maybe we should dispatch on `u` + # and create some MPI array type, overloading broadcasting and mapreduce etc. + # Then, this specific array type should also work well with DiffEq etc. + dt = invoke(max_dt, + Tuple{typeof(u), typeof(t), T8codeMesh{3}, + typeof(constant_speed), typeof(equations), typeof(dg), + typeof(cache)}, + u, t, mesh, constant_speed, equations, dg, cache) + dt = MPI.Allreduce!(Ref(dt), min, mpi_comm())[] + + return dt +end end # @muladd diff --git a/src/meshes/p4est_mesh.jl b/src/meshes/p4est_mesh.jl index c5d39ef00c0..abe9d9345b5 100644 --- a/src/meshes/p4est_mesh.jl +++ b/src/meshes/p4est_mesh.jl @@ -1700,6 +1700,10 @@ function bilinear_interpolation!(coordinate, face_vertices, u, v) end end +function get_global_first_element_ids(mesh::P4estMesh) + return unsafe_wrap(Array, mesh.p4est.global_first_quadrant, mpi_nranks() + 1) +end + function balance!(mesh::P4estMesh{2}, init_fn = C_NULL) p4est_balance(mesh.p4est, P4EST_CONNECT_FACE, init_fn) # Due to a bug in `p4est`, the forest needs to be rebalanced twice sometimes diff --git a/src/meshes/t8code_mesh.jl b/src/meshes/t8code_mesh.jl index c9665a22af9..6fb4d861d10 100644 --- a/src/meshes/t8code_mesh.jl +++ b/src/meshes/t8code_mesh.jl @@ -7,8 +7,6 @@ to manage trees and mesh refinement. """ mutable struct T8codeMesh{NDIMS, RealT <: Real, IsParallel, NDIMSP2, NNODES} <: AbstractMesh{NDIMS} - cmesh :: Ptr{t8_cmesh} # cpointer to coarse mesh - scheme :: Ptr{t8_eclass_scheme} # cpointer to element scheme forest :: Ptr{t8_forest} # cpointer to forest is_parallel :: IsParallel @@ -25,14 +23,15 @@ mutable struct T8codeMesh{NDIMS, RealT <: Real, IsParallel, NDIMSP2, NNODES} <: nmortars :: Int nboundaries :: Int - function T8codeMesh{NDIMS}(cmesh, scheme, forest, tree_node_coordinates, nodes, + nmpiinterfaces :: Int + nmpimortars :: Int + + function T8codeMesh{NDIMS}(forest, tree_node_coordinates, nodes, boundary_names, current_filename) where {NDIMS} - is_parallel = False() + is_parallel = mpi_isparallel() ? 
True() : False() - mesh = new{NDIMS, Float64, typeof(is_parallel), NDIMS + 2, length(nodes)}(cmesh, - scheme, - forest, + mesh = new{NDIMS, Float64, typeof(is_parallel), NDIMS + 2, length(nodes)}(forest, is_parallel) mesh.nodes = nodes @@ -52,7 +51,7 @@ mutable struct T8codeMesh{NDIMS, RealT <: Real, IsParallel, NDIMSP2, NNODES} <: # further down. However, this might cause a pile-up of `mesh` # objects during long-running sessions. if !MPI.Finalized() - trixi_t8_unref_forest(mesh.forest) + t8_forest_unref(Ref(mesh.forest)) end end @@ -63,7 +62,7 @@ mutable struct T8codeMesh{NDIMS, RealT <: Real, IsParallel, NDIMSP2, NNODES} <: # more information. if haskey(ENV, "TRIXI_T8CODE_SC_FINALIZE") MPI.add_finalize_hook!() do - trixi_t8_unref_forest(mesh.forest) + t8_forest_unref(Ref(mesh.forest)) end end @@ -72,16 +71,15 @@ mutable struct T8codeMesh{NDIMS, RealT <: Real, IsParallel, NDIMSP2, NNODES} <: end const SerialT8codeMesh{NDIMS} = T8codeMesh{NDIMS, <:Real, <:False} +const ParallelT8codeMesh{NDIMS} = T8codeMesh{NDIMS, <:Real, <:True} @inline mpi_parallel(mesh::SerialT8codeMesh) = False() +@inline mpi_parallel(mesh::ParallelT8codeMesh) = True() @inline Base.ndims(::T8codeMesh{NDIMS}) where {NDIMS} = NDIMS @inline Base.real(::T8codeMesh{NDIMS, RealT}) where {NDIMS, RealT} = RealT -@inline ntrees(mesh::T8codeMesh) = Int(t8_forest_get_num_local_trees(mesh.forest)) +@inline ntrees(mesh::T8codeMesh) = size(mesh.tree_node_coordinates)[end] @inline ncells(mesh::T8codeMesh) = Int(t8_forest_get_local_num_elements(mesh.forest)) -@inline ninterfaces(mesh::T8codeMesh) = mesh.ninterfaces -@inline nmortars(mesh::T8codeMesh) = mesh.nmortars -@inline nboundaries(mesh::T8codeMesh) = mesh.nboundaries function Base.show(io::IO, mesh::T8codeMesh) print(io, "T8codeMesh{", ndims(mesh), ", ", real(mesh), "}") @@ -184,21 +182,23 @@ function T8codeMesh(trees_per_dimension; polydeg = 1, T8code.Libt8.p8est_connectivity_destroy(conn) end + do_face_ghost = mpi_isparallel() scheme = t8_scheme_new_default_cxx() - forest = t8_forest_new_uniform(cmesh, scheme, initial_refinement_level, 0, mpi_comm()) + forest = t8_forest_new_uniform(cmesh, scheme, initial_refinement_level, do_face_ghost, + mpi_comm()) basis = LobattoLegendreBasis(RealT, polydeg) nodes = basis.nodes + num_trees = t8_cmesh_get_num_trees(cmesh) + tree_node_coordinates = Array{RealT, NDIMS + 2}(undef, NDIMS, ntuple(_ -> length(nodes), NDIMS)..., - prod(trees_per_dimension)) + num_trees) # Get cell length in reference mesh: Omega_ref = [-1,1]^NDIMS. dx = [2 / n for n in trees_per_dimension] - num_local_trees = t8_cmesh_get_num_local_trees(cmesh) - # Non-periodic boundaries. boundary_names = fill(Symbol("---"), 2 * NDIMS, prod(trees_per_dimension)) @@ -208,7 +208,7 @@ function T8codeMesh(trees_per_dimension; polydeg = 1, mapping_ = mapping end - for itree in 1:num_local_trees + for itree in 1:num_trees veptr = t8_cmesh_get_tree_vertices(cmesh, itree - 1) verts = unsafe_wrap(Array, veptr, (3, 1 << NDIMS)) @@ -256,7 +256,7 @@ function T8codeMesh(trees_per_dimension; polydeg = 1, end end - return T8codeMesh{NDIMS}(cmesh, scheme, forest, tree_node_coordinates, nodes, + return T8codeMesh{NDIMS}(forest, tree_node_coordinates, nodes, boundary_names, "") end @@ -290,21 +290,25 @@ function T8codeMesh(cmesh::Ptr{t8_cmesh}; @assert (NDIMS == 2||NDIMS == 3) "NDIMS should be 2 or 3." 
+ do_face_ghost = mpi_isparallel() scheme = t8_scheme_new_default_cxx() - forest = t8_forest_new_uniform(cmesh, scheme, initial_refinement_level, 0, mpi_comm()) + forest = t8_forest_new_uniform(cmesh, scheme, initial_refinement_level, do_face_ghost, + mpi_comm()) basis = LobattoLegendreBasis(RealT, polydeg) nodes = basis.nodes - num_local_trees = t8_cmesh_get_num_local_trees(cmesh) + num_trees = t8_cmesh_get_num_trees(cmesh) tree_node_coordinates = Array{RealT, NDIMS + 2}(undef, NDIMS, ntuple(_ -> length(nodes), NDIMS)..., - num_local_trees) + num_trees) nodes_in = [-1.0, 1.0] matrix = polynomial_interpolation_matrix(nodes_in, nodes) + num_local_trees = t8_cmesh_get_num_local_trees(cmesh) + if NDIMS == 2 data_in = Array{RealT, 3}(undef, 2, 2, 2) tmp1 = zeros(RealT, 2, length(nodes), length(nodes_in)) @@ -353,7 +357,7 @@ function T8codeMesh(cmesh::Ptr{t8_cmesh}; tmp1 = zeros(RealT, 3, length(nodes), length(nodes_in), length(nodes_in)) verts = zeros(3, 8) - for itree in 0:(num_local_trees - 1) + for itree in 0:(num_trees - 1) veptr = t8_cmesh_get_tree_vertices(cmesh, itree) # Note, `verts = unsafe_wrap(Array, veptr, (3, 1 << NDIMS))` @@ -387,9 +391,9 @@ function T8codeMesh(cmesh::Ptr{t8_cmesh}; map_node_coordinates!(tree_node_coordinates, mapping) # There's no simple and generic way to distinguish boundaries. Name all of them :all. - boundary_names = fill(:all, 2 * NDIMS, num_local_trees) + boundary_names = fill(:all, 2 * NDIMS, num_trees) - return T8codeMesh{NDIMS}(cmesh, scheme, forest, tree_node_coordinates, nodes, + return T8codeMesh{NDIMS}(forest, tree_node_coordinates, nodes, boundary_names, "") end @@ -442,7 +446,7 @@ function T8codeMesh(conn::Ptr{p8est_connectivity}; kwargs...) end """ - T8codeMesh{NDIMS}(meshfile::String; kwargs...) + T8codeMesh(meshfile::String, ndims; kwargs...) Main mesh constructor for the `T8codeMesh` that imports an unstructured, conforming mesh from a Gmsh mesh file (`.msh`). @@ -461,7 +465,6 @@ mesh from a Gmsh mesh file (`.msh`). - `initial_refinement_level::Integer`: refine the mesh uniformly to this level before the simulation starts. """ function T8codeMesh(meshfile::String, ndims; kwargs...) - # Prevent `t8code` from crashing Julia if the file doesn't exist. @assert isfile(meshfile) @@ -586,13 +589,525 @@ function adapt!(mesh::T8codeMesh, adapt_callback; recursive = true, balance = tr return nothing end -# TODO: Just a placeholder. Will be implemented later when MPI is supported. -function balance!(mesh::T8codeMesh, init_fn = C_NULL) +""" + Trixi.balance!(mesh::T8codeMesh) + +Balance a `T8codeMesh` to ensure 2^(NDIMS-1):1 face neighbors. +""" +function balance!(mesh::T8codeMesh) + new_forest_ref = Ref{t8_forest_t}() + t8_forest_init(new_forest_ref) + new_forest = new_forest_ref[] + + let set_from = mesh.forest, no_repartition = 1, do_ghost = 1 + t8_forest_set_balance(new_forest, set_from, no_repartition) + t8_forest_set_ghost(new_forest, do_ghost, T8_GHOST_FACES) + t8_forest_commit(new_forest) + end + + mesh.forest = new_forest + + return nothing +end + +""" + Trixi.partition!(mesh::T8codeMesh) + +Partition a `T8codeMesh` in order to redistribute elements evenly among MPI ranks. + +# Arguments +- `mesh::T8codeMesh`: Initialized mesh object. 
+""" +function partition!(mesh::T8codeMesh) + new_forest_ref = Ref{t8_forest_t}() + t8_forest_init(new_forest_ref) + new_forest = new_forest_ref[] + + let set_from = mesh.forest, do_ghost = 1, allow_for_coarsening = 1 + t8_forest_set_partition(new_forest, set_from, allow_for_coarsening) + t8_forest_set_ghost(new_forest, do_ghost, T8_GHOST_FACES) + t8_forest_commit(new_forest) + end + + mesh.forest = new_forest + return nothing end -# TODO: Just a placeholder. Will be implemented later when MPI is supported. -function partition!(mesh::T8codeMesh; allow_coarsening = true, weight_fn = C_NULL) +# Compute the global ids (zero-indexed) of first element in each MPI rank. +function get_global_first_element_ids(mesh::T8codeMesh) + n_elements_local = Int(t8_forest_get_local_num_elements(mesh.forest)) + n_elements_by_rank = Vector{Int}(undef, mpi_nranks()) + n_elements_by_rank[mpi_rank() + 1] = n_elements_local + MPI.Allgather!(MPI.UBuffer(n_elements_by_rank, 1), mpi_comm()) + return [sum(n_elements_by_rank[1:(rank - 1)]) for rank in 1:(mpi_nranks() + 1)] +end + +function count_interfaces(mesh::T8codeMesh) + @assert t8_forest_is_committed(mesh.forest) != 0 + + num_local_elements = t8_forest_get_local_num_elements(mesh.forest) + num_local_trees = t8_forest_get_num_local_trees(mesh.forest) + + current_index = t8_locidx_t(0) + + local_num_conform = 0 + local_num_mortars = 0 + local_num_boundary = 0 + + local_num_mpi_conform = 0 + local_num_mpi_mortars = 0 + + visited_global_mortar_ids = Set{UInt64}([]) + + max_level = t8_forest_get_maxlevel(mesh.forest) #UInt64 + max_tree_num_elements = UInt64(2^ndims(mesh))^max_level + + if mpi_isparallel() + ghost_num_trees = t8_forest_ghost_num_trees(mesh.forest) + + ghost_tree_element_offsets = [num_local_elements + + t8_forest_ghost_get_tree_element_offset(mesh.forest, + itree) + for itree in 0:(ghost_num_trees - 1)] + ghost_global_treeids = [t8_forest_ghost_get_global_treeid(mesh.forest, itree) + for itree in 0:(ghost_num_trees - 1)] + end + + for itree in 0:(num_local_trees - 1) + tree_class = t8_forest_get_tree_class(mesh.forest, itree) + eclass_scheme = t8_forest_get_eclass_scheme(mesh.forest, tree_class) + + num_elements_in_tree = t8_forest_get_tree_num_elements(mesh.forest, itree) + + global_itree = t8_forest_global_tree_id(mesh.forest, itree) + + for ielement in 0:(num_elements_in_tree - 1) + element = t8_forest_get_element_in_tree(mesh.forest, itree, ielement) + + level = t8_element_level(eclass_scheme, element) + + num_faces = t8_element_num_faces(eclass_scheme, element) + + # Note: This works only for forests of one element class. 
+ current_linear_id = global_itree * max_tree_num_elements + + t8_element_get_linear_id(eclass_scheme, element, max_level) + + for iface in 0:(num_faces - 1) + pelement_indices_ref = Ref{Ptr{t8_locidx_t}}() + pneighbor_leafs_ref = Ref{Ptr{Ptr{t8_element}}}() + pneigh_scheme_ref = Ref{Ptr{t8_eclass_scheme}}() + + dual_faces_ref = Ref{Ptr{Cint}}() + num_neighbors_ref = Ref{Cint}() + + forest_is_balanced = Cint(1) + + t8_forest_leaf_face_neighbors(mesh.forest, itree, element, + pneighbor_leafs_ref, iface, dual_faces_ref, + num_neighbors_ref, + pelement_indices_ref, pneigh_scheme_ref, + forest_is_balanced) + + num_neighbors = num_neighbors_ref[] + dual_faces = unsafe_wrap(Array, dual_faces_ref[], num_neighbors) + neighbor_ielements = unsafe_wrap(Array, pelement_indices_ref[], + num_neighbors) + neighbor_leafs = unsafe_wrap(Array, pneighbor_leafs_ref[], num_neighbors) + neighbor_scheme = pneigh_scheme_ref[] + + if num_neighbors == 0 + local_num_boundary += 1 + else + neighbor_level = t8_element_level(neighbor_scheme, neighbor_leafs[1]) + + if all(neighbor_ielements .< num_local_elements) + # Conforming interface: The second condition ensures we + # only visit the interface once. + if level == neighbor_level && current_index <= neighbor_ielements[1] + local_num_conform += 1 + elseif level < neighbor_level + local_num_mortars += 1 + # `else level > neighbor_level` is ignored since we + # only want to count the mortar interface once. + end + else + if level == neighbor_level + local_num_mpi_conform += 1 + elseif level < neighbor_level + local_num_mpi_mortars += 1 + + global_mortar_id = 2 * ndims(mesh) * current_linear_id + iface + + else # level > neighbor_level + neighbor_global_ghost_itree = ghost_global_treeids[findlast(ghost_tree_element_offsets .<= + neighbor_ielements[1])] + neighbor_linear_id = neighbor_global_ghost_itree * + max_tree_num_elements + + t8_element_get_linear_id(neighbor_scheme, + neighbor_leafs[1], + max_level) + global_mortar_id = 2 * ndims(mesh) * neighbor_linear_id + + dual_faces[1] + + if !(global_mortar_id in visited_global_mortar_ids) + push!(visited_global_mortar_ids, global_mortar_id) + local_num_mpi_mortars += 1 + end + end + end + end + + t8_free(dual_faces_ref[]) + t8_free(pneighbor_leafs_ref[]) + t8_free(pelement_indices_ref[]) + end # for + + current_index += 1 + end # for + end # for + + return (interfaces = local_num_conform, + mortars = local_num_mortars, + boundaries = local_num_boundary, + mpi_interfaces = local_num_mpi_conform, + mpi_mortars = local_num_mpi_mortars) +end + +# I know this routine is an unmaintainable behemoth. However, I see no real +# and elegant way to refactor this into, for example, smaller parts. The +# `t8_forest_leaf_face_neighbors` routine is as of now rather costly and it +# makes sense to query it only once per face per element and extract all the +# information needed at once in order to fill the connectivity information. +# Instead, I opted for good documentation. +function fill_mesh_info!(mesh::T8codeMesh, interfaces, mortars, boundaries, + boundary_names; mpi_mesh_info = nothing) + @assert t8_forest_is_committed(mesh.forest) != 0 + + num_local_elements = t8_forest_get_local_num_elements(mesh.forest) + num_local_trees = t8_forest_get_num_local_trees(mesh.forest) + + if !isnothing(mpi_mesh_info) + #! 
format: off + remotes = t8_forest_ghost_get_remotes(mesh.forest) + ghost_num_trees = t8_forest_ghost_num_trees(mesh.forest) + + ghost_remote_first_elem = [num_local_elements + + t8_forest_ghost_remote_first_elem(mesh.forest, remote) + for remote in remotes] + + ghost_tree_element_offsets = [num_local_elements + + t8_forest_ghost_get_tree_element_offset(mesh.forest, itree) + for itree in 0:(ghost_num_trees - 1)] + + ghost_global_treeids = [t8_forest_ghost_get_global_treeid(mesh.forest, itree) + for itree in 0:(ghost_num_trees - 1)] + #! format: on + end + + # Process-local index of the current element in the space-filling curve. + current_index = t8_locidx_t(0) + + # Increment counters for the different interface/mortar/boundary types. + local_num_conform = 0 + local_num_mortars = 0 + local_num_boundary = 0 + + local_num_mpi_conform = 0 + local_num_mpi_mortars = 0 + + # Works for quads and hexs only. This mapping is needed in the MPI mortar + # sections below. + map_iface_to_ichild_to_position = [ + # 0 1 2 3 4 5 6 7 ichild/iface + [1, 0, 2, 0, 3, 0, 4, 0], # 0 + [0, 1, 0, 2, 0, 3, 0, 4], # 1 + [1, 2, 0, 0, 3, 4, 0, 0], # 2 + [0, 0, 1, 2, 0, 0, 3, 4], # 3 + [1, 2, 3, 4, 0, 0, 0, 0], # 4 + [0, 0, 0, 0, 1, 2, 3, 4], # 5 + ] + + # Helper variables to compute unique global MPI interface/mortar ids. + max_level = t8_forest_get_maxlevel(mesh.forest) #UInt64 + max_tree_num_elements = UInt64(2^ndims(mesh))^max_level + + # These two variables help to ensure that we count MPI mortars from smaller + # elements point of view only once. + visited_global_mortar_ids = Set{UInt64}([]) + global_mortar_id_to_local = Dict{UInt64, Int}([]) + + # Loop over all local trees. + for itree in 0:(num_local_trees - 1) + tree_class = t8_forest_get_tree_class(mesh.forest, itree) + eclass_scheme = t8_forest_get_eclass_scheme(mesh.forest, tree_class) + + num_elements_in_tree = t8_forest_get_tree_num_elements(mesh.forest, itree) + + global_itree = t8_forest_global_tree_id(mesh.forest, itree) + + # Loop over all local elements of the current local tree. + for ielement in 0:(num_elements_in_tree - 1) + element = t8_forest_get_element_in_tree(mesh.forest, itree, ielement) + + level = t8_element_level(eclass_scheme, element) + + num_faces = t8_element_num_faces(eclass_scheme, element) + + # Note: This works only for forests of one element class. + current_linear_id = global_itree * max_tree_num_elements + + t8_element_get_linear_id(eclass_scheme, element, max_level) + + # Loop over all faces of the current local element. + for iface in 0:(num_faces - 1) + # Compute the `orientation` of the touching faces. + if t8_element_is_root_boundary(eclass_scheme, element, iface) == 1 + cmesh = t8_forest_get_cmesh(mesh.forest) + itree_in_cmesh = t8_forest_ltreeid_to_cmesh_ltreeid(mesh.forest, itree) + iface_in_tree = t8_element_tree_face(eclass_scheme, element, iface) + orientation_ref = Ref{Cint}() + + t8_cmesh_get_face_neighbor(cmesh, itree_in_cmesh, iface_in_tree, C_NULL, + orientation_ref) + orientation = orientation_ref[] + else + orientation = zero(Cint) + end + + pelement_indices_ref = Ref{Ptr{t8_locidx_t}}() + pneighbor_leafs_ref = Ref{Ptr{Ptr{t8_element}}}() + pneigh_scheme_ref = Ref{Ptr{t8_eclass_scheme}}() + + dual_faces_ref = Ref{Ptr{Cint}}() + num_neighbors_ref = Ref{Cint}() + + forest_is_balanced = Cint(1) + + # Query neighbor information from t8code. 
+ t8_forest_leaf_face_neighbors(mesh.forest, itree, element, + pneighbor_leafs_ref, iface, dual_faces_ref, + num_neighbors_ref, + pelement_indices_ref, pneigh_scheme_ref, + forest_is_balanced) + + num_neighbors = num_neighbors_ref[] + dual_faces = unsafe_wrap(Array, dual_faces_ref[], num_neighbors) + neighbor_ielements = unsafe_wrap(Array, pelement_indices_ref[], + num_neighbors) + neighbor_leafs = unsafe_wrap(Array, pneighbor_leafs_ref[], num_neighbors) + neighbor_scheme = pneigh_scheme_ref[] + + # Now we check for the different cases. The nested if-structure is as follows: + # + # if `boundary`: + # + # + # else: // It must be an interface or mortar. + # + # if `all neighbors are local elements`: + # + # if `local interface`: + # + # elseif `local mortar from larger element point of view`: + # + # else: // `local mortar from smaller elements point of view` + # // We only count local mortars once. + # + # else: // It must be either a MPI interface or a MPI mortar. + # + # if `MPI interface`: + # + # elseif `MPI mortar from larger element point of view`: + # + # else: // `MPI mortar from smaller elements point of view` + # + # + # // end + + # Domain boundary. + if num_neighbors == 0 + local_num_boundary += 1 + boundary_id = local_num_boundary + + boundaries.neighbor_ids[boundary_id] = current_index + 1 + + init_boundary_node_indices!(boundaries, iface, boundary_id) + + # One-based indexing. + boundaries.name[boundary_id] = boundary_names[iface + 1, itree + 1] + + # Interface or mortar. + else + neighbor_level = t8_element_level(neighbor_scheme, neighbor_leafs[1]) + + # Local interface or mortar. + if all(neighbor_ielements .< num_local_elements) + + # Local interface: The second condition ensures we only visit the interface once. + if level == neighbor_level && current_index <= neighbor_ielements[1] + local_num_conform += 1 + + interfaces.neighbor_ids[1, local_num_conform] = current_index + + 1 + interfaces.neighbor_ids[2, local_num_conform] = neighbor_ielements[1] + + 1 + + init_interface_node_indices!(interfaces, (iface, dual_faces[1]), + orientation, + local_num_conform) + # Local mortar. + elseif level < neighbor_level + local_num_mortars += 1 + + # Last entry is the large element. + mortars.neighbor_ids[end, local_num_mortars] = current_index + 1 + + init_mortar_neighbor_ids!(mortars, iface, dual_faces[1], + orientation, neighbor_ielements, + local_num_mortars) + + init_mortar_node_indices!(mortars, (dual_faces[1], iface), + orientation, local_num_mortars) + + # else: `level > neighbor_level` is skipped since we visit the mortar interface only once. + end + + # MPI interface or MPI mortar. + else + + # MPI interface. 
+ if level == neighbor_level + local_num_mpi_conform += 1 + + neighbor_global_ghost_itree = ghost_global_treeids[findlast(ghost_tree_element_offsets .<= + neighbor_ielements[1])] + + neighbor_linear_id = neighbor_global_ghost_itree * + max_tree_num_elements + + t8_element_get_linear_id(neighbor_scheme, + neighbor_leafs[1], + max_level) + + if current_linear_id < neighbor_linear_id + local_side = 1 + smaller_iface = iface + smaller_linear_id = current_linear_id + faces = (iface, dual_faces[1]) + else + local_side = 2 + smaller_iface = dual_faces[1] + smaller_linear_id = neighbor_linear_id + faces = (dual_faces[1], iface) + end + + global_interface_id = 2 * ndims(mesh) * smaller_linear_id + + smaller_iface + + mpi_mesh_info.mpi_interfaces.local_neighbor_ids[local_num_mpi_conform] = current_index + + 1 + mpi_mesh_info.mpi_interfaces.local_sides[local_num_mpi_conform] = local_side + + init_mpi_interface_node_indices!(mpi_mesh_info.mpi_interfaces, + faces, local_side, orientation, + local_num_mpi_conform) + + neighbor_rank = remotes[findlast(ghost_remote_first_elem .<= + neighbor_ielements[1])] + mpi_mesh_info.neighbor_ranks_interface[local_num_mpi_conform] = neighbor_rank + + mpi_mesh_info.global_interface_ids[local_num_mpi_conform] = global_interface_id + + # MPI Mortar: from larger element point of view + elseif level < neighbor_level + local_num_mpi_mortars += 1 + + global_mortar_id = 2 * ndims(mesh) * current_linear_id + iface + + neighbor_ids = neighbor_ielements .+ 1 + + local_neighbor_positions = findall(neighbor_ids .<= + num_local_elements) + local_neighbor_ids = [neighbor_ids[i] + for i in local_neighbor_positions] + local_neighbor_positions = [map_iface_to_ichild_to_position[dual_faces[1] + 1][t8_element_child_id(neighbor_scheme, neighbor_leafs[i]) + 1] + for i in local_neighbor_positions] + + # Last entry is the large element. 
+ push!(local_neighbor_ids, current_index + 1) + push!(local_neighbor_positions, 2^(ndims(mesh) - 1) + 1) + + mpi_mesh_info.mpi_mortars.local_neighbor_ids[local_num_mpi_mortars] = local_neighbor_ids + mpi_mesh_info.mpi_mortars.local_neighbor_positions[local_num_mpi_mortars] = local_neighbor_positions + + init_mortar_node_indices!(mpi_mesh_info.mpi_mortars, + (dual_faces[1], iface), orientation, + local_num_mpi_mortars) + + neighbor_ranks = [remotes[findlast(ghost_remote_first_elem .<= + ineighbor_ghost)] + for ineighbor_ghost in filter(x -> x >= + num_local_elements, + neighbor_ielements)] + mpi_mesh_info.neighbor_ranks_mortar[local_num_mpi_mortars] = neighbor_ranks + + mpi_mesh_info.global_mortar_ids[local_num_mpi_mortars] = global_mortar_id + + # MPI Mortar: from smaller elements point of view + else + neighbor_global_ghost_itree = ghost_global_treeids[findlast(ghost_tree_element_offsets .<= + neighbor_ielements[1])] + neighbor_linear_id = neighbor_global_ghost_itree * + max_tree_num_elements + + t8_element_get_linear_id(neighbor_scheme, + neighbor_leafs[1], + max_level) + global_mortar_id = 2 * ndims(mesh) * neighbor_linear_id + + dual_faces[1] + + if global_mortar_id in visited_global_mortar_ids + local_mpi_mortar_id = global_mortar_id_to_local[global_mortar_id] + + push!(mpi_mesh_info.mpi_mortars.local_neighbor_ids[local_mpi_mortar_id], + current_index + 1) + push!(mpi_mesh_info.mpi_mortars.local_neighbor_positions[local_mpi_mortar_id], + map_iface_to_ichild_to_position[iface + 1][t8_element_child_id(eclass_scheme, element) + 1]) + else + local_num_mpi_mortars += 1 + local_mpi_mortar_id = local_num_mpi_mortars + push!(visited_global_mortar_ids, global_mortar_id) + global_mortar_id_to_local[global_mortar_id] = local_mpi_mortar_id + + mpi_mesh_info.mpi_mortars.local_neighbor_ids[local_mpi_mortar_id] = [ + current_index + 1, + ] + mpi_mesh_info.mpi_mortars.local_neighbor_positions[local_mpi_mortar_id] = [ + map_iface_to_ichild_to_position[iface + 1][t8_element_child_id(eclass_scheme, element) + 1], + ] + init_mortar_node_indices!(mpi_mesh_info.mpi_mortars, + (iface, dual_faces[1]), + orientation, local_mpi_mortar_id) + + neighbor_ranks = [ + remotes[findlast(ghost_remote_first_elem .<= + neighbor_ielements[1])], + ] + mpi_mesh_info.neighbor_ranks_mortar[local_mpi_mortar_id] = neighbor_ranks + + mpi_mesh_info.global_mortar_ids[local_mpi_mortar_id] = global_mortar_id + end + end + end + end + + t8_free(dual_faces_ref[]) + t8_free(pneighbor_leafs_ref[]) + t8_free(pelement_indices_ref[]) + end # for iface + + current_index += 1 + end # for ielement + end # for itree + return nothing end diff --git a/src/solvers/dgsem_p4est/containers_parallel.jl b/src/solvers/dgsem_p4est/containers_parallel.jl index 7c7bd868457..fd2749155bb 100644 --- a/src/solvers/dgsem_p4est/containers_parallel.jl +++ b/src/solvers/dgsem_p4est/containers_parallel.jl @@ -43,7 +43,8 @@ function Base.resize!(mpi_interfaces::P4estMPIInterfaceContainer, capacity) end # Create MPI interface container and initialize interface data -function init_mpi_interfaces(mesh::ParallelP4estMesh, equations, basis, elements) +function init_mpi_interfaces(mesh::Union{ParallelP4estMesh, ParallelT8codeMesh}, + equations, basis, elements) NDIMS = ndims(elements) uEltype = eltype(elements) @@ -133,7 +134,8 @@ function Base.resize!(mpi_mortars::P4estMPIMortarContainer, capacity) end # Create MPI mortar container and initialize MPI mortar data -function init_mpi_mortars(mesh::ParallelP4estMesh, equations, basis, elements) +function 
init_mpi_mortars(mesh::Union{ParallelP4estMesh, ParallelT8codeMesh}, equations, + basis, elements) NDIMS = ndims(mesh) RealT = real(mesh) uEltype = eltype(elements) diff --git a/src/solvers/dgsem_p4est/dg_2d_parallel.jl b/src/solvers/dgsem_p4est/dg_2d_parallel.jl index a8887351c46..3bf0cd0cab5 100644 --- a/src/solvers/dgsem_p4est/dg_2d_parallel.jl +++ b/src/solvers/dgsem_p4est/dg_2d_parallel.jl @@ -6,7 +6,8 @@ #! format: noindent function prolong2mpiinterfaces!(cache, u, - mesh::ParallelP4estMesh{2}, + mesh::Union{ParallelP4estMesh{2}, + ParallelT8codeMesh{2}}, equations, surface_integral, dg::DG) @unpack mpi_interfaces = cache index_range = eachnode(dg) @@ -43,7 +44,8 @@ function prolong2mpiinterfaces!(cache, u, end function calc_mpi_interface_flux!(surface_flux_values, - mesh::ParallelP4estMesh{2}, + mesh::Union{ParallelP4estMesh{2}, + ParallelT8codeMesh{2}}, nonconservative_terms, equations, surface_integral, dg::DG, cache) @unpack local_neighbor_ids, node_indices, local_sides = cache.mpi_interfaces @@ -106,7 +108,8 @@ end # Inlined version of the interface flux computation for conservation laws @inline function calc_mpi_interface_flux!(surface_flux_values, - mesh::P4estMesh{2}, + mesh::Union{ParallelP4estMesh{2}, + ParallelT8codeMesh{2}}, nonconservative_terms::False, equations, surface_integral, dg::DG, cache, interface_index, normal_direction, @@ -131,7 +134,8 @@ end end function prolong2mpimortars!(cache, u, - mesh::ParallelP4estMesh{2}, equations, + mesh::Union{ParallelP4estMesh{2}, ParallelT8codeMesh{2}}, + equations, mortar_l2::LobattoLegendreMortarL2, surface_integral, dg::DGSEM) @unpack node_indices = cache.mpi_mortars @@ -199,7 +203,7 @@ function prolong2mpimortars!(cache, u, end function calc_mpi_mortar_flux!(surface_flux_values, - mesh::ParallelP4estMesh{2}, + mesh::Union{ParallelP4estMesh{2}, ParallelT8codeMesh{2}}, nonconservative_terms, equations, mortar_l2::LobattoLegendreMortarL2, surface_integral, dg::DG, cache) @@ -253,7 +257,8 @@ end # Inlined version of the mortar flux computation on small elements for conservation laws @inline function calc_mpi_mortar_flux!(fstar, - mesh::ParallelP4estMesh{2}, + mesh::Union{ParallelP4estMesh{2}, + ParallelT8codeMesh{2}}, nonconservative_terms::False, equations, surface_integral, dg::DG, cache, mortar_index, position_index, normal_direction, @@ -271,7 +276,9 @@ end end @inline function mpi_mortar_fluxes_to_elements!(surface_flux_values, - mesh::ParallelP4estMesh{2}, equations, + mesh::Union{ParallelP4estMesh{2}, + ParallelT8codeMesh{2}}, + equations, mortar_l2::LobattoLegendreMortarL2, dg::DGSEM, cache, mortar, fstar, u_buffer) diff --git a/src/solvers/dgsem_p4est/dg_3d_parallel.jl b/src/solvers/dgsem_p4est/dg_3d_parallel.jl index 13bf2a1a2eb..e504e06d2c4 100644 --- a/src/solvers/dgsem_p4est/dg_3d_parallel.jl +++ b/src/solvers/dgsem_p4est/dg_3d_parallel.jl @@ -6,7 +6,7 @@ #! 
format: noindent function rhs!(du, u, t, - mesh::ParallelP4estMesh{3}, equations, + mesh::Union{ParallelP4estMesh{3}, ParallelT8codeMesh{3}}, equations, initial_condition, boundary_conditions, source_terms::Source, dg::DG, cache) where {Source} # Start to receive MPI data @@ -113,7 +113,8 @@ function rhs!(du, u, t, end function prolong2mpiinterfaces!(cache, u, - mesh::ParallelP4estMesh{3}, + mesh::Union{ParallelP4estMesh{3}, + ParallelT8codeMesh{3}}, equations, surface_integral, dg::DG) @unpack mpi_interfaces = cache index_range = eachnode(dg) @@ -160,7 +161,8 @@ function prolong2mpiinterfaces!(cache, u, end function calc_mpi_interface_flux!(surface_flux_values, - mesh::ParallelP4estMesh{3}, + mesh::Union{ParallelP4estMesh{3}, + ParallelT8codeMesh{3}}, nonconservative_terms, equations, surface_integral, dg::DG, cache) @unpack local_neighbor_ids, node_indices, local_sides = cache.mpi_interfaces @@ -237,7 +239,8 @@ end # Inlined version of the interface flux computation for conservation laws @inline function calc_mpi_interface_flux!(surface_flux_values, - mesh::P4estMesh{3}, + mesh::Union{ParallelP4estMesh{3}, + ParallelT8codeMesh{3}}, nonconservative_terms::False, equations, surface_integral, dg::DG, cache, interface_index, normal_direction, @@ -265,7 +268,8 @@ end end function prolong2mpimortars!(cache, u, - mesh::ParallelP4estMesh{3}, equations, + mesh::Union{ParallelP4estMesh{3}, ParallelT8codeMesh{3}}, + equations, mortar_l2::LobattoLegendreMortarL2, surface_integral, dg::DGSEM) @unpack node_indices = cache.mpi_mortars @@ -374,7 +378,7 @@ function prolong2mpimortars!(cache, u, end function calc_mpi_mortar_flux!(surface_flux_values, - mesh::ParallelP4estMesh{3}, + mesh::Union{ParallelP4estMesh{3}, ParallelT8codeMesh{3}}, nonconservative_terms, equations, mortar_l2::LobattoLegendreMortarL2, surface_integral, dg::DG, cache) @@ -437,7 +441,8 @@ end # Inlined version of the mortar flux computation on small elements for conservation laws @inline function calc_mpi_mortar_flux!(fstar, - mesh::ParallelP4estMesh{3}, + mesh::Union{ParallelP4estMesh{3}, + ParallelT8codeMesh{3}}, nonconservative_terms::False, equations, surface_integral, dg::DG, cache, mortar_index, position_index, normal_direction, @@ -456,7 +461,9 @@ end end @inline function mpi_mortar_fluxes_to_elements!(surface_flux_values, - mesh::ParallelP4estMesh{3}, equations, + mesh::Union{ParallelP4estMesh{3}, + ParallelT8codeMesh{3}}, + equations, mortar_l2::LobattoLegendreMortarL2, dg::DGSEM, cache, mortar, fstar, u_buffer, fstar_tmp) diff --git a/src/solvers/dgsem_p4est/dg_parallel.jl b/src/solvers/dgsem_p4est/dg_parallel.jl index 712ede2bfce..eaa6ab5cee2 100644 --- a/src/solvers/dgsem_p4est/dg_parallel.jl +++ b/src/solvers/dgsem_p4est/dg_parallel.jl @@ -166,7 +166,8 @@ end # at `index_base`+1 in the MPI buffer. `data_size` is the data size associated with each small # position (i.e. position 1 or 2). The data corresponding to the large side (i.e. position 3) has # size `2 * data_size`. -@inline function buffer_mortar_indices(mesh::ParallelP4estMesh{2}, index_base, +@inline function buffer_mortar_indices(mesh::Union{ParallelP4estMesh{2}, + ParallelT8codeMesh{2}}, index_base, data_size) return ( # first, last for local element in position 1 (small element) @@ -185,7 +186,8 @@ end # at `index_base`+1 in the MPI buffer. `data_size` is the data size associated with each small # position (i.e. position 1 to 4). The data corresponding to the large side (i.e. position 5) has # size `4 * data_size`. 
-@inline function buffer_mortar_indices(mesh::ParallelP4estMesh{3}, index_base, +@inline function buffer_mortar_indices(mesh::Union{ParallelP4estMesh{3}, + ParallelT8codeMesh{3}}, index_base, data_size) return ( # first, last for local element in position 1 (small element) @@ -491,7 +493,8 @@ end # Exchange normal directions of small elements of the MPI mortars. They are needed on all involved # MPI ranks to calculate the mortar fluxes. -function exchange_normal_directions!(mpi_mortars, mpi_cache, mesh::ParallelP4estMesh, +function exchange_normal_directions!(mpi_mortars, mpi_cache, + mesh::Union{ParallelP4estMesh, ParallelT8codeMesh}, n_nodes) RealT = real(mesh) n_dims = ndims(mesh) diff --git a/src/solvers/dgsem_t8code/containers.jl b/src/solvers/dgsem_t8code/containers.jl index 093feb2985a..d7ff79fbf2f 100644 --- a/src/solvers/dgsem_t8code/containers.jl +++ b/src/solvers/dgsem_t8code/containers.jl @@ -18,19 +18,22 @@ function reinitialize_containers!(mesh::T8codeMesh, equations, dg::DGSEM, cache) @unpack boundaries = cache resize!(boundaries, mesh.nboundaries) - trixi_t8_fill_mesh_info(mesh.forest, elements, interfaces, mortars, boundaries, - mesh.boundary_names) + fill_mesh_info!(mesh, interfaces, mortars, boundaries, + mesh.boundary_names) return nothing end function count_required_surfaces!(mesh::T8codeMesh) - counts = trixi_t8_count_interfaces(mesh.forest) + counts = count_interfaces(mesh) mesh.nmortars = counts.mortars mesh.ninterfaces = counts.interfaces mesh.nboundaries = counts.boundaries + mesh.nmpimortars = counts.mpi_mortars + mesh.nmpiinterfaces = counts.mpi_interfaces + return counts end @@ -38,7 +41,9 @@ end function count_required_surfaces(mesh::T8codeMesh) return (interfaces = mesh.ninterfaces, mortars = mesh.nmortars, - boundaries = mesh.nboundaries) + boundaries = mesh.nboundaries, + mpi_interfaces = mesh.nmpiinterfaces, + mpi_mortars = mesh.nmpimortars) end # Compatibility to `dgsem_p4est/containers.jl`. 
diff --git a/src/solvers/dgsem_t8code/containers_2d.jl b/src/solvers/dgsem_t8code/containers_2d.jl index bf77826a34b..ce525bfdf65 100644 --- a/src/solvers/dgsem_t8code/containers_2d.jl +++ b/src/solvers/dgsem_t8code/containers_2d.jl @@ -26,6 +26,7 @@ function calc_node_coordinates!(node_coordinates, tree_class = t8_forest_get_tree_class(mesh.forest, itree) eclass_scheme = t8_forest_get_eclass_scheme(mesh.forest, tree_class) num_elements_in_tree = t8_forest_get_tree_num_elements(mesh.forest, itree) + global_itree = t8_forest_global_tree_id(mesh.forest, itree) for ielement in 0:(num_elements_in_tree - 1) element = t8_forest_get_element_in_tree(mesh.forest, itree, ielement) @@ -55,7 +56,7 @@ function calc_node_coordinates!(node_coordinates, multiply_dimensionwise!(view(node_coordinates, :, :, :, current_index += 1), matrix1, matrix2, view(mesh.tree_node_coordinates, :, :, :, - itree + 1), + global_itree + 1), tmp1) end end diff --git a/src/solvers/dgsem_t8code/containers_3d.jl b/src/solvers/dgsem_t8code/containers_3d.jl index f2d54ff07da..4d56bc734aa 100644 --- a/src/solvers/dgsem_t8code/containers_3d.jl +++ b/src/solvers/dgsem_t8code/containers_3d.jl @@ -28,6 +28,7 @@ function calc_node_coordinates!(node_coordinates, tree_class = t8_forest_get_tree_class(mesh.forest, itree) eclass_scheme = t8_forest_get_eclass_scheme(mesh.forest, tree_class) num_elements_in_tree = t8_forest_get_tree_num_elements(mesh.forest, itree) + global_itree = t8_forest_global_tree_id(mesh.forest, itree) for ielement in 0:(num_elements_in_tree - 1) element = t8_forest_get_element_in_tree(mesh.forest, itree, ielement) @@ -63,7 +64,7 @@ function calc_node_coordinates!(node_coordinates, current_index += 1), matrix1, matrix2, matrix3, view(mesh.tree_node_coordinates, :, :, :, :, - itree + 1), + global_itree + 1), tmp1) end end diff --git a/src/solvers/dgsem_t8code/containers_parallel.jl b/src/solvers/dgsem_t8code/containers_parallel.jl new file mode 100644 index 00000000000..0cb3f5887a0 --- /dev/null +++ b/src/solvers/dgsem_t8code/containers_parallel.jl @@ -0,0 +1,65 @@ +function reinitialize_containers!(mesh::ParallelT8codeMesh, equations, dg::DGSEM, cache) + @unpack elements, interfaces, boundaries, mortars, mpi_interfaces, mpi_mortars, + mpi_cache = cache + resize!(elements, ncells(mesh)) + init_elements!(elements, mesh, dg.basis) + + count_required_surfaces!(mesh) + required = count_required_surfaces(mesh) + + resize!(interfaces, required.interfaces) + + resize!(boundaries, required.boundaries) + + resize!(mortars, required.mortars) + + resize!(mpi_interfaces, required.mpi_interfaces) + + resize!(mpi_mortars, required.mpi_mortars) + + mpi_mesh_info = (mpi_mortars = mpi_mortars, + mpi_interfaces = mpi_interfaces, + + # Temporary arrays for updating `mpi_cache`. 
+ global_mortar_ids = fill(UInt64(0), nmpimortars(mpi_mortars)), + global_interface_ids = fill(UInt64(0), nmpiinterfaces(mpi_interfaces)), + neighbor_ranks_mortar = Vector{Vector{Int}}(undef, + nmpimortars(mpi_mortars)), + neighbor_ranks_interface = fill(-1, nmpiinterfaces(mpi_interfaces))) + + fill_mesh_info!(mesh, interfaces, mortars, boundaries, + mesh.boundary_names; mpi_mesh_info = mpi_mesh_info) + + init_mpi_cache!(mpi_cache, mesh, mpi_mesh_info, nvariables(equations), nnodes(dg), + eltype(elements)) + + empty!(mpi_mesh_info.global_mortar_ids) + empty!(mpi_mesh_info.global_interface_ids) + empty!(mpi_mesh_info.neighbor_ranks_mortar) + empty!(mpi_mesh_info.neighbor_ranks_interface) + + # Re-initialize and distribute normal directions of MPI mortars; requires + # MPI communication, so the MPI cache must be re-initialized beforehand. + init_normal_directions!(mpi_mortars, dg.basis, elements) + exchange_normal_directions!(mpi_mortars, mpi_cache, mesh, nnodes(dg)) + + return nothing +end + +# Compatibility to `dgsem_p4est/containers.jl`. +function init_mpi_interfaces!(interfaces, mesh::ParallelT8codeMesh) + # Do nothing. + return nothing +end + +# Compatibility to `dgsem_p4est/containers.jl`. +function init_mpi_mortars!(mortars, mesh::ParallelT8codeMesh) + # Do nothing. + return nothing +end + +# Compatibility to `dgsem_p4est/containers_parallel.jl`. +function init_mpi_mortars!(mpi_mortars, mesh::ParallelT8codeMesh, basis, elements) + # Do nothing. + return nothing +end diff --git a/src/solvers/dgsem_t8code/dg.jl b/src/solvers/dgsem_t8code/dg.jl index 6e9660c917d..e01b12e0f80 100644 --- a/src/solvers/dgsem_t8code/dg.jl +++ b/src/solvers/dgsem_t8code/dg.jl @@ -13,8 +13,8 @@ function create_cache(mesh::T8codeMesh, equations::AbstractEquations, dg::DG, :: boundaries = init_boundaries(mesh, equations, dg.basis, elements) mortars = init_mortars(mesh, equations, dg.basis, elements) - trixi_t8_fill_mesh_info(mesh.forest, elements, interfaces, mortars, boundaries, - mesh.boundary_names) + fill_mesh_info!(mesh, interfaces, mortars, boundaries, + mesh.boundary_names) cache = (; elements, interfaces, boundaries, mortars) @@ -29,4 +29,7 @@ end include("containers.jl") include("containers_2d.jl") include("containers_3d.jl") + +include("containers_parallel.jl") +include("dg_parallel.jl") end # @muladd diff --git a/src/solvers/dgsem_t8code/dg_parallel.jl b/src/solvers/dgsem_t8code/dg_parallel.jl new file mode 100644 index 00000000000..ece614b7d75 --- /dev/null +++ b/src/solvers/dgsem_t8code/dg_parallel.jl @@ -0,0 +1,135 @@ +@muladd begin +#! format: noindent + +# This method is called when a `SemidiscretizationHyperbolic` is constructed. +# It constructs the basic `cache` used throughout the simulation to compute +# the RHS etc. +function create_cache(mesh::ParallelT8codeMesh, equations::AbstractEquations, dg::DG, + ::Any, + ::Type{uEltype}) where {uEltype <: Real} + # Make sure to balance and partition the forest before creating any + # containers in case someone has tampered with forest after creating the + # mesh. 
+ balance!(mesh) + partition!(mesh) + + count_required_surfaces!(mesh) + + elements = init_elements(mesh, equations, dg.basis, uEltype) + mortars = init_mortars(mesh, equations, dg.basis, elements) + interfaces = init_interfaces(mesh, equations, dg.basis, elements) + boundaries = init_boundaries(mesh, equations, dg.basis, elements) + + mpi_mortars = init_mpi_mortars(mesh, equations, dg.basis, elements) + mpi_interfaces = init_mpi_interfaces(mesh, equations, dg.basis, elements) + + mpi_mesh_info = (mpi_mortars = mpi_mortars, + mpi_interfaces = mpi_interfaces, + global_mortar_ids = fill(UInt64(0), nmpimortars(mpi_mortars)), + global_interface_ids = fill(UInt64(0), + nmpiinterfaces(mpi_interfaces)), + neighbor_ranks_mortar = Vector{Vector{Int}}(undef, + nmpimortars(mpi_mortars)), + neighbor_ranks_interface = fill(-1, + nmpiinterfaces(mpi_interfaces))) + + fill_mesh_info!(mesh, interfaces, mortars, boundaries, + mesh.boundary_names; mpi_mesh_info = mpi_mesh_info) + + mpi_cache = init_mpi_cache(mesh, mpi_mesh_info, nvariables(equations), nnodes(dg), + uEltype) + + empty!(mpi_mesh_info.global_mortar_ids) + empty!(mpi_mesh_info.global_interface_ids) + empty!(mpi_mesh_info.neighbor_ranks_mortar) + empty!(mpi_mesh_info.neighbor_ranks_interface) + + init_normal_directions!(mpi_mortars, dg.basis, elements) + exchange_normal_directions!(mpi_mortars, mpi_cache, mesh, nnodes(dg)) + + cache = (; elements, interfaces, mpi_interfaces, boundaries, mortars, mpi_mortars, + mpi_cache) + + # Add specialized parts of the cache required to compute the volume integral etc. + cache = (; cache..., + create_cache(mesh, equations, dg.volume_integral, dg, uEltype)...) + cache = (; cache..., create_cache(mesh, equations, dg.mortar, uEltype)...) + + return cache +end + +function init_mpi_cache(mesh::ParallelT8codeMesh, mpi_mesh_info, nvars, nnodes, uEltype) + mpi_cache = P4estMPICache(uEltype) + init_mpi_cache!(mpi_cache, mesh, mpi_mesh_info, nvars, nnodes, uEltype) + return mpi_cache +end + +function init_mpi_cache!(mpi_cache::P4estMPICache, mesh::ParallelT8codeMesh, + mpi_mesh_info, nvars, nnodes, uEltype) + mpi_neighbor_ranks, mpi_neighbor_interfaces, mpi_neighbor_mortars = init_mpi_neighbor_connectivity(mpi_mesh_info, + mesh) + + mpi_send_buffers, mpi_recv_buffers, mpi_send_requests, mpi_recv_requests = init_mpi_data_structures(mpi_neighbor_interfaces, + mpi_neighbor_mortars, + ndims(mesh), + nvars, + nnodes, + uEltype) + + n_elements_global = Int(t8_forest_get_global_num_elements(mesh.forest)) + n_elements_local = Int(t8_forest_get_local_num_elements(mesh.forest)) + + n_elements_by_rank = Vector{Int}(undef, mpi_nranks()) + n_elements_by_rank[mpi_rank() + 1] = n_elements_local + + MPI.Allgather!(MPI.UBuffer(n_elements_by_rank, 1), mpi_comm()) + + n_elements_by_rank = OffsetArray(n_elements_by_rank, 0:(mpi_nranks() - 1)) + + # Account for 1-based indexing in Julia. + first_element_global_id = sum(n_elements_by_rank[0:(mpi_rank() - 1)]) + 1 + + @assert n_elements_global==sum(n_elements_by_rank) "error in total number of elements" + + @pack! 
mpi_cache = mpi_neighbor_ranks, mpi_neighbor_interfaces, + mpi_neighbor_mortars, + mpi_send_buffers, mpi_recv_buffers, + mpi_send_requests, mpi_recv_requests, + n_elements_by_rank, n_elements_global, + first_element_global_id + + return mpi_cache +end + +function init_mpi_neighbor_connectivity(mpi_mesh_info, mesh::ParallelT8codeMesh) + @unpack mpi_interfaces, mpi_mortars, global_interface_ids, neighbor_ranks_interface, global_mortar_ids, neighbor_ranks_mortar = mpi_mesh_info + + mpi_neighbor_ranks = vcat(neighbor_ranks_interface, neighbor_ranks_mortar...) |> + sort |> unique + + p = sortperm(global_interface_ids) + + neighbor_ranks_interface .= neighbor_ranks_interface[p] + interface_ids = collect(1:nmpiinterfaces(mpi_interfaces))[p] + + p = sortperm(global_mortar_ids) + neighbor_ranks_mortar .= neighbor_ranks_mortar[p] + mortar_ids = collect(1:nmpimortars(mpi_mortars))[p] + + # For each neighbor rank, init connectivity data structures + mpi_neighbor_interfaces = Vector{Vector{Int}}(undef, length(mpi_neighbor_ranks)) + mpi_neighbor_mortars = Vector{Vector{Int}}(undef, length(mpi_neighbor_ranks)) + for (index, d) in enumerate(mpi_neighbor_ranks) + mpi_neighbor_interfaces[index] = interface_ids[findall(==(d), + neighbor_ranks_interface)] + mpi_neighbor_mortars[index] = mortar_ids[findall(x -> (d in x), + neighbor_ranks_mortar)] + end + + # Check that all interfaces were counted exactly once + @assert mapreduce(length, +, mpi_neighbor_interfaces; init = 0) == + nmpiinterfaces(mpi_interfaces) + + return mpi_neighbor_ranks, mpi_neighbor_interfaces, mpi_neighbor_mortars +end +end # @muladd diff --git a/src/solvers/dgsem_tree/dg_2d_parallel.jl b/src/solvers/dgsem_tree/dg_2d_parallel.jl index 8095dae123a..157d462aa2f 100644 --- a/src/solvers/dgsem_tree/dg_2d_parallel.jl +++ b/src/solvers/dgsem_tree/dg_2d_parallel.jl @@ -446,7 +446,8 @@ function init_mpi_neighbor_connectivity(elements, mpi_interfaces, mpi_mortars, end function rhs!(du, u, t, - mesh::Union{ParallelTreeMesh{2}, ParallelP4estMesh{2}}, equations, + mesh::Union{ParallelTreeMesh{2}, ParallelP4estMesh{2}, + ParallelT8codeMesh{2}}, equations, initial_condition, boundary_conditions, source_terms::Source, dg::DG, cache) where {Source} # Start to receive MPI data diff --git a/test/test_mpi.jl b/test/test_mpi.jl index ad1ba4e835d..1ab1282b891 100644 --- a/test/test_mpi.jl +++ b/test/test_mpi.jl @@ -19,10 +19,12 @@ CI_ON_WINDOWS = (get(ENV, "GITHUB_ACTIONS", false) == "true") && Sys.iswindows() # TreeMesh tests include("test_mpi_tree.jl") - # P4estMesh tests + # P4estMesh and T8codeMesh tests include("test_mpi_p4est_2d.jl") + include("test_mpi_t8code_2d.jl") if !CI_ON_WINDOWS # see comment on `CI_ON_WINDOWS` above include("test_mpi_p4est_3d.jl") + include("test_mpi_t8code_3d.jl") end end # MPI diff --git a/test/test_mpi_p4est_2d.jl b/test/test_mpi_p4est_2d.jl index da90537fcfd..6d66bc68a26 100644 --- a/test/test_mpi_p4est_2d.jl +++ b/test/test_mpi_p4est_2d.jl @@ -33,6 +33,15 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "p4est_2d_dgsem") @test errors.linf≈[0.00011787417954578494] rtol=1.0e-4 end end + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end end @trixi_testset "elixir_advection_nonconforming_flag.jl" begin @@ -40,6 +49,15 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "p4est_2d_dgsem") "elixir_advection_nonconforming_flag.jl"), 
l2=[3.198940059144588e-5], linf=[0.00030636069494005547]) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end end @trixi_testset "elixir_advection_unstructured_flag.jl" begin @@ -47,6 +65,15 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "p4est_2d_dgsem") "elixir_advection_unstructured_flag.jl"), l2=[0.0005379687442422346], linf=[0.007438525029884735]) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end end @trixi_testset "elixir_advection_amr_solution_independent.jl" begin @@ -56,6 +83,15 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "p4est_2d_dgsem") l2=[4.949660644033807e-5], linf=[0.0004867846262313763], coverage_override=(maxiters = 6,)) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end end @trixi_testset "elixir_advection_amr_unstructured_flag.jl" begin @@ -64,6 +100,15 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "p4est_2d_dgsem") l2=[0.0012766060609964525], linf=[0.01750280631586159], coverage_override=(maxiters = 6,)) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end end @trixi_testset "elixir_advection_restart.jl" begin @@ -73,6 +118,15 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "p4est_2d_dgsem") # With the default `maxiters = 1` in coverage tests, # there would be no time steps after the restart. 
coverage_override=(maxiters = 100_000,)) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end end @trixi_testset "elixir_euler_source_terms_nonconforming_unstructured_flag.jl" begin @@ -90,6 +144,15 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "p4est_2d_dgsem") 0.03759938693042297, 0.08039824959535657, ]) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end end end end # P4estMesh MPI diff --git a/test/test_mpi_p4est_3d.jl b/test/test_mpi_p4est_3d.jl index 75f43650082..cca9093ec51 100644 --- a/test/test_mpi_p4est_3d.jl +++ b/test/test_mpi_p4est_3d.jl @@ -33,6 +33,15 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "p4est_3d_dgsem") @test errors.linf≈[0.0014548839020096516] rtol=1.0e-4 end end + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end end @trixi_testset "elixir_advection_amr.jl" begin @@ -46,6 +55,15 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "p4est_3d_dgsem") initial_refinement_level = 2, base_level = 2, med_level = 3, max_level = 4)) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end end @trixi_testset "elixir_advection_amr_unstructured_curved.jl" begin @@ -58,6 +76,15 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "p4est_3d_dgsem") initial_refinement_level = 0, base_level = 0, med_level = 1, max_level = 2)) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end end @trixi_testset "elixir_advection_restart.jl" begin @@ -67,12 +94,30 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "p4est_3d_dgsem") # With the default `maxiters = 1` in coverage tests, # there would be no time steps after the restart. 
coverage_override=(maxiters = 100_000,)) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end end @trixi_testset "elixir_advection_cubed_sphere.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_cubed_sphere.jl"), l2=[0.002006918015656413], linf=[0.027655117058380085]) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end end # Compressible Euler @@ -94,6 +139,15 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "p4est_3d_dgsem") 0.008526972236273522, ], tspan=(0.0, 0.01)) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end end @trixi_testset "elixir_euler_source_terms_nonperiodic.jl" begin @@ -114,6 +168,15 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "p4est_3d_dgsem") 0.01562861968368434, ], tspan=(0.0, 1.0)) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end end @trixi_testset "elixir_euler_ec.jl" begin @@ -134,6 +197,15 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "p4est_3d_dgsem") ], tspan=(0.0, 0.2), coverage_override=(polydeg = 3,)) # Prevent long compile time in CI + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end end @trixi_testset "elixir_euler_source_terms_nonperiodic_hohqmesh.jl" begin @@ -153,6 +225,15 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "p4est_3d_dgsem") 0.048396544302230504, 0.1154589758186293, ]) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end end end end # P4estMesh MPI diff --git a/test/test_mpi_t8code_2d.jl b/test/test_mpi_t8code_2d.jl new file mode 100644 index 00000000000..7c7fc03898c --- /dev/null +++ b/test/test_mpi_t8code_2d.jl @@ -0,0 +1,142 @@ +module TestExamplesMPIT8codeMesh2D + +using Test +using Trixi + +include("test_trixi.jl") + +const EXAMPLES_DIR = pkgdir(Trixi, "examples", "t8code_2d_dgsem") + +@testset "T8codeMesh MPI 2D" begin +#! format: noindent + +# Run basic tests +@testset "Examples 2D" begin + # Linear scalar advection + @trixi_testset "elixir_advection_basic.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_basic.jl"), + # Expected errors are exactly the same as with TreeMesh! 
+ l2=[8.311947673061856e-6], + linf=[6.627000273229378e-5]) + + @testset "error-based step size control" begin + Trixi.mpi_isroot() && println("-"^100) + Trixi.mpi_isroot() && + println("elixir_advection_basic.jl with error-based step size control") + + sol = solve(ode, RDPK3SpFSAL35(); abstol = 1.0e-4, reltol = 1.0e-4, + ode_default_options()..., callback = callbacks) + summary_callback() + errors = analysis_callback(sol) + if Trixi.mpi_isroot() + @test errors.l2≈[3.3022040342579066e-5] rtol=1.0e-4 + @test errors.linf≈[0.00011787417954578494] rtol=1.0e-4 + end + end + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + end + + @trixi_testset "elixir_advection_nonconforming_flag.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_advection_nonconforming_flag.jl"), + l2=[3.198940059144588e-5], + linf=[0.00030636069494005547]) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + end + + @trixi_testset "elixir_advection_unstructured_flag.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_advection_unstructured_flag.jl"), + l2=[0.0005379687442422346], + linf=[0.007438525029884735]) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + end + + @trixi_testset "elixir_advection_amr_solution_independent.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_advection_amr_solution_independent.jl"), + # Expected errors are exactly the same as with TreeMesh! 
+ l2=[4.933027431215839e-5], + linf=[0.00048678461161243136], + coverage_override=(maxiters = 6,)) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + end + + @trixi_testset "elixir_advection_amr_unstructured_flag.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_advection_amr_unstructured_flag.jl"), + l2=[0.001980652042312077], + linf=[0.0328882442132265], + coverage_override=(maxiters = 6,)) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + end + + @trixi_testset "elixir_euler_source_terms_nonconforming_unstructured_flag.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_euler_source_terms_nonconforming_unstructured_flag.jl"), + l2=[ + 0.0034516244508588046, + 0.0023420334036925493, + 0.0024261923964557187, + 0.004731710454271893, + ], + linf=[ + 0.04155789011775046, + 0.024772109862748914, + 0.03759938693042297, + 0.08039824959535657, + ]) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + end +end +end # T8codeMesh MPI + +end # module diff --git a/test/test_mpi_t8code_3d.jl b/test/test_mpi_t8code_3d.jl new file mode 100644 index 00000000000..a15690a7629 --- /dev/null +++ b/test/test_mpi_t8code_3d.jl @@ -0,0 +1,180 @@ +module TestExamplesMPIT8codeMesh3D + +using Test +using Trixi + +include("test_trixi.jl") + +const EXAMPLES_DIR = pkgdir(Trixi, "examples", "t8code_3d_dgsem") + +@testset "T8codeMesh MPI 3D" begin +#! format: noindent + +# Run basic tests +@testset "Examples 3D" begin + # Linear scalar advection + @trixi_testset "elixir_advection_basic.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_basic.jl"), + # Expected errors are exactly the same as with TreeMesh! + l2=[0.00016263963870641478], + linf=[0.0014537194925779984]) + + @testset "error-based step size control" begin + Trixi.mpi_isroot() && println("-"^100) + Trixi.mpi_isroot() && + println("elixir_advection_basic.jl with error-based step size control") + + sol = solve(ode, RDPK3SpFSAL35(); abstol = 1.0e-4, reltol = 1.0e-4, + ode_default_options()..., callback = callbacks) + summary_callback() + errors = analysis_callback(sol) + if Trixi.mpi_isroot() + @test errors.l2≈[0.00016800412839949264] rtol=1.0e-4 + @test errors.linf≈[0.0014548839020096516] rtol=1.0e-4 + end + end + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + end + + @trixi_testset "elixir_advection_amr.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_amr.jl"), + # Expected errors are exactly the same as with TreeMesh! 
+ l2=[1.1302812803902801e-5], + linf=[0.0007889950196294793], + # override values are different from the serial tests to ensure each process holds at least + # one element, otherwise OrdinaryDiffEq fails during initialization + coverage_override=(maxiters = 6, + initial_refinement_level = 2, + base_level = 2, med_level = 3, + max_level = 4)) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + end + + @trixi_testset "elixir_advection_amr_unstructured_curved.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_advection_amr_unstructured_curved.jl"), + l2=[2.0556575425846923e-5], + linf=[0.00105682693484822], + tspan=(0.0, 1.0), + coverage_override=(maxiters = 6, + initial_refinement_level = 0, + base_level = 0, med_level = 1, + max_level = 2)) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + end + + # Compressible Euler + @trixi_testset "elixir_euler_source_terms_nonconforming_unstructured_curved.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_euler_source_terms_nonconforming_unstructured_curved.jl"), + l2=[ + 4.070355207909268e-5, + 4.4993257426833716e-5, + 5.10588457841744e-5, + 5.102840924036687e-5, + 0.00019986264001630542, + ], + linf=[ + 0.0016987332417202072, + 0.003622956808262634, + 0.002029576258317789, + 0.0024206977281964193, + 0.008526972236273522, + ], + tspan=(0.0, 0.01)) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + end + + @trixi_testset "elixir_euler_source_terms_nonperiodic.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_euler_source_terms_nonperiodic.jl"), + l2=[ + 0.0015106060984283647, + 0.0014733349038567685, + 0.00147333490385685, + 0.001473334903856929, + 0.0028149479453087093, + ], + linf=[ + 0.008070806335238156, + 0.009007245083113125, + 0.009007245083121784, + 0.009007245083102688, + 0.01562861968368434, + ], + tspan=(0.0, 1.0)) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + end + + @trixi_testset "elixir_euler_ec.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_ec.jl"), + l2=[ + 0.010380390326164493, + 0.006192950051354618, + 0.005970674274073704, + 0.005965831290564327, + 0.02628875593094754, + ], + linf=[ + 0.3326911600075694, + 0.2824952141320467, + 0.41401037398065543, + 0.45574161423218573, + 0.8099577682187109, + ], + tspan=(0.0, 0.2), + coverage_override=(polydeg = 3,)) # Prevent long compile time in CI + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + end +end +end # T8codeMesh MPI + +end # module diff --git a/test/test_t8code_2d.jl b/test/test_t8code_2d.jl index ab95e068d02..d536a6dd73a 100644 --- a/test/test_t8code_2d.jl +++ b/test/test_t8code_2d.jl @@ 
-33,10 +33,9 @@ end @trixi_testset "test check_for_negative_volumes" begin @test_warn "Discovered negative volumes" begin # Unstructured mesh with six cells which have left-handed node ordering. - mesh_file = joinpath(EXAMPLES_DIR, "rectangle_with_negative_volumes.msh") - isfile(mesh_file) || - download("https://gist.githubusercontent.com/jmark/bfe0d45f8e369298d6cc637733819013/raw/cecf86edecc736e8b3e06e354c494b2052d41f7a/rectangle_with_negative_volumes.msh", - mesh_file) + mesh_file = Trixi.download("https://gist.githubusercontent.com/jmark/bfe0d45f8e369298d6cc637733819013/raw/cecf86edecc736e8b3e06e354c494b2052d41f7a/rectangle_with_negative_volumes.msh", + joinpath(EXAMPLES_DIR, + "rectangle_with_negative_volumes.msh")) # This call should throw a warning about negative volumes detected. mesh = T8codeMesh(mesh_file, 2) From c7cee980f56835e9c396424e742efe85da465042 Mon Sep 17 00:00:00 2001 From: Andrew Winters Date: Wed, 31 Jan 2024 19:09:15 +0100 Subject: [PATCH 14/22] Remove race condition in mpi testing (#1821) * remove race condition in mpi testing * add additional barriers --- test/test_mpi.jl | 2 ++ test/test_mpi_tree.jl | 3 ++- test/test_threaded.jl | 2 ++ 3 files changed, 6 insertions(+), 1 deletion(-) diff --git a/test/test_mpi.jl b/test/test_mpi.jl index 1ab1282b891..001d9bff86e 100644 --- a/test/test_mpi.jl +++ b/test/test_mpi.jl @@ -8,6 +8,7 @@ include("test_trixi.jl") # Start with a clean environment: remove Trixi.jl output directory if it exists outdir = "out" Trixi.mpi_isroot() && isdir(outdir) && rm(outdir, recursive = true) +Trixi.MPI.Barrier(Trixi.mpi_comm()) # CI with MPI and some tests fails often on Windows. Thus, we check whether this # is the case here. We use GitHub Actions, so we can check whether we run CI @@ -45,5 +46,6 @@ end # MPI supporting functionality # Clean up afterwards: delete Trixi.jl output directory Trixi.mpi_isroot() && @test_nowarn rm(outdir, recursive = true) +Trixi.MPI.Barrier(Trixi.mpi_comm()) end # module diff --git a/test/test_mpi_tree.jl b/test/test_mpi_tree.jl index 0831f6a1313..6351a405b5d 100644 --- a/test/test_mpi_tree.jl +++ b/test/test_mpi_tree.jl @@ -76,7 +76,8 @@ CI_ON_WINDOWS = (get(ENV, "GITHUB_ACTIONS", false) == "true") && Sys.iswindows() # Here, we also test that SaveSolutionCallback prints multiple mesh files with AMR # Start with a clean environment: remove Trixi.jl output directory if it exists outdir = "out" - isdir(outdir) && rm(outdir, recursive = true) + Trixi.mpi_isroot() && isdir(outdir) && rm(outdir, recursive = true) + Trixi.MPI.Barrier(Trixi.mpi_comm()) @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_amr_refine_twice.jl"), l2=[0.00020547512522578292], diff --git a/test/test_threaded.jl b/test/test_threaded.jl index dbbcbf4c7ce..a8a1b1b425a 100644 --- a/test/test_threaded.jl +++ b/test/test_threaded.jl @@ -8,6 +8,7 @@ include("test_trixi.jl") # Start with a clean environment: remove Trixi.jl output directory if it exists outdir = "out" Trixi.mpi_isroot() && isdir(outdir) && rm(outdir, recursive = true) +Trixi.MPI.Barrier(Trixi.mpi_comm()) @testset "Threaded tests" begin #! 
format: noindent @@ -471,5 +472,6 @@ end # Clean up afterwards: delete Trixi.jl output directory Trixi.mpi_isroot() && isdir(outdir) && @test_nowarn rm(outdir, recursive = true) +Trixi.MPI.Barrier(Trixi.mpi_comm()) end # module From dfd632e69631ff4dbb42215966d4f7a546b92816 Mon Sep 17 00:00:00 2001 From: Benjamin Bolm <74359358+bennibolm@users.noreply.github.com> Date: Wed, 31 Jan 2024 21:50:27 +0100 Subject: [PATCH 15/22] Add subcell positivity limiting of non-linear variables (#1738) * Add positivity limiting of non-linear variables * Revise derivative function call; Add default derivative version * Adapt test to actually test pos limiter for nonlinear variables * Add unit test to test default implementation of variable_derivative * Clean up comments and code * Rename Newton-bisection variables * Implement suggestions * Relocate functions * Implement suggestions * Change error message for negative value with low-order method * Add changes from main to new limiter * Update NEWS.md * Rename is_valid_state and gradient_u --------- Co-authored-by: Michael Schlottke-Lakemper --- NEWS.md | 1 + ...kelvin_helmholtz_instability_sc_subcell.jl | 91 ++++++++ .../elixir_mhd_shockcapturing_subcell.jl | 7 +- src/callbacks_stage/subcell_bounds_check.jl | 8 + .../subcell_bounds_check_2d.jl | 18 ++ src/equations/compressible_euler_2d.jl | 21 ++ src/equations/equations.jl | 6 + src/equations/ideal_glm_mhd_2d.jl | 23 ++ src/solvers/dgsem_tree/subcell_limiters.jl | 60 +++-- src/solvers/dgsem_tree/subcell_limiters_2d.jl | 215 +++++++++++++++++- test/test_tree_2d_euler.jl | 26 +++ test/test_tree_2d_mhd.jl | 32 +-- test/test_unit.jl | 23 +- 13 files changed, 496 insertions(+), 35 deletions(-) create mode 100644 examples/tree_2d_dgsem/elixir_euler_kelvin_helmholtz_instability_sc_subcell.jl diff --git a/NEWS.md b/NEWS.md index 3a3a504a911..02a723fca45 100644 --- a/NEWS.md +++ b/NEWS.md @@ -11,6 +11,7 @@ for human readability. - `flux_hllc` on non-cartesian meshes for `CompressibleEulerEquations{2,3}D` - Different boundary conditions for quad/hex meshes in Abaqus format, even if not generated by HOHQMesh, can now be digested by Trixi in 2D and 3D. +- Subcell (positivity) limiting support for nonlinear variables in 2D for `TreeMesh` ## Changes when updating to v0.6 from v0.5.x diff --git a/examples/tree_2d_dgsem/elixir_euler_kelvin_helmholtz_instability_sc_subcell.jl b/examples/tree_2d_dgsem/elixir_euler_kelvin_helmholtz_instability_sc_subcell.jl new file mode 100644 index 00000000000..1817672778a --- /dev/null +++ b/examples/tree_2d_dgsem/elixir_euler_kelvin_helmholtz_instability_sc_subcell.jl @@ -0,0 +1,91 @@ + +using OrdinaryDiffEq +using Trixi + +############################################################################### +# semidiscretization of the compressible Euler equations +gamma = 1.4 +equations = CompressibleEulerEquations2D(gamma) + +""" + initial_condition_kelvin_helmholtz_instability(x, t, equations::CompressibleEulerEquations2D) + +A version of the classical Kelvin-Helmholtz instability based on +- Andrés M. Rueda-Ramírez, Gregor J. 
Gassner (2021) + A Subcell Finite Volume Positivity-Preserving Limiter for DGSEM Discretizations + of the Euler Equations + [arXiv: 2102.06017](https://arxiv.org/abs/2102.06017) +""" +function initial_condition_kelvin_helmholtz_instability(x, t, + equations::CompressibleEulerEquations2D) + # change discontinuity to tanh + # typical resolution 128^2, 256^2 + # domain size is [-1,+1]^2 + slope = 15 + amplitude = 0.02 + B = tanh(slope * x[2] + 7.5) - tanh(slope * x[2] - 7.5) + rho = 0.5 + 0.75 * B + v1 = 0.5 * (B - 1) + v2 = 0.1 * sin(2 * pi * x[1]) + p = 1.0 + return prim2cons(SVector(rho, v1, v2, p), equations) +end +initial_condition = initial_condition_kelvin_helmholtz_instability + +surface_flux = flux_lax_friedrichs +volume_flux = flux_ranocha +polydeg = 3 +basis = LobattoLegendreBasis(polydeg) + +limiter_idp = SubcellLimiterIDP(equations, basis; + positivity_variables_cons = ["rho"], + positivity_variables_nonlinear = [pressure]) +volume_integral = VolumeIntegralSubcellLimiting(limiter_idp; + volume_flux_dg = volume_flux, + volume_flux_fv = surface_flux) +solver = DGSEM(basis, surface_flux, volume_integral) + +coordinates_min = (-1.0, -1.0) +coordinates_max = (1.0, 1.0) +mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 5, + n_cells_max = 100_000) +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) + +############################################################################### +# ODE solvers, callbacks etc. + +tspan = (0.0, 3.7) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 1000 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval) + +alive_callback = AliveCallback(analysis_interval = analysis_interval) + +save_solution = SaveSolutionCallback(interval = 100, + save_initial_solution = true, + save_final_solution = true, + solution_variables = cons2prim) + +save_restart = SaveRestartCallback(interval = 1000, + save_final_restart = true) + +stepsize_callback = StepsizeCallback(cfl = 0.7) + +callbacks = CallbackSet(summary_callback, + analysis_callback, alive_callback, + stepsize_callback, + save_restart, save_solution) + +############################################################################### +# run the simulation + +stage_callbacks = (SubcellLimiterIDPCorrection(), BoundsCheckCallback(save_errors = false)) + +sol = Trixi.solve(ode, Trixi.SimpleSSPRK33(stage_callbacks = stage_callbacks); + dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback + callback = callbacks); +summary_callback() # print the timer summary diff --git a/examples/tree_2d_dgsem/elixir_mhd_shockcapturing_subcell.jl b/examples/tree_2d_dgsem/elixir_mhd_shockcapturing_subcell.jl index fe9ad92467f..74d0370647a 100644 --- a/examples/tree_2d_dgsem/elixir_mhd_shockcapturing_subcell.jl +++ b/examples/tree_2d_dgsem/elixir_mhd_shockcapturing_subcell.jl @@ -22,7 +22,7 @@ function initial_condition_blast_wave(x, t, equations::IdealGlmMhdEquations2D) r = sqrt(x[1]^2 + x[2]^2) pmax = 10.0 - pmin = 1.0 + pmin = 0.01 rhomax = 1.0 rhomin = 0.01 if r <= 0.09 @@ -52,7 +52,8 @@ basis = LobattoLegendreBasis(3) limiter_idp = SubcellLimiterIDP(equations, basis; positivity_variables_cons = ["rho"], - positivity_correction_factor = 0.5) + positivity_variables_nonlinear = [pressure], + positivity_correction_factor = 0.1) volume_integral = VolumeIntegralSubcellLimiting(limiter_idp; volume_flux_dg = volume_flux, volume_flux_fv = surface_flux) @@ -84,7 +85,7 @@ 
save_solution = SaveSolutionCallback(interval = 100, save_final_solution = true, solution_variables = cons2prim) -cfl = 0.5 +cfl = 0.4 stepsize_callback = StepsizeCallback(cfl = cfl) glm_speed_callback = GlmSpeedCallback(glm_scale = 0.5, cfl = cfl) diff --git a/src/callbacks_stage/subcell_bounds_check.jl b/src/callbacks_stage/subcell_bounds_check.jl index 9f34a6b3b4b..4dbf44d29c4 100644 --- a/src/callbacks_stage/subcell_bounds_check.jl +++ b/src/callbacks_stage/subcell_bounds_check.jl @@ -97,6 +97,9 @@ function init_callback(callback::BoundsCheckCallback, semi, limiter::SubcellLimi end print(f, ", " * string(variables[v]) * "_min") end + for variable in limiter.positivity_variables_nonlinear + print(f, ", " * string(variable) * "_min") + end end println(f) end @@ -142,6 +145,11 @@ end println(string(variables[v]) * ":\n- positivity: ", idp_bounds_delta_global[Symbol(string(v), "_min")]) end + for variable in limiter.positivity_variables_nonlinear + variable_string = string(variable) + println(variable_string * ":\n- positivity: ", + idp_bounds_delta_global[Symbol(variable_string, "_min")]) + end end println("─"^100 * "\n") diff --git a/src/callbacks_stage/subcell_bounds_check_2d.jl b/src/callbacks_stage/subcell_bounds_check_2d.jl index 545d19b5136..19d73968c9a 100644 --- a/src/callbacks_stage/subcell_bounds_check_2d.jl +++ b/src/callbacks_stage/subcell_bounds_check_2d.jl @@ -60,6 +60,20 @@ deviation_threaded[stride_size * Threads.threadid()] = deviation end end + for variable in limiter.positivity_variables_nonlinear + key = Symbol(string(variable), "_min") + deviation_threaded = idp_bounds_delta_local[key] + @threaded for element in eachelement(solver, cache) + deviation = deviation_threaded[stride_size * Threads.threadid()] + for j in eachnode(solver), i in eachnode(solver) + var = variable(get_node_vars(u, equations, solver, i, j, element), + equations) + deviation = max(deviation, + variable_bounds[key][i, j, element] - var) + end + deviation_threaded[stride_size * Threads.threadid()] = deviation + end + end end for (key, _) in idp_bounds_delta_local @@ -92,6 +106,10 @@ print(f, ", ", idp_bounds_delta_local[Symbol(string(v), "_min")][stride_size]) end + for variable in limiter.positivity_variables_nonlinear + print(f, ", ", + idp_bounds_delta_local[Symbol(string(variable), "_min")][stride_size]) + end end println(f) end diff --git a/src/equations/compressible_euler_2d.jl b/src/equations/compressible_euler_2d.jl index 3c6f759db2b..f5a632723cf 100644 --- a/src/equations/compressible_euler_2d.jl +++ b/src/equations/compressible_euler_2d.jl @@ -1632,6 +1632,18 @@ end return p end +# Transformation from conservative variables u to d(p)/d(u) +@inline function gradient_conservative(::typeof(pressure), + u, equations::CompressibleEulerEquations2D) + rho, rho_v1, rho_v2, rho_e = u + + v1 = rho_v1 / rho + v2 = rho_v2 / rho + v_square = v1^2 + v2^2 + + return (equations.gamma - 1.0) * SVector(0.5 * v_square, -v1, -v2, 1.0) +end + @inline function density_pressure(u, equations::CompressibleEulerEquations2D) rho, rho_v1, rho_v2, rho_e = u rho_times_p = (equations.gamma - 1) * (rho * rho_e - 0.5 * (rho_v1^2 + rho_v2^2)) @@ -1699,4 +1711,13 @@ end @inline function energy_internal(cons, equations::CompressibleEulerEquations2D) return energy_total(cons, equations) - energy_kinetic(cons, equations) end + +# State validation for Newton-bisection method of subcell IDP limiting +@inline function Base.isvalid(u, equations::CompressibleEulerEquations2D) + p = pressure(u, equations) + if u[1] <= 0.0 || p <= 
0.0 + return false + end + return true +end end # @muladd diff --git a/src/equations/equations.jl b/src/equations/equations.jl index 7a3c326984d..c041bf117ba 100644 --- a/src/equations/equations.jl +++ b/src/equations/equations.jl @@ -376,6 +376,12 @@ of the correct length `nvariables(equations)`. """ function energy_internal end +# Default implementation of gradient for `variable`. Used for subcell limiting. +# Implementing a gradient function for a specific variable improves the performance. +@inline function gradient_conservative(variable, u, equations) + return ForwardDiff.gradient(x -> variable(x, equations), u) +end + #################################################################################################### # Include files with actual implementations for different systems of equations. diff --git a/src/equations/ideal_glm_mhd_2d.jl b/src/equations/ideal_glm_mhd_2d.jl index 43d1991e34b..4366cd32f08 100644 --- a/src/equations/ideal_glm_mhd_2d.jl +++ b/src/equations/ideal_glm_mhd_2d.jl @@ -1118,6 +1118,20 @@ end return p end +# Transformation from conservative variables u to d(p)/d(u) +@inline function gradient_conservative(::typeof(pressure), + u, equations::IdealGlmMhdEquations2D) + rho, rho_v1, rho_v2, rho_v3, rho_e, B1, B2, B3, psi = u + + v1 = rho_v1 / rho + v2 = rho_v2 / rho + v3 = rho_v3 / rho + v_square = v1^2 + v2^2 + v3^2 + + return (equations.gamma - 1.0) * + SVector(0.5 * v_square, -v1, -v2, -v3, 1.0, -B1, -B2, -B3, -psi) +end + @inline function density_pressure(u, equations::IdealGlmMhdEquations2D) rho, rho_v1, rho_v2, rho_v3, rho_e, B1, B2, B3, psi = u p = (equations.gamma - 1) * (rho_e - 0.5 * (rho_v1^2 + rho_v2^2 + rho_v3^2) / rho @@ -1384,6 +1398,15 @@ end cons[9]^2 / 2) end +# State validation for Newton-bisection method of subcell IDP limiting +@inline function Base.isvalid(u, equations::IdealGlmMhdEquations2D) + p = pressure(u, equations) + if u[1] <= 0.0 || p <= 0.0 + return false + end + return true +end + # Calculate the cross helicity (\vec{v}⋅\vec{B}) for a conservative state `cons' @inline function cross_helicity(cons, ::IdealGlmMhdEquations2D) return (cons[2] * cons[6] + cons[3] * cons[7] + cons[4] * cons[8]) / cons[1] diff --git a/src/solvers/dgsem_tree/subcell_limiters.jl b/src/solvers/dgsem_tree/subcell_limiters.jl index 055e7ce24a4..e433c953779 100644 --- a/src/solvers/dgsem_tree/subcell_limiters.jl +++ b/src/solvers/dgsem_tree/subcell_limiters.jl @@ -16,18 +16,28 @@ end SubcellLimiterIDP(equations::AbstractEquations, basis; local_minmax_variables_cons = String[], positivity_variables_cons = String[], - positivity_correction_factor = 0.1) + positivity_variables_nonlinear = [], + positivity_correction_factor = 0.1, + max_iterations_newton = 10, + newton_tolerances = (1.0e-12, 1.0e-14), + gamma_constant_newton = 2 * ndims(equations)) Subcell invariant domain preserving (IDP) limiting used with [`VolumeIntegralSubcellLimiting`](@ref) including: - Local maximum/minimum Zalesak-type limiting for conservative variables (`local_minmax_variables_cons`) -- Positivity limiting for conservative variables (`positivity_variables_cons`) +- Positivity limiting for conservative variables (`positivity_variables_cons`) and nonlinear variables +(`positivity_variables_nonlinear`) Conservative variables to be limited are passed as a vector of strings, e.g. `local_minmax_variables_cons = ["rho"]` -and `positivity_variables_cons = ["rho"]`. +and `positivity_variables_cons = ["rho"]`. For nonlinear variables the specific functions are +passed in a vector, e.g. 
`positivity_variables_nonlinear = [pressure]`. The bounds are calculated using the low-order FV solution. The positivity limiter uses `positivity_correction_factor` such that `u^new >= positivity_correction_factor * u^FV`. +The limiting of nonlinear variables uses a Newton-bisection method with a maximum of +`max_iterations_newton` iterations, relative and absolute tolerances of `newton_tolerances` +and a provisional update constant `gamma_constant_newton` (`gamma_constant_newton>=2*d`, +where `d = #dimensions`). See equation (20) of Pazner (2020) and equation (30) of Rueda-Ramírez et al. (2022). !!! note This limiter and the correction callback [`SubcellLimiterIDPCorrection`](@ref) only work together. @@ -45,22 +55,32 @@ The bounds are calculated using the low-order FV solution. The positivity limite !!! warning "Experimental implementation" This is an experimental feature and may change in future releases. """ -struct SubcellLimiterIDP{RealT <: Real, Cache} <: AbstractSubcellLimiter +struct SubcellLimiterIDP{RealT <: Real, LimitingVariablesNonlinear, Cache} <: + AbstractSubcellLimiter local_minmax::Bool local_minmax_variables_cons::Vector{Int} # Local mininum/maximum principles for conservative variables positivity::Bool positivity_variables_cons::Vector{Int} # Positivity for conservative variables + positivity_variables_nonlinear::LimitingVariablesNonlinear # Positivity for nonlinear variables positivity_correction_factor::RealT cache::Cache + max_iterations_newton::Int + newton_tolerances::Tuple{RealT, RealT} # Relative and absolute tolerances for Newton's method + gamma_constant_newton::RealT # Constant for the subcell limiting of convex (nonlinear) constraints end # this method is used when the limiter is constructed as for shock-capturing volume integrals function SubcellLimiterIDP(equations::AbstractEquations, basis; local_minmax_variables_cons = String[], positivity_variables_cons = String[], - positivity_correction_factor = 0.1) + positivity_variables_nonlinear = [], + positivity_correction_factor = 0.1, + max_iterations_newton = 10, + newton_tolerances = (1.0e-12, 1.0e-14), + gamma_constant_newton = 2 * ndims(equations)) local_minmax = (length(local_minmax_variables_cons) > 0) - positivity = (length(positivity_variables_cons) > 0) + positivity = (length(positivity_variables_cons) + + length(positivity_variables_nonlinear) > 0) local_minmax_variables_cons_ = get_variable_index.(local_minmax_variables_cons, equations) @@ -80,13 +100,20 @@ function SubcellLimiterIDP(equations::AbstractEquations, basis; bound_keys = (bound_keys..., Symbol(string(v), "_min")) end end + for variable in positivity_variables_nonlinear + bound_keys = (bound_keys..., Symbol(string(variable), "_min")) + end cache = create_cache(SubcellLimiterIDP, equations, basis, bound_keys) SubcellLimiterIDP{typeof(positivity_correction_factor), + typeof(positivity_variables_nonlinear), typeof(cache)}(local_minmax, local_minmax_variables_cons_, positivity, positivity_variables_cons_, - positivity_correction_factor, cache) + positivity_variables_nonlinear, + positivity_correction_factor, cache, + max_iterations_newton, newton_tolerances, + gamma_constant_newton) end function Base.show(io::IO, limiter::SubcellLimiterIDP) @@ -97,10 +124,15 @@ function Base.show(io::IO, limiter::SubcellLimiterIDP) if !(local_minmax || positivity) print(io, "No limiter selected => pure DG method") else - print(io, "limiter=(") - local_minmax && print(io, "min/max limiting, ") - positivity && print(io, "positivity") - print(io, "), ") + 
features = String[] + if local_minmax + push!(features, "local min/max") + end + if positivity + push!(features, "positivity") + end + join(io, features, ", ") + print(io, "Limiter=($features), ") end print(io, "Local bounds with FV solution") print(io, ")") @@ -120,15 +152,15 @@ function Base.show(io::IO, ::MIME"text/plain", limiter::SubcellLimiterIDP) if local_minmax setup = [ setup..., - "" => "local maximum/minimum bounds for conservative variables $(limiter.local_minmax_variables_cons)", + "" => "Local maximum/minimum limiting for conservative variables $(limiter.local_minmax_variables_cons)", ] end if positivity - string = "positivity for conservative variables $(limiter.positivity_variables_cons)" + string = "Positivity limiting for conservative variables $(limiter.positivity_variables_cons) and $(limiter.positivity_variables_nonlinear)" setup = [setup..., "" => string] setup = [ setup..., - "" => " positivity correction factor = $(limiter.positivity_correction_factor)", + "" => "- with positivity correction factor = $(limiter.positivity_correction_factor)", ] end setup = [ diff --git a/src/solvers/dgsem_tree/subcell_limiters_2d.jl b/src/solvers/dgsem_tree/subcell_limiters_2d.jl index 3d272359fe4..3f7954c8958 100644 --- a/src/solvers/dgsem_tree/subcell_limiters_2d.jl +++ b/src/solvers/dgsem_tree/subcell_limiters_2d.jl @@ -5,6 +5,10 @@ @muladd begin #! format: noindent +############################################################################### +# IDP Limiting +############################################################################### + # this method is used when the limiter is constructed as for shock-capturing volume integrals function create_cache(limiter::Type{SubcellLimiterIDP}, equations::AbstractEquations{2}, basis::LobattoLegendreBasis, bound_keys) @@ -66,6 +70,9 @@ function (limiter::SubcellLimiterIDP)(u::AbstractArray{<:Any, 4}, semi, dg::DGSE return nothing end +############################################################################### +# Calculation of local bounds using low-order FV solution + @inline function calc_bounds_twosided!(var_min, var_max, variable, u, t, semi) mesh, equations, dg, cache = mesh_equations_solver_cache(semi) # Calc bounds inside elements @@ -164,6 +171,9 @@ end return nothing end +############################################################################### +# Local minimum/maximum limiting + @inline function idp_local_minmax!(alpha, limiter, u, t, dt, semi) for variable in limiter.local_minmax_variables_cons idp_local_minmax!(alpha, limiter, u, t, dt, semi, variable) @@ -233,16 +243,36 @@ end return nothing end +############################################################################### +# Global positivity limiting + @inline function idp_positivity!(alpha, limiter, u, dt, semi) # Conservative variables for variable in limiter.positivity_variables_cons - idp_positivity!(alpha, limiter, u, dt, semi, variable) + @trixi_timeit timer() "conservative variables" idp_positivity_conservative!(alpha, + limiter, + u, + dt, + semi, + variable) + end + + # Nonlinear variables + for variable in limiter.positivity_variables_nonlinear + @trixi_timeit timer() "nonlinear variables" idp_positivity_nonlinear!(alpha, + limiter, + u, dt, + semi, + variable) end return nothing end -@inline function idp_positivity!(alpha, limiter, u, dt, semi, variable) +############################################################################### +# Global positivity limiting of conservative variables + +@inline function idp_positivity_conservative!(alpha, 
limiter, u, dt, semi, variable) mesh, equations, dg, cache = mesh_equations_solver_cache(semi) (; antidiffusive_flux1_L, antidiffusive_flux2_L, antidiffusive_flux1_R, antidiffusive_flux2_R) = cache.antidiffusive_fluxes (; inverse_weights) = dg.basis @@ -256,7 +286,7 @@ end for j in eachnode(dg), i in eachnode(dg) var = u[variable, i, j, element] if var < 0 - error("Safe $variable is not safe. element=$element, node: $i $j, value=$var") + error("Safe low-order method produces negative value for conservative variable $variable. Try a smaller time step.") end # Compute bound @@ -302,4 +332,183 @@ end return nothing end + +@inline function idp_positivity_nonlinear!(alpha, limiter, u, dt, semi, variable) + _, equations, dg, cache = mesh_equations_solver_cache(semi) + (; positivity_correction_factor) = limiter + + (; variable_bounds) = limiter.cache.subcell_limiter_coefficients + var_min = variable_bounds[Symbol(string(variable), "_min")] + + @threaded for element in eachelement(dg, semi.cache) + inverse_jacobian = cache.elements.inverse_jacobian[element] + for j in eachnode(dg), i in eachnode(dg) + # Compute bound + u_local = get_node_vars(u, equations, dg, i, j, element) + var = variable(u_local, equations) + if var < 0 + error("Safe low-order method produces negative value for variable $variable. Try a smaller time step.") + end + var_min[i, j, element] = positivity_correction_factor * var + + # Perform Newton's bisection method to find new alpha + newton_loops_alpha!(alpha, var_min[i, j, element], u_local, i, j, element, + variable, initial_check_nonnegative_newton_idp, + final_check_nonnegative_newton_idp, inverse_jacobian, + dt, equations, dg, cache, limiter) + end + end + + return nothing +end + +@inline function newton_loops_alpha!(alpha, bound, u, i, j, element, variable, + initial_check, final_check, inverse_jacobian, dt, + equations, dg, cache, limiter) + (; inverse_weights) = dg.basis + (; antidiffusive_flux1_L, antidiffusive_flux2_L, antidiffusive_flux1_R, antidiffusive_flux2_R) = cache.antidiffusive_fluxes + + (; gamma_constant_newton) = limiter + + # negative xi direction + antidiffusive_flux = gamma_constant_newton * inverse_jacobian * inverse_weights[i] * + get_node_vars(antidiffusive_flux1_R, equations, dg, i, j, + element) + newton_loop!(alpha, bound, u, i, j, element, variable, initial_check, final_check, + equations, dt, limiter, antidiffusive_flux) + + # positive xi direction + antidiffusive_flux = -gamma_constant_newton * inverse_jacobian * + inverse_weights[i] * + get_node_vars(antidiffusive_flux1_L, equations, dg, i + 1, j, + element) + newton_loop!(alpha, bound, u, i, j, element, variable, initial_check, final_check, + equations, dt, limiter, antidiffusive_flux) + + # negative eta direction + antidiffusive_flux = gamma_constant_newton * inverse_jacobian * inverse_weights[j] * + get_node_vars(antidiffusive_flux2_R, equations, dg, i, j, + element) + newton_loop!(alpha, bound, u, i, j, element, variable, initial_check, final_check, + equations, dt, limiter, antidiffusive_flux) + + # positive eta direction + antidiffusive_flux = -gamma_constant_newton * inverse_jacobian * + inverse_weights[j] * + get_node_vars(antidiffusive_flux2_L, equations, dg, i, j + 1, + element) + newton_loop!(alpha, bound, u, i, j, element, variable, initial_check, final_check, + equations, dt, limiter, antidiffusive_flux) + + return nothing +end + +@inline function newton_loop!(alpha, bound, u, i, j, element, variable, initial_check, + final_check, equations, dt, limiter, antidiffusive_flux) + 
newton_reltol, newton_abstol = limiter.newton_tolerances + + beta = 1 - alpha[i, j, element] + + beta_L = 0 # alpha = 1 + beta_R = beta # No higher beta (lower alpha) than the current one + + u_curr = u + beta * dt * antidiffusive_flux + + # If state is valid, perform initial check and return if correction is not needed + if isvalid(u_curr, equations) + goal = goal_function_newton_idp(variable, bound, u_curr, equations) + + initial_check(bound, goal, newton_abstol) && return nothing + end + + # Newton iterations + for iter in 1:(limiter.max_iterations_newton) + beta_old = beta + + # If the state is valid, evaluate d(goal)/d(beta) + if isvalid(u_curr, equations) + dgoal_dbeta = dgoal_function_newton_idp(variable, u_curr, dt, + antidiffusive_flux, equations) + else # Otherwise, perform a bisection step + dgoal_dbeta = 0 + end + + if dgoal_dbeta != 0 + # Update beta with Newton's method + beta = beta - goal / dgoal_dbeta + end + + # Check bounds + if (beta < beta_L) || (beta > beta_R) || (dgoal_dbeta == 0) || isnan(beta) + # Out of bounds, do a bisection step + beta = 0.5 * (beta_L + beta_R) + # Get new u + u_curr = u + beta * dt * antidiffusive_flux + + # If the state is invalid, finish bisection step without checking tolerance and iterate further + if !isvalid(u_curr, equations) + beta_R = beta + continue + end + + # Check new beta for condition and update bounds + goal = goal_function_newton_idp(variable, bound, u_curr, equations) + if initial_check(bound, goal, newton_abstol) + # New beta fulfills condition + beta_L = beta + else + # New beta does not fulfill condition + beta_R = beta + end + else + # Get new u + u_curr = u + beta * dt * antidiffusive_flux + + # If the state is invalid, redefine right bound without checking tolerance and iterate further + if !isvalid(u_curr, equations) + beta_R = beta + continue + end + + # Evaluate goal function + goal = goal_function_newton_idp(variable, bound, u_curr, equations) + end + + # Check relative tolerance + if abs(beta_old - beta) <= newton_reltol + break + end + + # Check absolute tolerance + if final_check(bound, goal, newton_abstol) + break + end + end + + new_alpha = 1 - beta + if alpha[i, j, element] > new_alpha + newton_abstol + error("Alpha is getting smaller. 
old: $(alpha[i, j, element]), new: $new_alpha") + else + alpha[i, j, element] = new_alpha + end + + return nothing +end + +### Auxiliary routines for Newton's bisection method ### +# Initial checks +@inline initial_check_nonnegative_newton_idp(bound, goal, newton_abstol) = goal <= 0 + +# Goal and d(Goal)d(u) function +@inline goal_function_newton_idp(variable, bound, u, equations) = bound - + variable(u, equations) +@inline function dgoal_function_newton_idp(variable, u, dt, antidiffusive_flux, + equations) + -dot(gradient_conservative(variable, u, equations), dt * antidiffusive_flux) +end + +# Final checks +@inline function final_check_nonnegative_newton_idp(bound, goal, newton_abstol) + (goal <= eps()) && (goal > -max(newton_abstol, abs(bound) * newton_abstol)) +end end # @muladd diff --git a/test/test_tree_2d_euler.jl b/test/test_tree_2d_euler.jl index 61b5c54b5e9..b937abe92c0 100644 --- a/test/test_tree_2d_euler.jl +++ b/test/test_tree_2d_euler.jl @@ -581,6 +581,32 @@ end end end +@trixi_testset "elixir_euler_kelvin_helmholtz_instability_sc_subcell.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_euler_kelvin_helmholtz_instability_sc_subcell.jl"), + l2=[ + 0.42185634563805724, + 0.1686471269704017, + 0.18240674916968103, + 0.17858250604280654, + ], + linf=[ + 1.7012978064377158, + 0.7149714986746726, + 0.5822547982757897, + 0.7300051017382696, + ], + tspan=(0.0, 2.0)) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 15000 + end +end + @trixi_testset "elixir_euler_colliding_flow.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_colliding_flow.jl"), l2=[ diff --git a/test/test_tree_2d_mhd.jl b/test/test_tree_2d_mhd.jl index 953c077c0a3..1f8458075aa 100644 --- a/test/test_tree_2d_mhd.jl +++ b/test/test_tree_2d_mhd.jl @@ -332,24 +332,28 @@ end @trixi_testset "elixir_mhd_shockcapturing_subcell.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_mhd_shockcapturing_subcell.jl"), - l2=[2.9974425783503109e-02, - 7.2849646345685956e-02, - 7.2488477174662239e-02, + l2=[ + 3.2064026219236076e-02, + 7.2461094392606618e-02, + 7.2380202888062711e-02, 0.0000000000000000e+00, - 1.2507971380965512e+00, - 1.8929505145499678e-02, - 1.2218606317164420e-02, + 8.6293936673145932e-01, + 8.4091669534557805e-03, + 5.2156364913231732e-03, 0.0000000000000000e+00, - 3.0154796910479838e-03], - linf=[3.2147382412340830e-01, - 1.3709471664007811e+00, - 1.3465154685288383e+00, + 2.0786952301129021e-04, + ], + linf=[ + 3.8778760255775635e-01, + 9.4666683953698927e-01, + 9.4618924645661928e-01, 0.0000000000000000e+00, - 1.6051257523415284e+01, - 3.0564266749926644e-01, - 2.3908016329805595e-01, + 1.0980297261521951e+01, + 1.0264404591009069e-01, + 1.0655686942176350e-01, 0.0000000000000000e+00, - 1.3711262178549158e-01], + 6.1013422157115546e-03, + ], tspan=(0.0, 0.003)) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) diff --git a/test/test_unit.jl b/test/test_unit.jl index e8a8effbe29..7943d952f71 100644 --- a/test/test_unit.jl +++ b/test/test_unit.jl @@ -416,7 +416,8 @@ end indicator_hg = IndicatorHennemannGassner(1.0, 0.0, true, "variable", "cache") @test_nowarn show(stdout, indicator_hg) - limiter_idp = SubcellLimiterIDP(true, [1], true, [1], 0.1, "cache") + limiter_idp = SubcellLimiterIDP(true, [1], true, [1], ["variable"], 0.1, "cache", 1, + (1.0, 1.0), 1.0) 
@test_nowarn show(stdout, limiter_idp) # TODO: TrixiShallowWater: move unit test @@ -1220,6 +1221,26 @@ end end end +@testset "Consistency check for `gradient_conservative` routine" begin + # Set up conservative variables, equations + u = [ + 0.5011914484393387, + 0.8829127712445113, + 0.43024132987932817, + 0.7560616633050348, + ] + + equations = CompressibleEulerEquations2D(1.4) + + # Define wrapper function for pressure in order to call default implementation + function pressure_test(u, equations) + return pressure(u, equations) + end + + @test Trixi.gradient_conservative(pressure_test, u, equations) ≈ + Trixi.gradient_conservative(pressure, u, equations) +end + @testset "Equivalent Fluxes" begin # Set up equations and dummy conservative variables state # Burgers' Equation From e2c92f32457e22d6f8b766bf1ecd7a25d413cc6e Mon Sep 17 00:00:00 2001 From: Daniel Doehring Date: Thu, 1 Feb 2024 09:35:16 +0100 Subject: [PATCH 16/22] Move Jacobian for para P4est to respective files, add muladd (#1807) * Move Jacobian for para P4est to respective files, add muladd * fmt * compare checks without muladd * update test val for muladd * test vals --------- Co-authored-by: Jesse Chan <1156048+jlchan@users.noreply.github.com> --- src/solvers/dgsem_p4est/dg_2d_parabolic.jl | 176 +++++++++++++++------ src/solvers/dgsem_p4est/dg_3d_parabolic.jl | 137 +++++++++++----- src/solvers/dgsem_tree/dg_2d_parabolic.jl | 18 --- src/solvers/dgsem_tree/dg_3d_parabolic.jl | 18 --- test/test_parabolic_2d.jl | 4 +- 5 files changed, 228 insertions(+), 125 deletions(-) diff --git a/src/solvers/dgsem_p4est/dg_2d_parabolic.jl b/src/solvers/dgsem_p4est/dg_2d_parabolic.jl index 299f2f6140a..ed21f371449 100644 --- a/src/solvers/dgsem_p4est/dg_2d_parabolic.jl +++ b/src/solvers/dgsem_p4est/dg_2d_parabolic.jl @@ -1,7 +1,15 @@ +# By default, Julia/LLVM does not use fused multiply-add operations (FMAs). +# Since these FMAs can increase the performance of many numerical algorithms, +# we need to opt-in explicitly. +# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details. +@muladd begin +#! format: noindent + # This method is called when a SemidiscretizationHyperbolicParabolic is constructed. # It constructs the basic `cache` used throughout the simulation to compute # the RHS etc. 
-function create_cache_parabolic(mesh::P4estMesh{2}, equations_hyperbolic::AbstractEquations, +function create_cache_parabolic(mesh::P4estMesh{2}, + equations_hyperbolic::AbstractEquations, equations_parabolic::AbstractEquationsParabolic, dg::DG, parabolic_scheme, RealT, uEltype) balance!(mesh) @@ -167,12 +175,14 @@ function calc_gradient!(gradients, u_transformed, t, element) for ii in eachnode(dg) - multiply_add_to_node_vars!(gradients_x, derivative_dhat[ii, i], u_node, + multiply_add_to_node_vars!(gradients_x, derivative_dhat[ii, i], + u_node, equations_parabolic, dg, ii, j, element) end for jj in eachnode(dg) - multiply_add_to_node_vars!(gradients_y, derivative_dhat[jj, j], u_node, + multiply_add_to_node_vars!(gradients_y, derivative_dhat[jj, j], + u_node, equations_parabolic, dg, i, jj, element) end end @@ -185,9 +195,11 @@ function calc_gradient!(gradients, u_transformed, t, Ja21, Ja22 = get_contravariant_vector(2, contravariant_vectors, i, j, element) - gradients_reference_1 = get_node_vars(gradients_x, equations_parabolic, dg, + gradients_reference_1 = get_node_vars(gradients_x, equations_parabolic, + dg, i, j, element) - gradients_reference_2 = get_node_vars(gradients_y, equations_parabolic, dg, + gradients_reference_2 = get_node_vars(gradients_y, equations_parabolic, + dg, i, j, element) # note that the contravariant vectors are transposed compared with computations of flux @@ -199,9 +211,11 @@ function calc_gradient!(gradients, u_transformed, t, gradient_y_node = Ja12 * gradients_reference_1 + Ja22 * gradients_reference_2 - set_node_vars!(gradients_x, gradient_x_node, equations_parabolic, dg, i, j, + set_node_vars!(gradients_x, gradient_x_node, equations_parabolic, dg, i, + j, element) - set_node_vars!(gradients_y, gradient_y_node, equations_parabolic, dg, i, j, + set_node_vars!(gradients_y, gradient_y_node, equations_parabolic, dg, i, + j, element) end end @@ -219,7 +233,8 @@ function calc_gradient!(gradients, u_transformed, t, @trixi_timeit timer() "interface flux" begin calc_interface_flux!(cache_parabolic.elements.surface_flux_values, mesh, False(), # False() = no nonconservative terms - equations_parabolic, dg.surface_integral, dg, cache_parabolic) + equations_parabolic, dg.surface_integral, dg, + cache_parabolic) end # Prolong solution to boundaries @@ -231,7 +246,8 @@ function calc_gradient!(gradients, u_transformed, t, # Calculate boundary fluxes @trixi_timeit timer() "boundary flux" begin calc_boundary_flux_gradients!(cache_parabolic, t, boundary_conditions_parabolic, - mesh, equations_parabolic, dg.surface_integral, dg) + mesh, equations_parabolic, dg.surface_integral, + dg) end # Prolong solution to mortars. 
This resues the hyperbolic version of `prolong2mortars` @@ -268,70 +284,94 @@ function calc_gradient!(gradients, u_transformed, t, # Compute x-component of gradients # surface at -x - normal_direction_x, _ = get_normal_direction(1, contravariant_vectors, + normal_direction_x, _ = get_normal_direction(1, + contravariant_vectors, 1, l, element) gradients_x[v, 1, l, element] = (gradients_x[v, 1, l, element] + - surface_flux_values[v, l, 1, element] * + surface_flux_values[v, l, 1, + element] * factor_1 * normal_direction_x) # surface at +x - normal_direction_x, _ = get_normal_direction(2, contravariant_vectors, + normal_direction_x, _ = get_normal_direction(2, + contravariant_vectors, nnodes(dg), l, element) - gradients_x[v, nnodes(dg), l, element] = (gradients_x[v, nnodes(dg), l, + gradients_x[v, nnodes(dg), l, element] = (gradients_x[v, nnodes(dg), + l, element] + - surface_flux_values[v, l, 2, + surface_flux_values[v, l, + 2, element] * - factor_2 * normal_direction_x) + factor_2 * + normal_direction_x) # surface at -y - normal_direction_x, _ = get_normal_direction(3, contravariant_vectors, + normal_direction_x, _ = get_normal_direction(3, + contravariant_vectors, l, 1, element) gradients_x[v, l, 1, element] = (gradients_x[v, l, 1, element] + - surface_flux_values[v, l, 3, element] * + surface_flux_values[v, l, 3, + element] * factor_1 * normal_direction_x) # surface at +y - normal_direction_x, _ = get_normal_direction(4, contravariant_vectors, + normal_direction_x, _ = get_normal_direction(4, + contravariant_vectors, l, nnodes(dg), element) - gradients_x[v, l, nnodes(dg), element] = (gradients_x[v, l, nnodes(dg), + gradients_x[v, l, nnodes(dg), element] = (gradients_x[v, l, + nnodes(dg), element] + - surface_flux_values[v, l, 4, + surface_flux_values[v, l, + 4, element] * - factor_2 * normal_direction_x) + factor_2 * + normal_direction_x) # Compute y-component of gradients # surface at -x - _, normal_direction_y = get_normal_direction(1, contravariant_vectors, + _, normal_direction_y = get_normal_direction(1, + contravariant_vectors, 1, l, element) gradients_y[v, 1, l, element] = (gradients_y[v, 1, l, element] + - surface_flux_values[v, l, 1, element] * + surface_flux_values[v, l, 1, + element] * factor_1 * normal_direction_y) # surface at +x - _, normal_direction_y = get_normal_direction(2, contravariant_vectors, + _, normal_direction_y = get_normal_direction(2, + contravariant_vectors, nnodes(dg), l, element) - gradients_y[v, nnodes(dg), l, element] = (gradients_y[v, nnodes(dg), l, + gradients_y[v, nnodes(dg), l, element] = (gradients_y[v, nnodes(dg), + l, element] + - surface_flux_values[v, l, 2, + surface_flux_values[v, l, + 2, element] * - factor_2 * normal_direction_y) + factor_2 * + normal_direction_y) # surface at -y - _, normal_direction_y = get_normal_direction(3, contravariant_vectors, + _, normal_direction_y = get_normal_direction(3, + contravariant_vectors, l, 1, element) gradients_y[v, l, 1, element] = (gradients_y[v, l, 1, element] + - surface_flux_values[v, l, 3, element] * + surface_flux_values[v, l, 3, + element] * factor_1 * normal_direction_y) # surface at +y - _, normal_direction_y = get_normal_direction(4, contravariant_vectors, + _, normal_direction_y = get_normal_direction(4, + contravariant_vectors, l, nnodes(dg), element) - gradients_y[v, l, nnodes(dg), element] = (gradients_y[v, l, nnodes(dg), + gradients_y[v, l, nnodes(dg), element] = (gradients_y[v, l, + nnodes(dg), element] + - surface_flux_values[v, l, 4, + surface_flux_values[v, l, + 4, element] * - 
factor_2 * normal_direction_y) + factor_2 * + normal_direction_y) end end end @@ -444,24 +484,30 @@ function calc_volume_integral!(du, flux_viscous, @threaded for element in eachelement(dg, cache) # Calculate volume terms in one element for j in eachnode(dg), i in eachnode(dg) - flux1 = get_node_vars(flux_viscous_x, equations_parabolic, dg, i, j, element) - flux2 = get_node_vars(flux_viscous_y, equations_parabolic, dg, i, j, element) + flux1 = get_node_vars(flux_viscous_x, equations_parabolic, dg, i, j, + element) + flux2 = get_node_vars(flux_viscous_y, equations_parabolic, dg, i, j, + element) # Compute the contravariant flux by taking the scalar product of the # first contravariant vector Ja^1 and the flux vector - Ja11, Ja12 = get_contravariant_vector(1, contravariant_vectors, i, j, element) + Ja11, Ja12 = get_contravariant_vector(1, contravariant_vectors, i, j, + element) contravariant_flux1 = Ja11 * flux1 + Ja12 * flux2 for ii in eachnode(dg) - multiply_add_to_node_vars!(du, derivative_dhat[ii, i], contravariant_flux1, + multiply_add_to_node_vars!(du, derivative_dhat[ii, i], + contravariant_flux1, equations_parabolic, dg, ii, j, element) end # Compute the contravariant flux by taking the scalar product of the # second contravariant vector Ja^2 and the flux vector - Ja21, Ja22 = get_contravariant_vector(2, contravariant_vectors, i, j, element) + Ja21, Ja22 = get_contravariant_vector(2, contravariant_vectors, i, j, + element) contravariant_flux2 = Ja21 * flux1 + Ja22 * flux2 for jj in eachnode(dg) - multiply_add_to_node_vars!(du, derivative_dhat[jj, j], contravariant_flux2, + multiply_add_to_node_vars!(du, derivative_dhat[jj, j], + contravariant_flux2, equations_parabolic, dg, i, jj, element) end end @@ -503,7 +549,8 @@ function prolong2interfaces!(cache_parabolic, flux_viscous, # this is the outward normal direction on the primary element normal_direction = get_normal_direction(primary_direction, contravariant_vectors, - i_primary, j_primary, primary_element) + i_primary, j_primary, + primary_element) for v in eachvariable(equations_parabolic) # OBS! `interfaces.u` stores the interpolated *fluxes* and *not the solution*! @@ -602,7 +649,8 @@ function calc_interface_flux!(surface_flux_values, # primary element. We assume a BR-1 type of flux. 
viscous_flux_normal_ll, viscous_flux_normal_rr = get_surface_node_vars(cache_parabolic.interfaces.u, equations_parabolic, - dg, node, + dg, + node, interface) flux = 0.5 * (viscous_flux_normal_ll + viscous_flux_normal_rr) @@ -624,9 +672,11 @@ function calc_interface_flux!(surface_flux_values, end function prolong2mortars_divergence!(cache, flux_viscous::Vector{Array{uEltype, 4}}, - mesh::Union{P4estMesh{2}, T8codeMesh{2}}, equations, + mesh::Union{P4estMesh{2}, T8codeMesh{2}}, + equations, mortar_l2::LobattoLegendreMortarL2, - surface_integral, dg::DGSEM) where {uEltype <: Real} + surface_integral, + dg::DGSEM) where {uEltype <: Real} @unpack neighbor_ids, node_indices = cache.mortars @unpack contravariant_vectors = cache.elements index_range = eachnode(dg) @@ -683,7 +733,8 @@ function prolong2mortars_divergence!(cache, flux_viscous::Vector{Array{uEltype, j_large = j_large_start element = neighbor_ids[3, mortar] for i in eachnode(dg) - normal_direction = get_normal_direction(direction_index, contravariant_vectors, + normal_direction = get_normal_direction(direction_index, + contravariant_vectors, i_large, j_large, element) for v in eachvariable(equations) @@ -732,8 +783,10 @@ function calc_mortar_flux_divergence!(surface_flux_values, for position in 1:2 for node in eachnode(dg) for v in eachvariable(equations) - viscous_flux_normal_ll = cache.mortars.u[1, v, position, node, mortar] - viscous_flux_normal_rr = cache.mortars.u[2, v, position, node, mortar] + viscous_flux_normal_ll = cache.mortars.u[1, v, position, node, + mortar] + viscous_flux_normal_rr = cache.mortars.u[2, v, position, node, + mortar] # TODO: parabolic; only BR1 at the moment fstar[position][v, node] = 0.5 * (viscous_flux_normal_ll + @@ -824,7 +877,8 @@ end function calc_boundary_flux_gradients!(cache, t, boundary_condition::Union{BoundaryConditionPeriodic, BoundaryConditionDoNothing}, - mesh::P4estMesh, equations, surface_integral, dg::DG) + mesh::P4estMesh, equations, surface_integral, + dg::DG) @assert isempty(eachboundary(dg, cache)) end @@ -913,7 +967,8 @@ function calc_boundary_flux!(cache, t, boundary_index) # Outward-pointing normal direction (not normalized) - normal_direction = get_normal_direction(direction_index, contravariant_vectors, + normal_direction = get_normal_direction(direction_index, + contravariant_vectors, i_node, j_node, element) # TODO: revisit if we want more general boundary treatments. 
@@ -922,11 +977,13 @@ function calc_boundary_flux!(cache, t, flux_inner = u_inner # Coordinates at boundary node - x = get_node_coords(node_coordinates, equations_parabolic, dg, i_node, j_node, + x = get_node_coords(node_coordinates, equations_parabolic, dg, i_node, + j_node, element) flux_ = boundary_condition_parabolic(flux_inner, u_inner, normal_direction, - x, t, operator_type, equations_parabolic) + x, t, operator_type, + equations_parabolic) # Copy flux to element storage in the correct orientation for v in eachvariable(equations_parabolic) @@ -938,3 +995,22 @@ function calc_boundary_flux!(cache, t, end end end + +function apply_jacobian_parabolic!(du, mesh::P4estMesh{2}, + equations::AbstractEquationsParabolic, + dg::DG, cache) + @unpack inverse_jacobian = cache.elements + + @threaded for element in eachelement(dg, cache) + for j in eachnode(dg), i in eachnode(dg) + factor = inverse_jacobian[i, j, element] + + for v in eachvariable(equations) + du[v, i, j, element] *= factor + end + end + end + + return nothing +end +end # @muladd diff --git a/src/solvers/dgsem_p4est/dg_3d_parabolic.jl b/src/solvers/dgsem_p4est/dg_3d_parabolic.jl index 83d663809a7..63d431d35d5 100644 --- a/src/solvers/dgsem_p4est/dg_3d_parabolic.jl +++ b/src/solvers/dgsem_p4est/dg_3d_parabolic.jl @@ -1,7 +1,15 @@ +# By default, Julia/LLVM does not use fused multiply-add operations (FMAs). +# Since these FMAs can increase the performance of many numerical algorithms, +# we need to opt-in explicitly. +# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details. +@muladd begin +#! format: noindent + # This method is called when a SemidiscretizationHyperbolicParabolic is constructed. # It constructs the basic `cache` used throughout the simulation to compute # the RHS etc. 
-function create_cache_parabolic(mesh::P4estMesh{3}, equations_hyperbolic::AbstractEquations, +function create_cache_parabolic(mesh::P4estMesh{3}, + equations_hyperbolic::AbstractEquations, equations_parabolic::AbstractEquationsParabolic, dg::DG, parabolic_scheme, RealT, uEltype) balance!(mesh) @@ -73,11 +81,14 @@ function calc_gradient!(gradients, u_transformed, t, Ja31, Ja32, Ja33 = get_contravariant_vector(3, contravariant_vectors, i, j, k, element) - gradients_reference_1 = get_node_vars(gradients_x, equations_parabolic, dg, + gradients_reference_1 = get_node_vars(gradients_x, equations_parabolic, + dg, i, j, k, element) - gradients_reference_2 = get_node_vars(gradients_y, equations_parabolic, dg, + gradients_reference_2 = get_node_vars(gradients_y, equations_parabolic, + dg, i, j, k, element) - gradients_reference_3 = get_node_vars(gradients_z, equations_parabolic, dg, + gradients_reference_3 = get_node_vars(gradients_z, equations_parabolic, + dg, i, j, k, element) # note that the contravariant vectors are transposed compared with computations of flux @@ -115,7 +126,8 @@ function calc_gradient!(gradients, u_transformed, t, @trixi_timeit timer() "interface flux" begin calc_interface_flux!(cache_parabolic.elements.surface_flux_values, mesh, False(), # False() = no nonconservative terms - equations_parabolic, dg.surface_integral, dg, cache_parabolic) + equations_parabolic, dg.surface_integral, dg, + cache_parabolic) end # Prolong solution to boundaries @@ -127,7 +139,8 @@ function calc_gradient!(gradients, u_transformed, t, # Calculate boundary fluxes @trixi_timeit timer() "boundary flux" begin calc_boundary_flux_gradients!(cache_parabolic, t, boundary_conditions_parabolic, - mesh, equations_parabolic, dg.surface_integral, dg) + mesh, equations_parabolic, dg.surface_integral, + dg) end # Prolong solution to mortars. 
These should reuse the hyperbolic version of `prolong2mortars` @@ -165,7 +178,8 @@ function calc_gradient!(gradients, u_transformed, t, for dim in 1:3 grad = gradients[dim] # surface at -x - normal_direction = get_normal_direction(1, contravariant_vectors, + normal_direction = get_normal_direction(1, + contravariant_vectors, 1, l, m, element) grad[v, 1, l, m, element] = (grad[v, 1, l, m, element] + surface_flux_values[v, l, m, 1, @@ -173,18 +187,22 @@ function calc_gradient!(gradients, u_transformed, t, factor_1 * normal_direction[dim]) # surface at +x - normal_direction = get_normal_direction(2, contravariant_vectors, - nnodes(dg), l, m, element) + normal_direction = get_normal_direction(2, + contravariant_vectors, + nnodes(dg), l, m, + element) grad[v, nnodes(dg), l, m, element] = (grad[v, nnodes(dg), l, m, element] + - surface_flux_values[v, l, m, + surface_flux_values[v, l, + m, 2, element] * factor_2 * normal_direction[dim]) # surface at -y - normal_direction = get_normal_direction(3, contravariant_vectors, + normal_direction = get_normal_direction(3, + contravariant_vectors, l, m, 1, element) grad[v, l, 1, m, element] = (grad[v, l, 1, m, element] + surface_flux_values[v, l, m, 3, @@ -192,18 +210,22 @@ function calc_gradient!(gradients, u_transformed, t, factor_1 * normal_direction[dim]) # surface at +y - normal_direction = get_normal_direction(4, contravariant_vectors, - l, nnodes(dg), m, element) + normal_direction = get_normal_direction(4, + contravariant_vectors, + l, nnodes(dg), m, + element) grad[v, l, nnodes(dg), m, element] = (grad[v, l, nnodes(dg), m, element] + - surface_flux_values[v, l, m, + surface_flux_values[v, l, + m, 4, element] * factor_2 * normal_direction[dim]) # surface at -z - normal_direction = get_normal_direction(5, contravariant_vectors, + normal_direction = get_normal_direction(5, + contravariant_vectors, l, m, 1, element) grad[v, l, m, 1, element] = (grad[v, l, m, 1, element] + surface_flux_values[v, l, m, 5, @@ -211,11 +233,14 @@ function calc_gradient!(gradients, u_transformed, t, factor_1 * normal_direction[dim]) # surface at +z - normal_direction = get_normal_direction(6, contravariant_vectors, - l, m, nnodes(dg), element) + normal_direction = get_normal_direction(6, + contravariant_vectors, + l, m, nnodes(dg), + element) grad[v, l, m, nnodes(dg), element] = (grad[v, l, m, nnodes(dg), element] + - surface_flux_values[v, l, m, + surface_flux_values[v, l, + m, 6, element] * factor_2 * @@ -366,37 +391,46 @@ function calc_volume_integral!(du, flux_viscous, @threaded for element in eachelement(dg, cache) # Calculate volume terms in one element for k in eachnode(dg), j in eachnode(dg), i in eachnode(dg) - flux1 = get_node_vars(flux_viscous_x, equations_parabolic, dg, i, j, k, element) - flux2 = get_node_vars(flux_viscous_y, equations_parabolic, dg, i, j, k, element) - flux3 = get_node_vars(flux_viscous_z, equations_parabolic, dg, i, j, k, element) + flux1 = get_node_vars(flux_viscous_x, equations_parabolic, dg, i, j, k, + element) + flux2 = get_node_vars(flux_viscous_y, equations_parabolic, dg, i, j, k, + element) + flux3 = get_node_vars(flux_viscous_z, equations_parabolic, dg, i, j, k, + element) # Compute the contravariant flux by taking the scalar product of the # first contravariant vector Ja^1 and the flux vector - Ja11, Ja12, Ja13 = get_contravariant_vector(1, contravariant_vectors, i, j, k, + Ja11, Ja12, Ja13 = get_contravariant_vector(1, contravariant_vectors, i, j, + k, element) contravariant_flux1 = Ja11 * flux1 + Ja12 * flux2 + Ja13 * flux3 for ii 
in eachnode(dg) - multiply_add_to_node_vars!(du, derivative_dhat[ii, i], contravariant_flux1, + multiply_add_to_node_vars!(du, derivative_dhat[ii, i], + contravariant_flux1, equations_parabolic, dg, ii, j, k, element) end # Compute the contravariant flux by taking the scalar product of the # second contravariant vector Ja^2 and the flux vector - Ja21, Ja22, Ja23 = get_contravariant_vector(2, contravariant_vectors, i, j, k, + Ja21, Ja22, Ja23 = get_contravariant_vector(2, contravariant_vectors, i, j, + k, element) contravariant_flux2 = Ja21 * flux1 + Ja22 * flux2 + Ja23 * flux3 for jj in eachnode(dg) - multiply_add_to_node_vars!(du, derivative_dhat[jj, j], contravariant_flux2, + multiply_add_to_node_vars!(du, derivative_dhat[jj, j], + contravariant_flux2, equations_parabolic, dg, i, jj, k, element) end # Compute the contravariant flux by taking the scalar product of the # second contravariant vector Ja^2 and the flux vector - Ja31, Ja32, Ja33 = get_contravariant_vector(3, contravariant_vectors, i, j, k, + Ja31, Ja32, Ja33 = get_contravariant_vector(3, contravariant_vectors, i, j, + k, element) contravariant_flux3 = Ja31 * flux1 + Ja32 * flux2 + Ja33 * flux3 for kk in eachnode(dg) - multiply_add_to_node_vars!(du, derivative_dhat[kk, k], contravariant_flux3, + multiply_add_to_node_vars!(du, derivative_dhat[kk, k], + contravariant_flux3, equations_parabolic, dg, i, j, kk, element) end end @@ -574,7 +608,8 @@ function calc_interface_flux!(surface_flux_values, viscous_flux_normal_ll, viscous_flux_normal_rr = get_surface_node_vars(cache_parabolic.interfaces.u, equations_parabolic, dg, - i, j, + i, + j, interface) flux = 0.5 * (viscous_flux_normal_ll + viscous_flux_normal_rr) @@ -606,7 +641,8 @@ function calc_interface_flux!(surface_flux_values, end function prolong2mortars_divergence!(cache, flux_viscous, - mesh::Union{P4estMesh{3}, T8codeMesh{3}}, equations, + mesh::Union{P4estMesh{3}, T8codeMesh{3}}, + equations, mortar_l2::LobattoLegendreMortarL2, surface_integral, dg::DGSEM) @unpack neighbor_ids, node_indices = cache.mortars @@ -642,11 +678,14 @@ function prolong2mortars_divergence!(cache, flux_viscous, element) for v in eachvariable(equations) - flux_viscous = SVector(flux_viscous_x[v, i_small, j_small, k_small, + flux_viscous = SVector(flux_viscous_x[v, i_small, j_small, + k_small, element], - flux_viscous_y[v, i_small, j_small, k_small, + flux_viscous_y[v, i_small, j_small, + k_small, element], - flux_viscous_z[v, i_small, j_small, k_small, + flux_viscous_z[v, i_small, j_small, + k_small, element]) cache.mortars.u[1, v, position, i, j, mortar] = dot(flux_viscous, @@ -688,7 +727,8 @@ function prolong2mortars_divergence!(cache, flux_viscous, for i in eachnode(dg) normal_direction = get_normal_direction(direction_index, contravariant_vectors, - i_large, j_large, k_large, element) + i_large, j_large, k_large, + element) for v in eachvariable(equations) flux_viscous = SVector(flux_viscous_x[v, i_large, j_large, k_large, @@ -827,7 +867,8 @@ end # TODO: parabolic; only BR1 at the moment flux_ = 0.5 * (u_ll + u_rr) # Copy flux to buffer - set_node_vars!(fstar, flux_, equations, dg, i_node_index, j_node_index, position_index) + set_node_vars!(fstar, flux_, equations, dg, i_node_index, j_node_index, + position_index) end # TODO: parabolic, finish implementing `calc_boundary_flux_gradients!` and `calc_boundary_flux_divergence!` @@ -862,7 +903,8 @@ function prolong2boundaries!(cache_parabolic, flux_viscous, for j in eachnode(dg) for i in eachnode(dg) # this is the outward normal direction on the 
primary element - normal_direction = get_normal_direction(direction, contravariant_vectors, + normal_direction = get_normal_direction(direction, + contravariant_vectors, i_node, j_node, k_node, element) for v in eachvariable(equations_parabolic) @@ -873,7 +915,8 @@ function prolong2boundaries!(cache_parabolic, flux_viscous, flux_viscous_z[v, i_node, j_node, k_node, element]) - boundaries.u[v, i, j, boundary] = dot(flux_viscous, normal_direction) + boundaries.u[v, i, j, boundary] = dot(flux_viscous, + normal_direction) end i_node += i_node_step_i j_node += j_node_step_i @@ -940,7 +983,8 @@ function calc_boundary_flux!(cache, t, j_node, k_node, element) - flux_ = boundary_condition_parabolic(flux_inner, u_inner, normal_direction, + flux_ = boundary_condition_parabolic(flux_inner, u_inner, + normal_direction, x, t, operator_type, equations_parabolic) @@ -959,3 +1003,22 @@ function calc_boundary_flux!(cache, t, end end end + +function apply_jacobian_parabolic!(du, mesh::P4estMesh{3}, + equations::AbstractEquationsParabolic, + dg::DG, cache) + @unpack inverse_jacobian = cache.elements + + @threaded for element in eachelement(dg, cache) + for k in eachnode(dg), j in eachnode(dg), i in eachnode(dg) + factor = inverse_jacobian[i, j, k, element] + + for v in eachvariable(equations) + du[v, i, j, k, element] *= factor + end + end + end + + return nothing +end +end # @muladd diff --git a/src/solvers/dgsem_tree/dg_2d_parabolic.jl b/src/solvers/dgsem_tree/dg_2d_parabolic.jl index b1c27343999..a6c962e03cd 100644 --- a/src/solvers/dgsem_tree/dg_2d_parabolic.jl +++ b/src/solvers/dgsem_tree/dg_2d_parabolic.jl @@ -951,22 +951,4 @@ function apply_jacobian_parabolic!(du, mesh::TreeMesh{2}, return nothing end - -function apply_jacobian_parabolic!(du, mesh::P4estMesh{2}, - equations::AbstractEquationsParabolic, - dg::DG, cache) - @unpack inverse_jacobian = cache.elements - - @threaded for element in eachelement(dg, cache) - for j in eachnode(dg), i in eachnode(dg) - factor = inverse_jacobian[i, j, element] - - for v in eachvariable(equations) - du[v, i, j, element] *= factor - end - end - end - - return nothing -end end # @muladd diff --git a/src/solvers/dgsem_tree/dg_3d_parabolic.jl b/src/solvers/dgsem_tree/dg_3d_parabolic.jl index ee0e7c6b069..d5504744742 100644 --- a/src/solvers/dgsem_tree/dg_3d_parabolic.jl +++ b/src/solvers/dgsem_tree/dg_3d_parabolic.jl @@ -1033,22 +1033,4 @@ function apply_jacobian_parabolic!(du, mesh::TreeMesh{3}, return nothing end - -function apply_jacobian_parabolic!(du, mesh::P4estMesh{3}, - equations::AbstractEquationsParabolic, - dg::DG, cache) - @unpack inverse_jacobian = cache.elements - - @threaded for element in eachelement(dg, cache) - for k in eachnode(dg), j in eachnode(dg), i in eachnode(dg) - factor = inverse_jacobian[i, j, k, element] - - for v in eachvariable(equations) - du[v, i, j, k, element] *= factor - end - end - end - - return nothing -end end # @muladd diff --git a/test/test_parabolic_2d.jl b/test/test_parabolic_2d.jl index 6632cd0bb27..f7185a1a904 100644 --- a/test/test_parabolic_2d.jl +++ b/test/test_parabolic_2d.jl @@ -561,8 +561,8 @@ end @test_trixi_include(joinpath(examples_dir(), "p4est_2d_dgsem", "elixir_advection_diffusion_nonperiodic_amr.jl"), tspan=(0.0, 0.01), - l2=[0.00793438523666649], - linf=[0.11030633127144573]) + l2=[0.007933791324450538], + linf=[0.11029480573492567]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let From 3ed62fb4e3bbd034b49fa452e4034c909b3a549b Mon Sep 17 00:00:00 2001 From: 
"dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 1 Feb 2024 18:01:29 +0100 Subject: [PATCH 17/22] Bump actions/cache from 3 to 4 (#1828) Bumps [actions/cache](https://github.com/actions/cache) from 3 to 4. - [Release notes](https://github.com/actions/cache/releases) - [Changelog](https://github.com/actions/cache/blob/main/RELEASES.md) - [Commits](https://github.com/actions/cache/compare/v3...v4) --- updated-dependencies: - dependency-name: actions/cache dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/benchmark.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index 6aa4809c1c2..4531c3aee0a 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -26,7 +26,7 @@ jobs: version: ${{ matrix.version }} arch: ${{ matrix.arch }} show-versioninfo: true - - uses: actions/cache@v3 + - uses: actions/cache@v4 env: cache-name: cache-artifacts with: From db1d7054b7832cb56dddda1076921dfd0476a2b2 Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Fri, 2 Feb 2024 08:46:49 +0100 Subject: [PATCH 18/22] fix typo leafs -> leaves --- src/meshes/t8code_mesh.jl | 38 +++++++++++++++++++------------------- test/test_parabolic_2d.jl | 12 ++++++------ test/test_parabolic_3d.jl | 28 ++++++++++++++-------------- 3 files changed, 39 insertions(+), 39 deletions(-) diff --git a/src/meshes/t8code_mesh.jl b/src/meshes/t8code_mesh.jl index 6fb4d861d10..cb2ac787e14 100644 --- a/src/meshes/t8code_mesh.jl +++ b/src/meshes/t8code_mesh.jl @@ -1,7 +1,7 @@ """ T8codeMesh{NDIMS} <: AbstractMesh{NDIMS} -An unstructured curved mesh based on trees that uses the C library +An unstructured curved mesh based on trees that uses the C library ['t8code'](https://github.com/DLR-AMR/t8code) to manage trees and mesh refinement. """ @@ -485,7 +485,7 @@ end # form a family and we decide whether this family should be coarsened # or only the first element should be refined. # Otherwise `is_family` must equal zero and we consider the first entry -# of the element array for refinement. +# of the element array for refinement. # Entries of the element array beyond the first `num_elements` are undefined. # \param [in] forest the forest to which the new elements belong # \param [in] forest_from the forest that is adapted. @@ -542,8 +542,8 @@ Adapt a `T8codeMesh` according to a user-defined `adapt_callback`. 0 : Stay unchanged. 1 : Refine element. -- `kwargs`: - - `recursive = true`: Adapt the forest recursively. If true the caller must ensure that the callback +- `kwargs`: + - `recursive = true`: Adapt the forest recursively. If true the caller must ensure that the callback returns 0 for every analyzed element at some point to stop the recursion. - `balance = true`: Make sure the adapted forest is 2^(NDIMS-1):1 balanced. - `partition = true`: Partition the forest to redistribute elements evenly among MPI ranks. 
@@ -695,7 +695,7 @@ function count_interfaces(mesh::T8codeMesh) for iface in 0:(num_faces - 1) pelement_indices_ref = Ref{Ptr{t8_locidx_t}}() - pneighbor_leafs_ref = Ref{Ptr{Ptr{t8_element}}}() + pneighbor_leaves_ref = Ref{Ptr{Ptr{t8_element}}}() pneigh_scheme_ref = Ref{Ptr{t8_eclass_scheme}}() dual_faces_ref = Ref{Ptr{Cint}}() @@ -704,7 +704,7 @@ function count_interfaces(mesh::T8codeMesh) forest_is_balanced = Cint(1) t8_forest_leaf_face_neighbors(mesh.forest, itree, element, - pneighbor_leafs_ref, iface, dual_faces_ref, + pneighbor_leaves_ref, iface, dual_faces_ref, num_neighbors_ref, pelement_indices_ref, pneigh_scheme_ref, forest_is_balanced) @@ -713,13 +713,13 @@ function count_interfaces(mesh::T8codeMesh) dual_faces = unsafe_wrap(Array, dual_faces_ref[], num_neighbors) neighbor_ielements = unsafe_wrap(Array, pelement_indices_ref[], num_neighbors) - neighbor_leafs = unsafe_wrap(Array, pneighbor_leafs_ref[], num_neighbors) + neighbor_leaves = unsafe_wrap(Array, pneighbor_leaves_ref[], num_neighbors) neighbor_scheme = pneigh_scheme_ref[] if num_neighbors == 0 local_num_boundary += 1 else - neighbor_level = t8_element_level(neighbor_scheme, neighbor_leafs[1]) + neighbor_level = t8_element_level(neighbor_scheme, neighbor_leaves[1]) if all(neighbor_ielements .< num_local_elements) # Conforming interface: The second condition ensures we @@ -745,7 +745,7 @@ function count_interfaces(mesh::T8codeMesh) neighbor_linear_id = neighbor_global_ghost_itree * max_tree_num_elements + t8_element_get_linear_id(neighbor_scheme, - neighbor_leafs[1], + neighbor_leaves[1], max_level) global_mortar_id = 2 * ndims(mesh) * neighbor_linear_id + dual_faces[1] @@ -759,7 +759,7 @@ function count_interfaces(mesh::T8codeMesh) end t8_free(dual_faces_ref[]) - t8_free(pneighbor_leafs_ref[]) + t8_free(pneighbor_leaves_ref[]) t8_free(pelement_indices_ref[]) end # for @@ -875,7 +875,7 @@ function fill_mesh_info!(mesh::T8codeMesh, interfaces, mortars, boundaries, end pelement_indices_ref = Ref{Ptr{t8_locidx_t}}() - pneighbor_leafs_ref = Ref{Ptr{Ptr{t8_element}}}() + pneighbor_leaves_ref = Ref{Ptr{Ptr{t8_element}}}() pneigh_scheme_ref = Ref{Ptr{t8_eclass_scheme}}() dual_faces_ref = Ref{Ptr{Cint}}() @@ -885,7 +885,7 @@ function fill_mesh_info!(mesh::T8codeMesh, interfaces, mortars, boundaries, # Query neighbor information from t8code. t8_forest_leaf_face_neighbors(mesh.forest, itree, element, - pneighbor_leafs_ref, iface, dual_faces_ref, + pneighbor_leaves_ref, iface, dual_faces_ref, num_neighbors_ref, pelement_indices_ref, pneigh_scheme_ref, forest_is_balanced) @@ -894,7 +894,7 @@ function fill_mesh_info!(mesh::T8codeMesh, interfaces, mortars, boundaries, dual_faces = unsafe_wrap(Array, dual_faces_ref[], num_neighbors) neighbor_ielements = unsafe_wrap(Array, pelement_indices_ref[], num_neighbors) - neighbor_leafs = unsafe_wrap(Array, pneighbor_leafs_ref[], num_neighbors) + neighbor_leaves = unsafe_wrap(Array, pneighbor_leaves_ref[], num_neighbors) neighbor_scheme = pneigh_scheme_ref[] # Now we check for the different cases. The nested if-structure is as follows: @@ -913,7 +913,7 @@ function fill_mesh_info!(mesh::T8codeMesh, interfaces, mortars, boundaries, # else: // `local mortar from smaller elements point of view` # // We only count local mortars once. # - # else: // It must be either a MPI interface or a MPI mortar. + # else: // It must be either a MPI interface or a MPI mortar. 
# # if `MPI interface`: # @@ -938,7 +938,7 @@ function fill_mesh_info!(mesh::T8codeMesh, interfaces, mortars, boundaries, # Interface or mortar. else - neighbor_level = t8_element_level(neighbor_scheme, neighbor_leafs[1]) + neighbor_level = t8_element_level(neighbor_scheme, neighbor_leaves[1]) # Local interface or mortar. if all(neighbor_ielements .< num_local_elements) @@ -985,7 +985,7 @@ function fill_mesh_info!(mesh::T8codeMesh, interfaces, mortars, boundaries, neighbor_linear_id = neighbor_global_ghost_itree * max_tree_num_elements + t8_element_get_linear_id(neighbor_scheme, - neighbor_leafs[1], + neighbor_leaves[1], max_level) if current_linear_id < neighbor_linear_id @@ -1029,7 +1029,7 @@ function fill_mesh_info!(mesh::T8codeMesh, interfaces, mortars, boundaries, num_local_elements) local_neighbor_ids = [neighbor_ids[i] for i in local_neighbor_positions] - local_neighbor_positions = [map_iface_to_ichild_to_position[dual_faces[1] + 1][t8_element_child_id(neighbor_scheme, neighbor_leafs[i]) + 1] + local_neighbor_positions = [map_iface_to_ichild_to_position[dual_faces[1] + 1][t8_element_child_id(neighbor_scheme, neighbor_leaves[i]) + 1] for i in local_neighbor_positions] # Last entry is the large element. @@ -1059,7 +1059,7 @@ function fill_mesh_info!(mesh::T8codeMesh, interfaces, mortars, boundaries, neighbor_linear_id = neighbor_global_ghost_itree * max_tree_num_elements + t8_element_get_linear_id(neighbor_scheme, - neighbor_leafs[1], + neighbor_leaves[1], max_level) global_mortar_id = 2 * ndims(mesh) * neighbor_linear_id + dual_faces[1] @@ -1100,7 +1100,7 @@ function fill_mesh_info!(mesh::T8codeMesh, interfaces, mortars, boundaries, end t8_free(dual_faces_ref[]) - t8_free(pneighbor_leafs_ref[]) + t8_free(pneighbor_leaves_ref[]) t8_free(pelement_indices_ref[]) end # for iface diff --git a/test/test_parabolic_2d.jl b/test/test_parabolic_2d.jl index f7185a1a904..9f1382caa62 100644 --- a/test/test_parabolic_2d.jl +++ b/test/test_parabolic_2d.jl @@ -218,9 +218,9 @@ end "elixir_advection_diffusion.jl"), tspan=(0.0, 0.0)) LLID = Trixi.local_leaf_cells(mesh.tree) - num_leafs = length(LLID) - @assert num_leafs % 8 == 0 - Trixi.refine!(mesh.tree, LLID[1:Int(num_leafs / 8)]) + num_leaves = length(LLID) + @assert num_leaves % 8 == 0 + Trixi.refine!(mesh.tree, LLID[1:Int(num_leaves / 8)]) tspan = (0.0, 1.5) semi = SemidiscretizationHyperbolicParabolic(mesh, (equations, equations_parabolic), @@ -414,9 +414,9 @@ end "elixir_navierstokes_convergence.jl"), tspan=(0.0, 0.0), initial_refinement_level=3) LLID = Trixi.local_leaf_cells(mesh.tree) - num_leafs = length(LLID) - @assert num_leafs % 4 == 0 - Trixi.refine!(mesh.tree, LLID[1:Int(num_leafs / 4)]) + num_leaves = length(LLID) + @assert num_leaves % 4 == 0 + Trixi.refine!(mesh.tree, LLID[1:Int(num_leaves / 4)]) tspan = (0.0, 0.5) semi = SemidiscretizationHyperbolicParabolic(mesh, (equations, equations_parabolic), initial_condition, solver; diff --git a/test/test_parabolic_3d.jl b/test/test_parabolic_3d.jl index 6fbfb8259d4..1eaa9f51a56 100644 --- a/test/test_parabolic_3d.jl +++ b/test/test_parabolic_3d.jl @@ -252,9 +252,9 @@ end "elixir_navierstokes_convergence.jl"), tspan=(0.0, 0.0)) LLID = Trixi.local_leaf_cells(mesh.tree) - num_leafs = length(LLID) - @assert num_leafs % 16 == 0 - Trixi.refine!(mesh.tree, LLID[1:Int(num_leafs / 16)]) + num_leaves = length(LLID) + @assert num_leaves % 16 == 0 + Trixi.refine!(mesh.tree, LLID[1:Int(num_leaves / 16)]) tspan = (0.0, 0.25) semi = SemidiscretizationHyperbolicParabolic(mesh, (equations, 
equations_parabolic), initial_condition, solver; @@ -325,9 +325,9 @@ end "elixir_navierstokes_taylor_green_vortex.jl"), tspan=(0.0, 0.0)) LLID = Trixi.local_leaf_cells(mesh.tree) - num_leafs = length(LLID) - @assert num_leafs % 32 == 0 - Trixi.refine!(mesh.tree, LLID[1:Int(num_leafs / 32)]) + num_leaves = length(LLID) + @assert num_leaves % 32 == 0 + Trixi.refine!(mesh.tree, LLID[1:Int(num_leaves / 32)]) tspan = (0.0, 0.1) semi = SemidiscretizationHyperbolicParabolic(mesh, (equations, equations_parabolic), initial_condition, solver) @@ -429,8 +429,8 @@ end "elixir_advection_diffusion_amr.jl"), l2=[0.000355780485397024], linf=[0.0010810770271614256]) - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) let t = sol.t[end] u_ode = sol.u[end] @@ -444,8 +444,8 @@ end "elixir_advection_diffusion_nonperiodic.jl"), l2=[0.0009808996243280868], linf=[0.01732621559135459]) - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) let t = sol.t[end] u_ode = sol.u[end] @@ -472,8 +472,8 @@ end 0.12129218723807476, 0.8433893297612087, ]) - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) let t = sol.t[end] u_ode = sol.u[end] @@ -495,8 +495,8 @@ end 0.6782397526873181, 0.17663702154066238, 0.17663702154066266, 0.17663702154066238, 1.7327849844825238, ]) - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) let t = sol.t[end] u_ode = sol.u[end] From 7f7f058d0721bc1634e9fee69a8d136b9ea57bc7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 2 Feb 2024 08:47:41 +0100 Subject: [PATCH 19/22] Bump crate-ci/typos from 1.16.26 to 1.18.0 (#1826) Bumps [crate-ci/typos](https://github.com/crate-ci/typos) from 1.16.26 to 1.18.0. - [Release notes](https://github.com/crate-ci/typos/releases) - [Changelog](https://github.com/crate-ci/typos/blob/master/CHANGELOG.md) - [Commits](https://github.com/crate-ci/typos/compare/v1.16.26...v1.18.0) --- updated-dependencies: - dependency-name: crate-ci/typos dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Hendrik Ranocha --- .github/workflows/SpellCheck.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/SpellCheck.yml b/.github/workflows/SpellCheck.yml index a780e975155..b242b6e811e 100644 --- a/.github/workflows/SpellCheck.yml +++ b/.github/workflows/SpellCheck.yml @@ -10,4 +10,4 @@ jobs: - name: Checkout Actions Repository uses: actions/checkout@v4 - name: Check spelling - uses: crate-ci/typos@v1.16.26 + uses: crate-ci/typos@v1.18.0 From fa129aa3be558df6246b7339ce2bc966d76bcc6a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 2 Feb 2024 14:47:13 +0100 Subject: [PATCH 20/22] Bump codecov/codecov-action from 3 to 4 (#1827) Bumps [codecov/codecov-action](https://github.com/codecov/codecov-action) from 3 to 4. 
- [Release notes](https://github.com/codecov/codecov-action/releases) - [Changelog](https://github.com/codecov/codecov-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/codecov/codecov-action/compare/v3...v4) --- updated-dependencies: - dependency-name: codecov/codecov-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index f287cc5feb2..2e388366fc8 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -129,7 +129,7 @@ jobs: - uses: julia-actions/julia-processcoverage@v1 with: directories: src,examples,ext - - uses: codecov/codecov-action@v3 + - uses: codecov/codecov-action@v4 with: file: ./lcov.info flags: unittests From ea61e26ff330e720b011a85a2f4b9040e54da870 Mon Sep 17 00:00:00 2001 From: Benjamin Bolm <74359358+bennibolm@users.noreply.github.com> Date: Mon, 5 Feb 2024 11:50:25 +0100 Subject: [PATCH 21/22] Add section about false sharing problems to documentation (#1819) * Add section to docs about false sharing * Fix typos * Fix typo * Implement suggestions --- docs/src/performance.md | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/docs/src/performance.md b/docs/src/performance.md index df66f451b79..82d7f501f63 100644 --- a/docs/src/performance.md +++ b/docs/src/performance.md @@ -267,3 +267,14 @@ requires. It can thus be seen as a proxy for "energy used" and, as an extension, timing result, you need to set the analysis interval such that the `AnalysisCallback` is invoked at least once during the course of the simulation and discard the first PID value. + +## Performance issues with multi-threaded reductions +[False sharing](https://en.wikipedia.org/wiki/False_sharing) is a known performance issue +for systems with distributed caches. It also occurred for the implementation of a thread +parallel bounds checking routine for the subcell IDP limiting +in [PR #1736](https://github.com/trixi-framework/Trixi.jl/pull/1736). +After some [testing and discussion](https://github.com/trixi-framework/Trixi.jl/pull/1736#discussion_r1423881895), +it turned out that initializing a vector of length `n * Threads.nthreads()` and only using every +n-th entry instead of a vector of length `Threads.nthreads()` fixes the problem. +Since there are no processors with caches over 128B, we use `n = 128B / size(uEltype)`. +Now, the bounds checking routine of the IDP limiting scales as hoped. From 14151e636ef654cb5421b3a7e498a9d76ed46a64 Mon Sep 17 00:00:00 2001 From: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> Date: Mon, 5 Feb 2024 12:49:51 +0100 Subject: [PATCH 22/22] Add entry for gmsh tutorial in introduction (#1829) * add entry for gmsh tutorial in introduction * Update docs/literate/src/files/index.jl --------- Co-authored-by: Daniel Doehring Co-authored-by: Daniel Doehring --- docs/literate/src/files/index.jl | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/docs/literate/src/files/index.jl b/docs/literate/src/files/index.jl index e259d25fb2f..26637e5b24b 100644 --- a/docs/literate/src/files/index.jl +++ b/docs/literate/src/files/index.jl @@ -108,20 +108,26 @@ # software in the Trixi.jl ecosystem, and then run a simulation using Trixi.jl on said mesh. 
# In the end, the tutorial briefly explains how to simulate an example using AMR via `P4estMesh`. -# ### [15 Explicit time stepping](@ref time_stepping) +# ### [15 P4est mesh from gmsh](@ref p4est_from_gmsh) +#- +# This tutorial describes how to obtain a [`P4estMesh`](@ref) from an existing mesh generated +# by [`gmsh`](https://gmsh.info/) or any other meshing software that can export to the Abaqus +# input `.inp` format. The tutorial demonstrates how edges/faces can be associated with boundary conditions based on the physical nodesets. + +# ### [16 Explicit time stepping](@ref time_stepping) #- # This tutorial is about time integration using [OrdinaryDiffEq.jl](https://github.com/SciML/OrdinaryDiffEq.jl). # It explains how to use their algorithms and presents two types of time step choices - with error-based # and CFL-based adaptive step size control. -# ### [16 Differentiable programming](@ref differentiable_programming) +# ### [17 Differentiable programming](@ref differentiable_programming) #- # This part deals with some basic differentiable programming topics. For example, a Jacobian, its # eigenvalues and a curve of total energy (through the simulation) are calculated and plotted for # a few semidiscretizations. Moreover, we calculate an example for propagating errors with Measurement.jl # at the end. -# ### [17 Custom semidiscretization](@ref custom_semidiscretization) +# ### [18 Custom semidiscretization](@ref custom_semidiscretization) #- # This tutorial describes the [semidiscretiations](@ref overview-semidiscretizations) of Trixi.jl # and explains how to extend them for custom tasks.
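---

Note (editor, not part of the patches): the false-sharing section added to `docs/src/performance.md` in patch 21/22 above describes a padding pattern — allocate `n * Threads.nthreads()` entries and let each thread touch only every n-th one, with `n` chosen so each slot spans a full cache line. The following is a minimal, self-contained Julia sketch of that pattern under stated assumptions: the function name `count_positive_padded` and the `cache_line_bytes` keyword are invented for illustration, it uses plain `Base.Threads` rather than any Trixi.jl-internal threading macro, and it is not code from these patches.

```julia
# Sketch of the padding idea from the performance-docs section above (illustration only).
# A tightly packed `zeros(Int, nthreads())` places all per-thread counters on the same
# cache line, so concurrent updates keep invalidating that line across cores.
# Padding each thread's slot to a full cache line avoids the contention.

using Base.Threads

function count_positive_padded(data; cache_line_bytes = 128)
    stride = cache_line_bytes ÷ sizeof(Int)        # entries per padded per-thread slot
    counters = zeros(Int, stride * nthreads())     # only every `stride`-th entry is used
    @threads :static for i in eachindex(data)      # :static keeps threadid() fixed per chunk
        if data[i] > 0
            counters[(threadid() - 1) * stride + 1] += 1
        end
    end
    return sum(counters)                           # unused padding entries are zero
end

count_positive_padded(randn(10^6))
```

With an unpadded `zeros(Int, nthreads())` the loop computes the same result, but neighboring counters share a cache line and multi-threaded scaling degrades — exactly the effect the documentation section describes for the bounds-checking routine of the IDP limiting.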