diff --git a/.github/workflows/Documenter.yml b/.github/workflows/Documenter.yml index 795f6349779..579e8348f46 100644 --- a/.github/workflows/Documenter.yml +++ b/.github/workflows/Documenter.yml @@ -38,6 +38,7 @@ jobs: with: version: '~1.10.0-0' show-versioninfo: true + - uses: julia-actions/cache@v1 - uses: julia-actions/julia-buildpkg@v1 env: PYTHON: "" diff --git a/.github/workflows/Downgrade.yml b/.github/workflows/Downgrade.yml new file mode 100644 index 00000000000..c84b1026d1b --- /dev/null +++ b/.github/workflows/Downgrade.yml @@ -0,0 +1,86 @@ +name: Downgrade + +on: + pull_request: + paths-ignore: + - 'AUTHORS.md' + - 'CITATION.bib' + - 'CONTRIBUTING.md' + - 'LICENSE.md' + - 'NEWS.md' + - 'README.md' + - '.zenodo.json' + - '.github/workflows/benchmark.yml' + - '.github/workflows/CompatHelper.yml' + - '.github/workflows/TagBot.yml' + - 'benchmark/**' + - 'docs/**' + - 'utils/**' + workflow_dispatch: + +# Cancel redundant CI tests automatically +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + downgrade_test: + if: "!contains(github.event.head_commit.message, 'skip ci')" + # We could also include the Julia version as in + # name: ${{ matrix.trixi_test }} - ${{ matrix.os }} - Julia ${{ matrix.version }} - ${{ matrix.arch }} - ${{ github.event_name }} + # to be more specific. However, that requires us updating the required CI tests whenever we update Julia. + name: Downgrade ${{ matrix.trixi_test }} - ${{ matrix.os }} - ${{ matrix.arch }} - ${{ github.event_name }} + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + version: + - '1.9' + # - '~1.9.0-0' # including development versions + # - 'nightly' + os: + - ubuntu-latest + arch: + - x64 + trixi_test: + # - tree_part1 + # - tree_part2 + # - tree_part3 + # - tree_part4 + # - tree_part5 + # - tree_part6 + # - structured + # - p4est_part1 + # - p4est_part2 + # - t8code_part1 + # - unstructured_dgmulti + # - parabolic + # - paper_self_gravitating_gas_dynamics + # - misc_part1 + # - misc_part2 + # - performance_specializations_part1 + # - performance_specializations_part2 + # - mpi + - threaded + steps: + - uses: actions/checkout@v4 + - uses: julia-actions/setup-julia@v1 + with: + version: ${{ matrix.version }} + arch: ${{ matrix.arch }} + - run: julia -e 'using InteractiveUtils; versioninfo(verbose=true)' + - uses: julia-actions/cache@v1 + - uses: julia-actions/julia-downgrade-compat@v1 + with: + skip: LinearAlgebra,Printf,SparseArrays,DiffEqBase + projects: ., test + - uses: julia-actions/julia-buildpkg@v1 + env: + PYTHON: "" + - name: Run tests without coverage + uses: julia-actions/julia-runtest@v1 + with: + coverage: false + env: + PYTHON: "" + TRIXI_TEST: ${{ matrix.trixi_test }} diff --git a/.github/workflows/FormatCheck.yml b/.github/workflows/FormatCheck.yml index a733cb7cc21..7297f1c3ff5 100644 --- a/.github/workflows/FormatCheck.yml +++ b/.github/workflows/FormatCheck.yml @@ -29,7 +29,7 @@ jobs: # TODO: Change the call below to # format(".") run: | - julia -e 'using Pkg; Pkg.add(PackageSpec(name = "JuliaFormatter"))' + julia -e 'using Pkg; Pkg.add(PackageSpec(name = "JuliaFormatter", version="1.0.45"))' julia -e 'using JuliaFormatter; format(["benchmark", "examples", "ext", "src", "test", "utils"])' - name: Format check run: | diff --git a/.github/workflows/SpellCheck.yml b/.github/workflows/SpellCheck.yml index a780e975155..b242b6e811e 100644 --- a/.github/workflows/SpellCheck.yml +++ b/.github/workflows/SpellCheck.yml @@ -10,4 +10,4 @@ jobs: - name: 
Checkout Actions Repository uses: actions/checkout@v4 - name: Check spelling - uses: crate-ci/typos@v1.16.26 + uses: crate-ci/typos@v1.18.0 diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index ee354f7c7e9..9ac7d5e94a7 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -26,7 +26,7 @@ jobs: version: ${{ matrix.version }} arch: ${{ matrix.arch }} show-versioninfo: true - - uses: actions/cache@v3 + - uses: actions/cache@v4 env: cache-name: cache-artifacts with: @@ -45,7 +45,7 @@ jobs: run: julia --project=benchmark/ -e 'using Pkg; Pkg.develop(PackageSpec(path=pwd())); Pkg.instantiate()' - name: Run benchmarks run: julia --project=benchmark/ --color=yes benchmark/run_benchmarks.jl - - uses: actions/upload-artifact@v3 + - uses: actions/upload-artifact@v4 with: name: my-artifact path: benchmark/results*.md diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 8dcbe854156..adc25241b79 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -70,6 +70,7 @@ jobs: - p4est_part1 - p4est_part2 - t8code_part1 + - t8code_part2 - unstructured_dgmulti - parabolic - paper_self_gravitating_gas_dynamics @@ -100,6 +101,10 @@ jobs: os: windows-latest arch: x64 trixi_test: threaded + - version: '1.9' + os: macos-14 + arch: arm64 + trixi_test: threaded steps: - uses: actions/checkout@v4 - uses: julia-actions/setup-julia@v1 @@ -128,12 +133,13 @@ jobs: - uses: julia-actions/julia-processcoverage@v1 with: directories: src,examples,ext - - uses: codecov/codecov-action@v3 + - uses: codecov/codecov-action@v4 with: file: ./lcov.info flags: unittests name: codecov-umbrella - fail_ci_if_error: false + fail_ci_if_error: true + verbose: true token: ${{ secrets.CODECOV_TOKEN }} # The standard setup of Coveralls is just annoying for parallel builds, see, e.g., # https://github.com/trixi-framework/Trixi.jl/issues/691 @@ -153,7 +159,7 @@ jobs: - shell: bash run: | cp ./lcov.info ./lcov-${{ matrix.trixi_test }}-${{ matrix.os }}-${{ matrix.version }}-${{ matrix.arch }}.info - - uses: actions/upload-artifact@v3 + - uses: actions/upload-artifact@v4 with: name: lcov-${{ matrix.trixi_test }}-${{ matrix.os }}-${{ matrix.version }}-${{ matrix.arch }} path: ./lcov-${{ matrix.trixi_test }}-${{ matrix.os }}-${{ matrix.version }}-${{ matrix.arch }}.info @@ -176,7 +182,7 @@ jobs: # At first, we check out the repository and download all artifacts # (and list files for debugging). - uses: actions/checkout@v4 - - uses: actions/download-artifact@v3 + - uses: actions/download-artifact@v4 - run: ls -R # Next, we merge the individual coverage files and upload # the combined results to Coveralls. @@ -194,12 +200,12 @@ jobs: coverage = merge_coverage_counts(coverage) @show covered_lines, total_lines = get_summary(coverage) LCOV.writefile("./lcov.info", coverage) - - uses: coverallsapp/github-action@master + - uses: coverallsapp/github-action@v2 with: github-token: ${{ secrets.GITHUB_TOKEN }} path-to-lcov: ./lcov.info # Upload merged coverage data as artifact for debugging - - uses: actions/upload-artifact@v3 + - uses: actions/upload-artifact@v4 with: name: lcov path: ./lcov.info diff --git a/.gitignore b/.gitignore index 3132b9af38b..b4f1cf6bb47 100644 --- a/.gitignore +++ b/.gitignore @@ -10,6 +10,7 @@ *.mesh *.bson *.inp +*.msh **/Manifest.toml out*/ docs/build diff --git a/NEWS.md b/NEWS.md index cf695912ed7..feccd1f9852 100644 --- a/NEWS.md +++ b/NEWS.md @@ -9,6 +9,10 @@ for human readability. 
#### Added - AMR for hyperbolic-parabolic equations on 3D `P4estMesh` - `flux_hllc` on non-cartesian meshes for `CompressibleEulerEquations{2,3}D` +- Different boundary conditions for quad/hex meshes in Abaqus format, even if not generated by HOHQMesh, + can now be digested by Trixi in 2D and 3D. +- Subcell (positivity) limiting support for nonlinear variables in 2D for `TreeMesh` +- Added Lighthill-Whitham-Richards (LWR) traffic model ## Changes when updating to v0.6 from v0.5.x diff --git a/Project.toml b/Project.toml index faf9f82d335..9bed045637a 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "Trixi" uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb" authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "] -version = "0.6.6-pre" +version = "0.6.10-pre" [deps] CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2" @@ -9,6 +9,7 @@ ConstructionBase = "187b0558-2788-49d3-abe0-74a17ed4e7c9" DataStructures = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8" DiffEqBase = "2b5f629d-d688-5b77-993f-72d75c75574e" DiffEqCallbacks = "459566f4-90b8-5000-8ac3-15dfb0a30def" +Downloads = "f43a241f-c20a-4ad4-852c-f6b1247861c6" EllipsisNotation = "da5c29d0-fa7d-589e-88eb-ea29b0a81949" FillArrays = "1a297f60-69ca-5386-bcde-b61e274b549b" ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" @@ -44,6 +45,7 @@ TimerOutputs = "a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f" Triangulate = "f7e6ffb2-c36d-4f8f-a77e-16e897189344" TriplotBase = "981d1d27-644d-49a2-9326-4793e63143c3" TriplotRecipes = "808ab39a-a642-4abf-81ff-4cb34ebbffa3" +TrixiBase = "9a0f1c46-06d5-4909-a5a3-ce25d3fa3284" [weakdeps] Makie = "ee78f7c6-11fb-53f2-987a-cfe4a2b5a57a" @@ -57,19 +59,20 @@ ConstructionBase = "1.3" DataStructures = "0.18.15" DiffEqBase = "6 - 6.143" DiffEqCallbacks = "2.25" +Downloads = "1.6" EllipsisNotation = "1.0" FillArrays = "0.13.2, 1" -ForwardDiff = "0.10.18" -HDF5 = "0.14, 0.15, 0.16, 0.17" +ForwardDiff = "0.10.24" +HDF5 = "0.16.10, 0.17" IfElse = "0.1" LinearAlgebra = "1" LinearMaps = "2.7, 3.0" -LoopVectorization = "0.12.118" +LoopVectorization = "0.12.151" MPI = "0.20" -Makie = "0.19" +Makie = "0.19, 0.20" MuladdMacro = "0.2.2" -Octavian = "0.3.5" -OffsetArrays = "1.3" +Octavian = "0.3.21" +OffsetArrays = "1.12" P4est = "0.4.9" Polyester = "0.7.5" PrecompileTools = "1.1" @@ -78,21 +81,22 @@ RecipesBase = "1.1" Reexport = "1.0" Requires = "1.1" SciMLBase = "1.90, 2" -Setfield = "0.8, 1" +Setfield = "1" SimpleUnPack = "1.1" SparseArrays = "1" -StartUpDG = "0.17" -Static = "0.3, 0.4, 0.5, 0.6, 0.7, 0.8" +StartUpDG = "0.17.7" +Static = "0.8.7" StaticArrayInterface = "1.4" -StaticArrays = "1" -StrideArrays = "0.1.18" -StructArrays = "0.6" +StaticArrays = "1.5" +StrideArrays = "0.1.26" +StructArrays = "0.6.11" SummationByPartsOperators = "0.5.41" T8code = "0.4.3, 0.5" TimerOutputs = "0.5.7" -Triangulate = "2.0" +Triangulate = "2.2" TriplotBase = "0.1" TriplotRecipes = "0.1" +TrixiBase = "0.1.1" julia = "1.8" [extras] diff --git a/README.md b/README.md index c531ab4d1a4..71370d3478e 100644 --- a/README.md +++ b/README.md @@ -53,7 +53,7 @@ installation and postprocessing procedures. 
Its features include: * Hyperbolic diffusion equations for elliptic problems * Lattice-Boltzmann equations (D2Q9 and D3Q27 schemes) * Shallow water equations - * Several scalar conservation laws (e.g., linear advection, Burgers' equation) + * Several scalar conservation laws (e.g., linear advection, Burgers' equation, LWR traffic flow) * Multi-physics simulations * [Self-gravitating gas dynamics](https://github.com/trixi-framework/paper-self-gravitating-gas-dynamics) * Shared-memory parallelization via multithreading diff --git a/benchmark/benchmarks.jl b/benchmark/benchmarks.jl index a3f7d1d2569..0d6fabcd4a9 100644 --- a/benchmark/benchmarks.jl +++ b/benchmark/benchmarks.jl @@ -2,6 +2,9 @@ # readability #! format: off +using Pkg +Pkg.activate(@__DIR__) + using BenchmarkTools using Trixi @@ -47,13 +50,14 @@ end let SUITE["latency"] = BenchmarkGroup() SUITE["latency"]["default_example"] = @benchmarkable run( - `$(Base.julia_cmd()) -e 'using Trixi; trixi_include(default_example())'`) seconds=60 + `$(Base.julia_cmd()) --project=$(@__DIR__) -e 'using Trixi; trixi_include(default_example())'`) seconds=60 for polydeg in [3, 7] command = "using Trixi; trixi_include(joinpath(examples_dir(), \"tree_2d_dgsem\", \"elixir_advection_extended.jl\"), tspan=(0.0, 1.0e-10), polydeg=$(polydeg), save_restart=TrivialCallback(), save_solution=TrivialCallback())" - SUITE["latency"]["polydeg_$polydeg"] = @benchmarkable run($`$(Base.julia_cmd()) -e $command`) seconds=60 + SUITE["latency"]["polydeg_$polydeg"] = @benchmarkable run( + $`$(Base.julia_cmd()) --project=$(@__DIR__) -e $command`) seconds=60 end SUITE["latency"]["euler_2d"] = @benchmarkable run( - `$(Base.julia_cmd()) -e 'using Trixi; trixi_include(joinpath(examples_dir(), "tree_2d_dgsem", "elixir_euler_kelvin_helmholtz_instability.jl"), tspan=(0.0, 1.0e-10), save_restart=TrivialCallback(), save_solution=TrivialCallback())'`) seconds=60 + `$(Base.julia_cmd()) --project=$(@__DIR__) -e 'using Trixi; trixi_include(joinpath(examples_dir(), "tree_2d_dgsem", "elixir_euler_kelvin_helmholtz_instability.jl"), tspan=(0.0, 1.0e-10), save_solution=TrivialCallback())'`) seconds=60 SUITE["latency"]["mhd_2d"] = @benchmarkable run( - `$(Base.julia_cmd()) -e 'using Trixi; trixi_include(joinpath(examples_dir(), "tree_2d_dgsem", "elixir_mhd_blast_wave.jl"), tspan=(0.0, 1.0e-10), save_restart=TrivialCallback(), save_solution=TrivialCallback())'`) seconds=60 + `$(Base.julia_cmd()) --project=$(@__DIR__) -e 'using Trixi; trixi_include(joinpath(examples_dir(), "tree_2d_dgsem", "elixir_mhd_blast_wave.jl"), tspan=(0.0, 1.0e-10), save_solution=TrivialCallback())'`) seconds=60 end diff --git a/benchmark/elixir_2d_euler_vortex_unstructured.jl b/benchmark/elixir_2d_euler_vortex_unstructured.jl index 082b6648abf..43e4b6559de 100644 --- a/benchmark/elixir_2d_euler_vortex_unstructured.jl +++ b/benchmark/elixir_2d_euler_vortex_unstructured.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -49,11 +48,9 @@ end initial_condition = initial_condition_isentropic_vortex solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) -default_mesh_file = joinpath(@__DIR__, "mesh_uniform_cartesian.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/ranocha/f4ea19ba3b62348968c971db43d7798b/raw/a506abb9479c020920cf6068c142670fc1a9aadc/mesh_uniform_cartesian.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = 
Trixi.download("https://gist.githubusercontent.com/ranocha/f4ea19ba3b62348968c971db43d7798b/raw/a506abb9479c020920cf6068c142670fc1a9aadc/mesh_uniform_cartesian.mesh", + joinpath(@__DIR__, "mesh_uniform_cartesian.mesh")) + mesh = UnstructuredMesh2D(mesh_file, periodicity = true) semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) diff --git a/benchmark/run_benchmarks.jl b/benchmark/run_benchmarks.jl index 3a92a9ba700..7b8c25752f8 100644 --- a/benchmark/run_benchmarks.jl +++ b/benchmark/run_benchmarks.jl @@ -1,3 +1,7 @@ +using Pkg +Pkg.activate(@__DIR__) +Pkg.develop(PackageSpec(path = dirname(@__DIR__))) +Pkg.instantiate() using PkgBenchmark using Trixi diff --git a/docs/Project.toml b/docs/Project.toml index 3a091f5b4f1..3b8d169fdb8 100644 --- a/docs/Project.toml +++ b/docs/Project.toml @@ -10,9 +10,10 @@ OrdinaryDiffEq = "1dea7af3-3e70-54e6-95c3-0bf5283fa5ed" Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" Trixi2Vtk = "bc1476a1-1ca6-4cc3-950b-c312b255ff95" +TrixiBase = "9a0f1c46-06d5-4909-a5a3-ce25d3fa3284" [compat] -CairoMakie = "0.6, 0.7, 0.8, 0.9, 0.10" +CairoMakie = "0.6, 0.7, 0.8, 0.9, 0.10, 0.11" Documenter = "1" ForwardDiff = "0.10" HOHQMesh = "0.1, 0.2" @@ -23,3 +24,4 @@ OrdinaryDiffEq = "6.49.1" Plots = "1.9" Test = "1" Trixi2Vtk = "0.3" +TrixiBase = "0.1.1" diff --git a/docs/literate/make.jl b/docs/literate/make.jl index a04d8a0b333..84e4fbdced6 100644 --- a/docs/literate/make.jl +++ b/docs/literate/make.jl @@ -10,17 +10,17 @@ function create_files(title, file, repo_src, pages_dir, notebooks_dir; folder="" end binder_logo = "https://mybinder.org/badge_logo.svg" - nbviewer_logo = "https://raw.githubusercontent.com/jupyter/design/master/logos/Badges/nbviewer_badge.svg" - download_logo = "https://camo.githubusercontent.com/aea75103f6d9f690a19cb0e17c06f984ab0f472d9e6fe4eadaa0cc438ba88ada/68747470733a2f2f696d672e736869656c64732e696f2f62616467652f646f776e6c6f61642d6e6f7465626f6f6b2d627269676874677265656e" + nbviewer_logo = "https://img.shields.io/badge/render-nbviewer-f37726" + raw_notebook_logo = "https://img.shields.io/badge/raw-notebook-4cc61e" notebook_path = "tutorials/notebooks/$notebook_filename" binder_url = "https://mybinder.org/v2/gh/trixi-framework/Trixi.jl/tutorial_notebooks?filepath=$notebook_path" nbviewer_url = "https://nbviewer.jupyter.org/github/trixi-framework/Trixi.jl/blob/tutorial_notebooks/$notebook_path" - download_url = "https://raw.githubusercontent.com/trixi-framework/Trixi.jl/tutorial_notebooks/$notebook_path" + raw_notebook_url = "https://raw.githubusercontent.com/trixi-framework/Trixi.jl/tutorial_notebooks/$notebook_path" binder_badge = "# [![]($binder_logo)]($binder_url)" nbviewer_badge = "# [![]($nbviewer_logo)]($nbviewer_url)" - download_badge = "# [![]($download_logo)]($download_url)" + raw_notebook_badge = "# [![]($raw_notebook_logo)]($raw_notebook_url)" # Generate notebook file function preprocess_notebook(content) @@ -32,7 +32,7 @@ function create_files(title, file, repo_src, pages_dir, notebooks_dir; folder="" # Generate markdown file function preprocess_docs(content) - return string("# # [$title](@id $(splitext(file)[1]))\n $binder_badge\n $nbviewer_badge\n $download_badge\n\n", content) + return string("# # [$title](@id $(splitext(file)[1]))\n $binder_badge\n $nbviewer_badge\n $raw_notebook_badge\n\n", content) end Literate.markdown(joinpath(repo_src, folder, file), joinpath(pages_dir, folder); preprocess=preprocess_docs,) end diff --git 
a/docs/literate/src/files/behind_the_scenes_simulation_setup.jl b/docs/literate/src/files/behind_the_scenes_simulation_setup.jl new file mode 100644 index 00000000000..c93660e9bc1 --- /dev/null +++ b/docs/literate/src/files/behind_the_scenes_simulation_setup.jl @@ -0,0 +1,253 @@ +#src # Behind the scenes of a simulation setup + +# This tutorial will guide you through a simple Trixi.jl setup ("elixir"), giving an overview of +# what happens in the background during the initialization of a simulation. While the setup +# described herein does not cover all details, it involves relatively stable parts of Trixi.jl that +# are unlikely to undergo significant changes in the near future. The goal is to clarify some of +# the more fundamental, *technical* concepts that are applicable to a variety of +# (also more complex) configurations. + +# Trixi.jl follows the [method of lines](http://www.scholarpedia.org/article/Method_of_lines) concept for solving partial differential equations (PDEs). +# Firstly, the PDEs are reduced to a (potentially huge) system of +# ordinary differential equations (ODEs) by discretizing the spatial derivatives. Subsequently, +# these generated ODEs may be solved with methods available in OrdinaryDiffEq.jl or those specifically +# implemented in Trixi.jl. The following steps elucidate the process of transitioning from PDEs to +# ODEs within the framework of Trixi.jl. + +# ## Basic setup + +# Import essential libraries and specify an equation. + +using Trixi, OrdinaryDiffEq +equations = LinearScalarAdvectionEquation2D((-0.2, 0.7)) + +# Generate a spatial discretization using a [`TreeMesh`](@ref) with a pre-coarsened set of cells. + +coordinates_min = (-2.0, -2.0) +coordinates_max = (2.0, 2.0) + +coarsening_patches = ((type = "box", coordinates_min = [0.0, -2.0], + coordinates_max = [2.0, 0.0]),) + +mesh = TreeMesh(coordinates_min, coordinates_max, initial_refinement_level = 2, + n_cells_max = 30_000, + coarsening_patches = coarsening_patches) + +# The created `TreeMesh` looks like the following: + +# ![TreeMesh_example](https://github.com/trixi-framework/Trixi.jl/assets/119304909/d5ef76ee-8246-4730-a692-b472c06063a3) + +# Instantiate a [`DGSEM`](@ref) solver with a user-specified polynomial degree. The solver +# will define `polydeg + 1` [Gauss-Lobatto nodes](https://en.wikipedia.org/wiki/Gaussian_quadrature#Gauss%E2%80%93Lobatto_rules) and their associated weights within +# the reference interval ``[-1, 1]`` in each spatial direction. These nodes will be subsequently +# used to approximate solutions on each leaf cell of the `TreeMesh`. + +solver = DGSEM(polydeg = 3) + +# Gauss-Lobatto nodes with `polydeg = 3`: + +# ![Gauss-Lobatto_nodes_example](https://github.com/trixi-framework/Trixi.jl/assets/119304909/1d894611-801e-4f75-bff0-d77ca1c672e5) + +# ## Overview of the [`SemidiscretizationHyperbolic`](@ref) type + +# At this stage, all necessary components for configuring the spatial discretization are in place. +# The remaining task is to combine these components into a single structure that will be used +# throughout the entire simulation process. This is where [`SemidiscretizationHyperbolic`](@ref) +# comes into play. + +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition_convergence_test, + solver) + +# The constructor for the `SemidiscretizationHyperbolic` object calls numerous sub-functions to +# perform the necessary initialization steps. A brief description of the key sub-functions is +# provided below. 
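+# These sub-functions create the containers that are later collected in the internal `cache` of the
+# semidiscretization, as described in more detail below. As a quick preview, the created containers
+# can be listed by inspecting the cache keys. Note that `semi.cache` is an internal implementation
+# detail and not part of the public API, so the exact field names shown here are only illustrative
+# and may change between Trixi.jl versions.

+keys(semi.cache) # e.g. (:elements, :interfaces, :boundaries, :mortars, ...)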
+ + +# - `init_elements(leaf_cell_ids, mesh, equations, dg.basis, RealT, uEltype)` + +# The fundamental elements for approximating the solution are the leaf +# cells. The solution is constructed as a polynomial of the degree specified in the `DGSEM` +# solver in each spatial direction on each leaf cell. This polynomial approximation is evaluated +# at the Gauss-Lobatto nodes mentioned earlier. The `init_elements` function extracts +# these leaf cells from the `TreeMesh`, assigns them the label "elements", records their +# coordinates, and maps the Gauss-Lobatto nodes from the 1D interval ``[-1, 1]`` onto each coordinate axis +# of every element. + + +# ![elements_example](https://github.com/trixi-framework/Trixi.jl/assets/119304909/9f486670-b579-4e42-8697-439540c8bbb4) + +# The visualization of elements with nodes shown here includes spaces between elements, which do +# not exist in reality. This spacing is included only for illustrative purposes to underscore the +# separation of elements and the independent projection of nodes onto each element. + + +# - `init_interfaces(leaf_cell_ids, mesh, elements)` + +# At this point, the elements with nodes have been defined; however, they lack the necessary +# communication functionality. This is crucial because the local solution polynomials on the +# elements are not independent of each other. Furthermore, nodes on the boundary of adjacent +# elements share the same spatial location, which requires a method to combine this into a +# meaningful solution. +# Here [Riemann solvers](https://en.wikipedia.org/wiki/Riemann_solver#Approximate_solvers) +# come into play which can handle the principal ambiguity of a multi-valued solution at the +# same spatial location. + +# As demonstrated earlier, the elements can have varying sizes. Let us initially consider +# neighbors with equal size. For these elements, the `init_interfaces` function generates +# interfaces that store information about adjacent elements, their relative positions, and +# allocate containers for sharing solution data between neighbors during the solution process. + +# In our visualization, these interfaces would conceptually resemble tubes connecting the +# corresponding elements. + +# ![interfaces_example](https://github.com/trixi-framework/Trixi.jl/assets/119304909/bc3b6b02-afbc-4371-aaf7-c7bdc5a6c540) + + +# - `init_mortars(leaf_cell_ids, mesh, elements, dg.mortar)` + +# Returning to the consideration of different sizes among adjacent elements, within the +# `TreeMesh`, adjacent leaf cells can vary in side length by a maximum factor of two. This +# implies that a large element has one neighbor of +# equal size with a connection through an interface, or two neighbors at half the size, +# requiring a connection through so called "mortars". In 3D, a large element would have +# four small neighbor elements. + +# Mortars store information about the connected elements, their relative positions, and allocate +# containers for storing the solutions along the boundaries between these elements. + +# Due to the differing sizes of adjacent elements, it is not feasible to directly map boundary +# nodes of adjacent elements. Therefore, the concept of mortars employs a mass-conserving +# interpolation function to map boundary nodes from a larger element to a smaller one. + +# In our visualization, mortars are represented as branched tubes. 
+ +# ![mortars_example](https://github.com/trixi-framework/Trixi.jl/assets/119304909/43a95a60-3a31-4b1f-8724-14049e7a0481) + + +# - `init_boundaries(leaf_cell_ids, mesh, elements)` + +# In order to apply boundary conditions, it is necessary to identify the locations of the +# boundaries. Therefore, we initialize a "boundaries" object, which records the elements that +# contain boundaries, specifies which side of an element is a boundary, stores the coordinates +# of boundary nodes, and allocates containers for managing solutions at these boundaries. + +# In our visualization, boundaries and their corresponding nodes are highlighted with green, +# semi-transparent lines. + +# ![boundaries_example](https://github.com/trixi-framework/Trixi.jl/assets/119304909/21996b20-4a22-4dfb-b16a-e2c22c2f29fe) + +# All the structures mentioned earlier are collected as a cache of type `NamedTuple`. Subsequently, +# an object of type `SemidiscretizationHyperbolic` is initialized using this cache, initial and +# boundary conditions, equations, mesh and solver. + +# In conclusion, the primary purpose of a `SemidiscretizationHyperbolic` is to collect equations, +# the geometric representation of the domain, and approximation instructions, creating specialized +# structures to interconnect these components in a manner that enables their utilization for +# the numerical solution of partial differential equations (PDEs). + +# As evident from the earlier description of `SemidiscretizationHyperbolic`, it comprises numerous +# functions called subsequently. Without delving into details, the structure of the primary calls +# are illustrated as follows: + +# ![SemidiscretizationHyperbolic_structure](https://github.com/trixi-framework/Trixi.jl/assets/119304909/8bf59422-0537-4d7a-9f13-d9b2253c19d7) + +# ## Overview of the [`semidiscretize`](@ref) function + +# At this stage, we have defined the equations and configured the domain's discretization. The +# final step before solving is to select a suitable time span and apply the corresponding initial +# conditions, which are already stored in the initialized `SemidiscretizationHyperbolic` object. + +# The purpose of the [`semidiscretize`](@ref) function is to wrap the semidiscretization as an +# `ODEProblem` within the specified time interval. During this procedure the approximate solution +# is created at the given initial time via the specified `initial_condition` function from the +# `SemidiscretizationHyperbolic` object. This `ODEProblem` can be subsequently passed to the +# `solve` function from the [OrdinaryDiffEq.jl](https://github.com/SciML/OrdinaryDiffEq.jl) package +# or to [`Trixi.solve`](@ref). + +ode = semidiscretize(semi, (0.0, 1.0)); + +# The `semidiscretize` function involves a deep tree of subsequent calls, with the primary ones +# explained below. + + +# - `allocate_coefficients(mesh, equations, solver, cache)` + +# To apply initial conditions, a data structure ("container") needs to be generated to store the +# initial values of the target variables for each node within each element. + +# Since only one-dimensional `Array`s are `resize!`able in Julia, we use `Vector`s as an internal +# storage for the target variables and `resize!` them whenever needed, e.g. to change the number +# of elements. Then, during the solving process the same memory is reused by `unsafe_wrap`ping +# multi-dimensional `Array`s around the internal storage. 
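+# As a minimal, plain-Julia sketch of this storage-sharing idea (this is not Trixi.jl's actual
+# implementation, and the variable names below are made up purely for illustration):

+internal_storage = zeros(12)                                      # flat, `resize!`-able vector that owns the memory
+u_wrapped = unsafe_wrap(Array, pointer(internal_storage), (3, 4)) # view the same memory as a 3×4 array
+u_wrapped[1, 1] = 42.0                                            # writing through the wrapped array ...
+internal_storage[1]                                               # ... is visible in the underlying vector as well

+# Note that the underlying vector has to stay referenced as long as the wrapped array is in use,
+# since `unsafe_wrap` does not protect the memory from garbage collection.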
+
+# - `wrap_array(u_ode, semi)`
+
+# As previously noted, `u_ode` is constructed as a 1D vector to ensure compatibility with
+# OrdinaryDiffEq.jl. However, for internal use within Trixi.jl, identifying which part of the
+# vector relates to specific variables, elements, or nodes can be challenging.
+
+# This is why the `u_ode` vector is wrapped by the `wrap_array` function using `unsafe_wrap`
+# to form a multidimensional array `u`. In this array, the first dimension corresponds to
+# variables, followed by N dimensions corresponding to nodes for each of N space dimensions.
+# The last dimension corresponds to the elements.
+# Consequently, navigation within this multidimensional array becomes noticeably easier.
+
+# "Wrapping" in this context involves the creation of a reference to the same storage location
+# but with an alternative structural representation. This approach enables the use of both
+# instances `u` and `u_ode` as needed, so that changes are simultaneously reflected in both.
+# This is possible because, from a storage perspective, they share the same stored data, while
+# access to this data is provided in different ways.
+
+
+# - `compute_coefficients!(u, initial_conditions, t, mesh::DG, equations, solver, cache)`
+
+# Now that the variable `u`, intended to store solutions, has been allocated and wrapped, it is time
+# to apply the initial conditions. The `compute_coefficients!` function calculates the initial
+# conditions for each variable at every node within each element and properly stores them in the
+# `u` array.
+
+# At this stage, the `semidiscretize` function has all the necessary components to initialize and
+# return an `ODEProblem` object, which will be used by the `solve` function to compute the
+# solution.
+
+# In summary, the internal workings of `semidiscretize`, with brief descriptions, can be presented
+# as follows.
+
+# ![semidiscretize_structure](https://github.com/trixi-framework/Trixi.jl/assets/119304909/491eddc4-aadb-4e29-8c76-a7c821d0674e)
+
+# ## Functions `solve` and `rhs!`
+
+# Once the `ODEProblem` object is initialized, the `solve` function and one of the ODE solvers from
+# the OrdinaryDiffEq.jl package can be utilized to compute an approximate solution using the
+# instructions contained in the `ODEProblem` object.
+
+sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), dt = 0.01,
+            save_everystep = false);
+
+# Since the `solve` function and the ODE solver have no knowledge
+# of a particular spatial discretization, it is necessary to define a
+# "right-hand-side function", `rhs!`, within Trixi.jl.
+
+# Trixi.jl includes a set of `rhs!` functions designed to compute `du`, i.e.,
+# ``\frac{\partial u}{\partial t}`` according to the structure
+# of the setup. These `rhs!` functions calculate interface, mortar, and boundary fluxes, in
+# addition to surface and volume integrals, in order to construct the `du` vector. This `du` vector
+# is then used by the time integration method to obtain the solution at the subsequent time step.
+# The `rhs!` function is called by time integration methods in each iteration of the solve loop
+# within OrdinaryDiffEq.jl, with arguments `du`, `u`, `semidiscretization`, and the current time.
+
+# Trixi.jl uses a two-level approach for `rhs!` functions. The first level is limited to a
+# single function for each `semidiscretization` type, and its role is to redirect data to the
+# target `rhs!` for specific solver and mesh types. This target `rhs!` function is responsible
+# for calculating `du`.
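+# To make this call structure concrete, the right-hand side can also be evaluated once by hand.
+# The functions `Trixi.rhs!` and `Trixi.wrap_array` used below are internal (their names and
+# signatures are not part of the public API and may change), so this is only an illustrative
+# sketch of what happens inside the solve loop.

+du_ode = similar(ode.u0)                            # flat storage for the time derivative
+Trixi.rhs!(du_ode, ode.u0, semi, first(ode.tspan))  # first-level rhs!, dispatching on the semidiscretization
+du = Trixi.wrap_array(du_ode, semi)                 # wrapped view: variables × nodes × nodes × elements
+size(du)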
+ +# Path from the `solve` function call to the appropriate `rhs!` function call: + +# ![rhs_structure](https://github.com/trixi-framework/Trixi.jl/assets/119304909/dbea9a0e-25a4-4afa-855e-01f1ad619982) + +# Computed solution: + +using Plots +plot(sol) +pd = PlotData2D(sol) +plot!(getmesh(pd)) diff --git a/docs/literate/src/files/behind_the_scenes_simulation_setup_plots/Project.toml b/docs/literate/src/files/behind_the_scenes_simulation_setup_plots/Project.toml new file mode 100644 index 00000000000..43aec5b7f54 --- /dev/null +++ b/docs/literate/src/files/behind_the_scenes_simulation_setup_plots/Project.toml @@ -0,0 +1,2 @@ +[deps] +Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80" diff --git a/docs/literate/src/files/behind_the_scenes_simulation_setup_plots/README.md b/docs/literate/src/files/behind_the_scenes_simulation_setup_plots/README.md new file mode 100644 index 00000000000..011b5c75860 --- /dev/null +++ b/docs/literate/src/files/behind_the_scenes_simulation_setup_plots/README.md @@ -0,0 +1,15 @@ +# Plots for the tutorial "Behind the scenes of a simulation setup" + +To create all the images for the tutorial, execute the following command from the directory of this `README.md`: +```julia +pkg> activate . +julia> include.(readdir("src"; join=true)) +``` +To create all images from a different directory, substitute `"src"` with the path to the `src` +folder. The resulting images will be generated in your current directory as PNG files. + +To generate a specific image, run the following command while replacing `"path/to/src"` and `"file_name"` with the appropriate values: +```julia +pkg> activate . +julia> include(joinpath("path/to/src", "file_name")) +``` \ No newline at end of file diff --git a/docs/literate/src/files/behind_the_scenes_simulation_setup_plots/src/SemidiscretizationHyperbolic_structure_figure.jl b/docs/literate/src/files/behind_the_scenes_simulation_setup_plots/src/SemidiscretizationHyperbolic_structure_figure.jl new file mode 100644 index 00000000000..cae7b19d470 --- /dev/null +++ b/docs/literate/src/files/behind_the_scenes_simulation_setup_plots/src/SemidiscretizationHyperbolic_structure_figure.jl @@ -0,0 +1,64 @@ +using Plots +plot(Shape([(-2.3,4.5), (2.35,4.5), (2.35,2.5), (-2.3,2.5)]), linecolor="black", fillcolor="white", label=false,linewidth=2, size=(800,600), showaxis=false, grid=false, xlim=(-2.4,2.8), ylim=(-25,5.5)) +annotate!(2.3, 3.5, ("SemidiscretizationHyperbolic(mesh, equations, initial_conditions, solver; source_terms, +boundary_conditions, RealT, uEltype, initial_cache) ", 10, :black, :right)) +annotate!(-2.3, 1.5, ("creates and returns SemidiscretizationHyperbolic object, initialized using a mesh, equations, +initial_conditions, boundary_conditions, source_terms, solver and cache", 9, :black, :left)) +plot!([-1.2,-1.2],[0.6,-2],arrow=true,color=:black,linewidth=2,label="") +plot!([-1.2,-1.4],[0.6,-2],arrow=true,color=:black,linewidth=2,label="") +plot!([-1.2,-1.],[0.6,-2],arrow=true,color=:black,linewidth=2,label="") +annotate!(-1, -0.7, ("specialized for mesh +and solver types", 9, :black, :left)) +plot!([1.25,1.25],[0.6,-2],arrow=true,color=:black,linewidth=2,label="") +plot!([1.25,1.05],[0.6,-2],arrow=true,color=:black,linewidth=2,label="") +plot!([1.25,1.45],[0.6,-2],arrow=true,color=:black,linewidth=2,label="") +annotate!(1.48, -0.7, ("specialized for mesh +and boundary_conditions +types", 9, :black, :left)) + +plot!(Shape([(-2.3,-2), (-0.1,-2), (-0.1,-4), (-2.3,-4)]), linecolor="black", fillcolor="white", label=false,linewidth=2) +annotate!(-1.2, 
-3, ("create_cache(mesh::TreeMesh, equations, + solver::Dg, RealT, uEltype)", 10, :black, :center)) +plot!([-2.22,-2.22],[-4,-22],arrow=false,color=:black,linewidth=2,label="") + +plot!(Shape([(-0.05,-2), (2.6,-2), (2.6,-4), (-0.05,-4)]), linecolor="black", fillcolor="white", label=false,linewidth=2) +annotate!(1.27, -3, ("digest_boundary_conditions(boundary_conditions, + mesh, solver, cache)", 10, :black, :center)) +annotate!(2.6, -5, ("if necessary, converts passed boundary_conditions + into a suitable form for processing by Trixi.jl", 9, :black, :right)) + +plot!(Shape([(-2,-6), (-0.55,-6), (-0.55,-7.1), (-2,-7.1)]), linecolor="black", fillcolor="white", label=false,linewidth=2) +annotate!(-1.95, -6.5, ("local_leaf_cells(mesh.tree)", 10, :black, :left)) +annotate!(-2, -7.5, ("returns cells for which an element needs to be created (i.e. all leaf cells)", 9, :black, :left)) +plot!([-2.22,-2],[-6.5,-6.5],arrow=true,color=:black,linewidth=2,label="") + +plot!(Shape([(-2,-9), (1.73,-9), (1.73,-10.1), (-2,-10.1)]), linecolor="black", fillcolor="white", label=false,linewidth=2) +annotate!(-1.95, -9.5, ("init_elements(leaf_cell_ids, mesh, equations, dg.basis, RealT, uEltype)", 10, :black, :left)) +annotate!(-2, -10.5, ("creates and initializes elements, projects Gauss-Lobatto basis onto each of them", 9, :black, :left)) +plot!([-2.22,-2],[-9.5,-9.5],arrow=true,color=:black,linewidth=2,label="") + +plot!(Shape([(-2,-12), (0.4,-12), (0.4,-13.1), (-2,-13.1)]), linecolor="black", fillcolor="white", label=false,linewidth=2) +annotate!(-1.95, -12.5, ("init_interfaces(leaf_cell_ids, mesh, elements)", 10, :black, :left)) +annotate!(-2, -13.5, ("creates and initializes interfaces between each pair of adjacent elements of the same size", 9, :black, :left)) +plot!([-2.22,-2],[-12.5,-12.5],arrow=true,color=:black,linewidth=2,label="") + +plot!(Shape([(-2,-15), (0.5,-15), (0.5,-16.1), (-2,-16.1)]), linecolor="black", fillcolor="white", label=false,linewidth=2) +annotate!(-1.95, -15.5, ("init_boundaries(leaf_cell_ids, mesh, elements)", 10, :black, :left)) +annotate!(-2, -17, ("creates and initializes boundaries, remembers each boundary element, as well as the coordinates of +each boundary node", 9, :black, :left)) +plot!([-2.22,-2],[-15.5,-15.5],arrow=true,color=:black,linewidth=2,label="") + +plot!(Shape([(-1.6,-18), (1.3,-18), (1.3,-19.1), (-1.6,-19.1)]), linecolor="black", fillcolor="white", label=false,linewidth=2) +annotate!(-1.55, -18.5, ("init_mortars(leaf_cell_ids, mesh, elements, dg.mortar)", 10, :black, :left)) +annotate!(-1.6, -20, ("creates and initializes mortars (type of interfaces) between each triple of adjacent coarsened +and corresponding small elements", 9, :black, :left)) +plot!([-2.22,-1.6],[-18.5,-18.5],arrow=true,color=:black,linewidth=2,label="") +annotate!(-2.15, -19, ("2D and 3D", 8, :black, :left)) + +plot!(Shape([(-2,-21), (1.5,-21), (1.5,-23.1), (-2,-23.1)]), linecolor="black", fillcolor="white", label=false,linewidth=2) +annotate!(-1.95, -22, ("create_cache(mesh, equations, dg.volume_integral, dg, uEltype) +for 2D and 3D create_cache(mesh, equations, dg.mortar, uEltype)", 10, :black, :left)) +annotate!(-2, -23.5, ("add specialized parts of the cache required to compute the volume integral, etc.", 9, :black, :left)) +plot!([-2.22,-2],[-22,-22],arrow=true,color=:black,linewidth=2,label="") + +savefig("./SemidiscretizationHyperbolic") \ No newline at end of file diff --git a/docs/literate/src/files/behind_the_scenes_simulation_setup_plots/src/generate_boundary_figure.jl 
b/docs/literate/src/files/behind_the_scenes_simulation_setup_plots/src/generate_boundary_figure.jl new file mode 100644 index 00000000000..14475d21339 --- /dev/null +++ b/docs/literate/src/files/behind_the_scenes_simulation_setup_plots/src/generate_boundary_figure.jl @@ -0,0 +1,190 @@ +using Plots + +function min(coordinates::Vector{Tuple{Float64, Float64}}, i) + min=coordinates[1][i] + for j in coordinates + if min>j[i] + min=j[i] + end + end + return min +end + +function max(coordinates::Vector{Tuple{Float64, Float64}}, i) + max=coordinates[1][i] + for j in coordinates + if maxj[i] + min=j[i] + end + end + return min +end + +function max(coordinates::Vector{Tuple{Float64, Float64}}, i) + max=coordinates[1][i] + for j in coordinates + if maxj[i] + min=j[i] + end + end + return min +end + +function max(coordinates::Vector{Tuple{Float64, Float64}}, i) + max=coordinates[1][i] + for j in coordinates + if maxj[i] + min=j[i] + end + end + return min +end + +function max(coordinates::Vector{Tuple{Float64, Float64}}, i) + max=coordinates[1][i] + for j in coordinates + if max finer mesh) +# Mesh.CharacteristicLengthFactor = 1.0 * 2^(-3); +# // Insist on quads instead of default triangles +# Mesh.RecombineAll = 1; +# // Violet instead of green base color for better visibility +# Mesh.ColorCarousel = 0; +# +# // points of the airfoil contour +# // Format: {x, y, z, DesiredCellSize}. See the documentation: https://gmsh.info/doc/texinfo/gmsh.html#Points +# // These concrete points are generated using the tool from https://github.com/cfsengineering/GMSH-Airfoil-2D +# Point(5) = {-0.4900332889206208, 0.09933466539753061, 0, 0.125}; +# Point(6) = {-0.4900274857651495, 0.1021542752054094, 0, 0.125}; +# Point(7) = {-0.4894921489729144, 0.1049830248247787, 0, 0.125}; +# Point(8) = {-0.4884253336670712, 0.1078191282319664, 0, 0.125}; +# Point(9) = {-0.4868257975566199, 0.1106599068424483, 0, 0.125}; +# Point(10) = {-0.4846930063965668, 0.1135018003016681, 0, 0.125}; +# Point(11) = {-0.4820271400142729, 0.1163403835785654, 0, 0.125}; +# Point(12) = {-0.4788290988083472, 0.1191703902233889, 0, 0.125}; +# Point(13) = {-0.4751005105908123, 0.1219857416089041, 0, 0.125}; +# Point(14) = {-0.4708437376101668, 0.1247795819332056, 0, 0.125}; +# Point(15) = {-0.4660618835629463, 0.1275443187232316, 0, 0.125}; +# Point(16) = {-0.4607588003749649, 0.1302716685409717, 0, 0.125}; +# Point(17) = {-0.4549390945110529, 0.132952707559475, 0, 0.125}; +# Point(18) = {-0.448608132554204, 0.1355779266432996, 0, 0.125}; +# Point(19) = {-0.4417720457819508, 0.138137290538182, 0, 0.125}; +# Point(20) = {-0.4344377334597768, 0.140620300747629, 0, 0.125}; +# Point(21) = {-0.4266128645686593, 0.1430160616500159, 0, 0.125}; +# Point(22) = {-0.4183058776865576, 0.1453133493887722, 0, 0.125}; +# Point(23) = {-0.4095259787518715, 0.147500683050503, 0, 0.125}; +# Point(24) = {-0.4002831364505879, 0.1495663976315875, 0, 0.125}; +# Point(25) = {-0.3905880749878933, 0.1514987182830453, 0, 0.125}; +# Point(26) = {-0.3804522640292948, 0.1532858353164163, 0, 0.125}; +# Point(27) = {-0.3698879056254708, 0.1549159794501833, 0, 0.125}; +# Point(28) = {-0.3589079179688306, 0.1563774967770029, 0, 0.125}; +# Point(29) = {-0.3475259158676376, 0.1576589229368209, 0, 0.125}; +# Point(30) = {-0.3357561878650377, 0.158749055989923, 0, 0.125}; +# Point(31) = {-0.3236136699747923, 0.1596370274972017, 0, 0.125}; +# Point(32) = {-0.3111139160522804, 0.1603123713324616, 0, 0.125}; +# Point(33) = {-0.298273064867608, 0.160765089773461, 0, 0.125}; +# Point(34) = 
{-0.2851078039966239, 0.1609857164445887, 0, 0.125}; +# Point(35) = {-0.2716353306943914, 0.160965375714529, 0, 0.125}; +# Point(36) = {-0.2578733099632437, 0.1606958381868515, 0, 0.125}; +# Point(37) = {-0.2438398300730194, 0.1601695719599709, 0, 0.125}; +# Point(38) = {-0.2295533558334121, 0.1593797893750759, 0, 0.125}; +# Point(39) = {-0.2150326799566391, 0.1583204890160489, 0, 0.125}; +# Point(40) = {-0.2002968728818922, 0.1569864927736143, 0, 0.125}; +# Point(41) = {-0.18536523146042, 0.1553734778363979, 0, 0.125}; +# Point(42) = {-0.1702572269208345, 0.1534780035235666, 0, 0.125}; +# Point(43) = {-0.1549924525477129, 0.1512975329264932, 0, 0.125}; +# Point(44) = {-0.1395905715122586, 0.1488304493795921, 0, 0.125}; +# Point(45) = {-0.1240712652914332, 0.1460760678321895, 0, 0.125}; +# Point(46) = {-0.1084541831014299, 0.1430346412430583, 0, 0.125}; +# Point(47) = {-0.09275889275279087, 0.1397073621660917, 0, 0.125}; +# Point(48) = {-0.07700483330818747, 0.1360963597385416, 0, 0.125}; +# Point(49) = {-0.06151286635366404, 0.1323050298149023, 0, 0.125}; +# Point(50) = {-0.04602933219022032, 0.1283521764905442, 0, 0.125}; +# Point(51) = {-0.03051345534800332, 0.1242331665904082, 0, 0.125}; +# Point(52) = {-0.01498163190522334, 0.1199540932779839, 0, 0.125}; +# Point(53) = {0.0005498526140696458, 0.1155214539466913, 0, 0.125}; +# Point(54) = {0.01606484191716884, 0.1109421303284033, 0, 0.125}; +# Point(55) = {0.03154732664394777, 0.106223368423828, 0, 0.125}; +# Point(56) = {0.0469814611314705, 0.1013727584299359, 0, 0.125}; +# Point(57) = {0.06235157928986135, 0.09639821481480275, 0, 0.125}; +# Point(58) = {0.07764220964363855, 0.09130795666388933, 0, 0.125}; +# Point(59) = {0.09283808959671735, 0.08611048839446452, 0, 0.125}; +# Point(60) = {0.1079241789809607, 0.08081458090718853, 0, 0.125}; +# Point(61) = {0.1228856729475325, 0.07542925321638272, 0, 0.125}; +# Point(62) = {0.1377080142575372, 0.06996375457378261, 0, 0.125}; +# Point(63) = {0.1523769050236616, 0.06442754707512513, 0, 0.125}; +# Point(64) = {0.1668783179480157, 0.05883028871526293, 0, 0.125}; +# Point(65) = {0.1811985070933818, 0.05318181683604975, 0, 0.125}; +# Point(66) = {0.1953240182159306, 0.04749213189240609, 0, 0.125}; +# Point(67) = {0.2092416986775084, 0.04177138144606024, 0, 0.125}; +# Point(68) = {0.2229387069452062, 0.03602984428372727, 0, 0.125}; +# Point(69) = {0.2364025216754475, 0.03027791454712048, 0, 0.125}; +# Point(70) = {0.2496209503696738, 0.02452608575629232, 0, 0.125}; +# Point(71) = {0.2625821375791982, 0.01878493460541621, 0, 0.125}; +# Point(72) = {0.2752745726282818, 0.01306510441121807, 0, 0.125}; +# Point(73) = {0.28768709681727, 0.007377288098728577, 0, 0.125}; +# Point(74) = {0.2998089100619555, 0.001732210616722449, 0, 0.125}; +# Point(75) = {0.3116295769214332, -0.003859389314124759, 0, 0.125}; +# Point(76) = {0.3231390319647309, -0.009386778203927332, 0, 0.125}; +# Point(77) = {0.3343275844265582, -0.01483924761490708, 0, 0.125}; +# Point(78) = {0.3451859221046181, -0.02020613485126957, 0, 0.125}; +# Point(79) = {0.3557051144551212, -0.02547684454806881, 0, 0.125}; +# Point(80) = {0.3658766148492779, -0.03064087116872238, 0, 0.125}; +# Point(81) = {0.3756922619615632, -0.0356878223992288, 0, 0.125}; +# Point(82) = {0.3851442802702071, -0.0406074434050937, 0, 0.125}; +# Point(83) = {0.394225279661484, -0.04538964189492445, 0, 0.125}; +# Point(84) = {0.4029282541416501, -0.05002451391298904, 0, 0.125}; +# Point(85) = {0.4112465796735204, -0.05450237026215737, 0, 0.125}; +# Point(86) = 
{0.4191740111683733, -0.05881376343890812, 0, 0.125}; +# Point(87) = {0.4267046786777481, -0.06294951494382847, 0, 0.125}; +# Point(88) = {0.4338330828434404, -0.06690074281456823, 0, 0.125}; +# Point(89) = {0.4405540896772232, -0.07065888921378868, 0, 0.125}; +# Point(90) = {0.4468629247542237, -0.07421574789251445, 0, 0.125}; +# Point(91) = {0.4527551669150955, -0.0775634913396257, 0, 0.125}; +# Point(92) = {0.4582267415819197, -0.08069469742118066, 0, 0.125}; +# Point(93) = {0.4632739138007936, -0.08360237530891265, 0, 0.125}; +# Point(94) = {0.4678932811302005, -0.08627999049569551, 0, 0.125}; +# Point(95) = {0.4720817664982195, -0.08872148869699745, 0, 0.125}; +# Point(96) = {0.4758366111533843, -0.09092131844134463, 0, 0.125}; +# Point(97) = {0.4791553678333992, -0.09287445215953141, 0, 0.125}; +# Point(98) = {0.4820358942729613, -0.09457640559161551, 0, 0.125}; +# Point(99) = {0.4844763471666588, -0.09602325534252773, 0, 0.125}; +# Point(100) = {0.4864751766953637, -0.09721165443119822, 0, 0.125}; +# Point(101) = {0.4880311217148797, -0.09813884569428721, 0, 0.125}; +# Point(102) = {0.4891432056939881, -0.09880267292366274, 0, 0.125}; +# Point(103) = {0.4898107334756874, -0.09920158963645126, 0, 0.125}; +# Point(104) = {0.4900332889206208, -0.09933466539753058, 0, 0.125}; +# Point(105) = {0.4897824225031319, -0.09926905587549506, 0, 0.125}; +# Point(106) = {0.4890301110661922, -0.09907236506934192, 0, 0.125}; +# Point(107) = {0.4877772173496635, -0.09874500608402761, 0, 0.125}; +# Point(108) = {0.48602517690576, -0.09828766683852558, 0, 0.125}; +# Point(109) = {0.4837759946062035, -0.09770130916007558, 0, 0.125}; +# Point(110) = {0.4810322398085871, -0.09698716747297723, 0, 0.125}; +# Point(111) = {0.4777970402368822, -0.09614674703990023, 0, 0.125}; +# Point(112) = {0.4740740746447117, -0.09518182170326678, 0, 0.125}; +# Point(113) = {0.4698675643422793, -0.09409443106501386, 0, 0.125}; +# Point(114) = {0.4651822636784212, -0.09288687703518478, 0, 0.125}; +# Point(115) = {0.460023449577924, -0.09156171967354482, 0, 0.125}; +# Point(116) = {0.4543969102408585, -0.09012177224394632, 0, 0.125}; +# Point(117) = {0.4483089331151018, -0.08857009539864649, 0, 0.125}; +# Point(118) = {0.4417662922553667, -0.08690999040934186, 0, 0.125}; +# Point(119) = {0.4347762351819332, -0.0851449913634191, 0, 0.125}; +# Point(120) = {0.4273464693498908, -0.08327885624791403, 0, 0.125}; +# Point(121) = {0.419485148335155, -0.08131555684993674, 0, 0.125}; +# Point(122) = {0.411200857836944, -0.07925926741086739, 0, 0.125}; +# Point(123) = {0.4025026015879757, -0.07711435198240155, 0, 0.125}; +# Point(124) = {0.3933997872536054, -0.07488535044544484, 0, 0.125}; +# Point(125) = {0.3839022123897198, -0.07257696316779733, 0, 0.125}; +# Point(126) = {0.3740200505167618, -0.07019403429336624, 0, 0.125}; +# Point(127) = {0.3637638373540689, -0.06774153367408606, 0, 0.125}; +# Point(128) = {0.3531444572451353, -0.06522453747557577, 0, 0.125}; +# Point(129) = {0.3421731297908021, -0.06264820750853495, 0, 0.125}; +# Point(130) = {0.3308613966940724, -0.06001776935966011, 0, 0.125}; +# Point(131) = {0.3192211088076166, -0.05733848941811218, 0, 0.125}; +# Point(132) = {0.3072644133633567, -0.05461565091590426, 0, 0.125}; +# Point(133) = {0.2950037413531683, -0.05185452912263369, 0, 0.125}; +# Point(134) = {0.2824517950208982, -0.04906036585632723, 0, 0.125}; +# Point(135) = {0.2696215354188702, -0.04623834349241404, 0, 0.125}; +# Point(136) = {0.2565261699769623, -0.04339355867155523, 0, 0.125}; +# Point(137) = 
{0.2431791400293651, -0.04053099592384862, 0, 0.125}; +# Point(138) = {0.2295941082432855, -0.03765550144139543, 0, 0.125}; +# Point(139) = {0.2157849458952252, -0.03477175724299444, 0, 0.125}; +# Point(140) = {0.2017657199439165, -0.03188425598348005, 0, 0.125}; +# Point(141) = {0.187550679854507, -0.02899727666564914, 0, 0.125}; +# Point(142) = {0.1731542441359161, -0.02611486151457043, 0, 0.125}; +# Point(143) = {0.1585909865622793, -0.02324079427214604, 0, 0.125}; +# Point(144) = {0.1438756220597465, -0.02037858016395433, 0, 0.125}; +# Point(145) = {0.129022992251319, -0.0175314277805827, 0, 0.125}; +# Point(146) = {0.1140480506645569, -0.01470223310184333, 0, 0.125}; +# Point(147) = {0.09896584761949168, -0.01189356587453844, 0, 0.125}; +# Point(148) = {0.08379151482656089, -0.009107658532933174, 0, 0.125}; +# Point(149) = {0.06854024973648176, -0.006346397826038436, 0, 0.125}; +# Point(150) = {0.05322729969528361, -0.003611319287478529, 0, 0.125}; +# Point(151) = {0.03786794596792287, -0.00090360465249055, 0, 0.125}; +# Point(152) = {0.0224774877026287, 0.00177591770710904, 0, 0.125}; +# Point(153) = {0.007071225915134205, 0.004426769294862437, 0, 0.125}; +# Point(154) = {-0.00833555242305456, 0.007048814950562587, 0, 0.125}; +# Point(155) = {-0.02372759010533726, 0.009642253300220296, 0, 0.125}; +# Point(156) = {-0.03908967513210498, 0.01220760427359278, 0, 0.125}; +# Point(157) = {-0.05440665578848514, 0.01474569380579989, 0, 0.125}; +# Point(158) = {-0.06966345527617318, 0.01725763587663899, 0, 0.125}; +# Point(159) = {-0.08484508582421563, 0.01974481207672138, 0, 0.125}; +# Point(160) = {-0.09987987792382108, 0.02219618763023203, 0, 0.125}; +# Point(161) = {-0.1145078729404739, 0.02450371976411331, 0, 0.125}; +# Point(162) = {-0.1290321771824579, 0.0267015185742735, 0, 0.125}; +# Point(163) = {-0.143440065923266, 0.02879471001709845, 0, 0.125}; +# Point(164) = {-0.1577189448447794, 0.03078883518202784, 0, 0.125}; +# Point(165) = {-0.1718563428491159, 0.03268980457290044, 0, 0.125}; +# Point(166) = {-0.1858399037768357, 0.03450385196323842, 0, 0.125}; +# Point(167) = {-0.1996573773370766, 0.03623748825421298, 0, 0.125}; +# Point(168) = {-0.2132966095779342, 0.03789745574015834, 0, 0.125}; +# Point(169) = {-0.2267455332406906, 0.0394906831577609, 0, 0.125}; +# Point(170) = {-0.2399921583489679, 0.04102424186233269, 0, 0.125}; +# Point(171) = {-0.2530245633834605, 0.04250530343879837, 0, 0.125}; +# Point(172) = {-0.2658308873846617, 0.04394109901707172, 0, 0.125}; +# Point(173) = {-0.2783993233102972, 0.04533888052223981, 0, 0.125}; +# Point(174) = {-0.2907181129514687, 0.04670588405019788, 0, 0.125}; +# Point(175) = {-0.3027755436824813, 0.0480492955198111, 0, 0.125}; +# Point(176) = {-0.3145599472847223, 0.04937621871394801, 0, 0.125}; +# Point(177) = {-0.3260597010456697, 0.05069364578437131, 0, 0.125}; +# Point(178) = {-0.337263231291058, 0.05200843025992359, 0, 0.125}; +# Point(179) = {-0.3481590194623916, 0.05332726256406103, 0, 0.125}; +# Point(180) = {-0.3587356108043638, 0.05465664801682354, 0, 0.125}; +# Point(181) = {-0.3689816256782782, 0.0560028872679817, 0, 0.125}; +# Point(182) = {-0.3788857734692287, 0.05737205908247899, 0, 0.125}; +# Point(183) = {-0.3884368690074614, 0.05877000537646382, 0, 0.125}; +# Point(184) = {-0.3976238513788748, 0.06020231838219783, 0, 0.125}; +# Point(185) = {-0.40643580495675, 0.06167432980291591, 0, 0.125}; +# Point(186) = {-0.4148619824472646, 0.06319110180426264, 0, 0.125}; +# Point(187) = {-0.4228918297057104, 0.06475741967717524, 0, 
0.125}; +# Point(188) = {-0.43051501204915, 0.06637778599795482, 0, 0.125}; +# Point(189) = {-0.4377214417649294, 0.06805641610468524, 0, 0.125}; +# Point(190) = {-0.4445013064933708, 0.06979723470503821, 0, 0.125}; +# Point(191) = {-0.4508450981473512, 0.07160387342876083, 0, 0.125}; +# Point(192) = {-0.4567436420215075, 0.073479669138689, 0, 0.125}; +# Point(193) = {-0.4621881257395756, 0.07542766281688272, 0, 0.125}; +# Point(194) = {-0.4671701276898881, 0.07745059884734995, 0, 0.125}; +# Point(195) = {-0.471681644606229, 0.07955092452372269, 0, 0.125}; +# Point(196) = {-0.4757151179639407, 0.0817307896190848, 0, 0.125}; +# Point(197) = {-0.4792634588791559, 0.0839920458658267, 0, 0.125}; +# Point(198) = {-0.4823200712220043, 0.08633624620581726, 0, 0.125}; +# Point(199) = {-0.4848788726822436, 0.08876464368523246, 0, 0.125}; +# Point(200) = {-0.4869343135575803, 0.09127818988394577, 0, 0.125}; +# Point(201) = {-0.4884813930704814, 0.09387753278635144, 0, 0.125}; +# Point(202) = {-0.4895156730580155, 0.09656301401871749, 0, 0.125}; +# +# // splines of the airfoil +# Spline(5) = {5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104}; +# Spline(6) = {104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,5}; +# +# // airfoil +# Line Loop(9) = {5, 6}; +# // complete domain +# Plane Surface(1) = {8, 9}; +# +# // labeling of the boundary parts +# Physical Line(1) = {4}; // inflow +# Physical Line(2) = {2}; // outflow +# Physical Line(3) = {1, 3}; // airfoil +# Physical Line(4) = {5, 6}; // upper/lower wall +# Physical Surface(1) = {10}; +# ``` +# From which we can construct a mesh like this: +# ![mesh_screenshot](https://github.com/trixi-framework/Trixi.jl/assets/75639095/67adfe3d-d403-4cd3-acaa-971a34df0709) +# +# The first four points define the bounding box = (near-field) domain: +# ```c++ +# // outer bounding box +# Point(1) = {-1.25, -0.5, 0, 1.0}; +# Point(2) = {1.25, -0.5, 0, 1.0}; +# Point(3) = {1.25, 0.5, 0, 1.0}; +# Point(4) = {-1.25, 0.5, 0, 1.0}; +# ``` +# which is constructed from connecting the points in lines: +# ```c++ +# // outer box +# Line(1) = {1, 2}; +# Line(2) = {2, 3}; +# Line(3) = {3, 4}; +# Line(4) = {4, 1}; +# // outer box +# Line Loop(8) = {1, 2, 3, 4}; +# ``` +# +# This is followed by a couple (in principle optional) settings where the most important one is +# ```c++ +# // Insist on quads instead of default triangles +# Mesh.RecombineAll = 1; +# ``` +# which forces `gmsh` to generate quadrilateral elements instead of the default triangles. +# This is strictly required to be able to use the mesh later with `p4est`, which supports only straight-sided quads, +# i.e., `C2D4, CPS4, S4` in 2D and `C3D` in 3D. +# See for more details the (short) [documentation](https://p4est.github.io/p4est-howto.pdf) on the interaction of `p4est` with `.inp` files. 
+# In principle, it should also be possible to use the `recombine` function of `gmsh` to convert the triangles to quads, +# but this is observed to be less robust than enforcing quads from the beginning. +# +# Then the airfoil is defined by a set of points: +# ```c++ +# // points of the airfoil contour +# Point(5) = {-0.4900332889206208, 0.09933466539753061, 0, 0.125}; +# Point(6) = {-0.4900274857651495, 0.1021542752054094, 0, 0.125}; +# ... +# ``` +# which are connected by splines for the upper and lower part of the airfoil: +# ```c++ +# // splines of the airfoil +# Spline(5) = {5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20, +# ... +# 96,97,98,99,100,101,102,103,104}; +# Spline(6) = {104,105,106,107,108,109,110,111,112,113,114,115, +# ... +# 200,201,202,5}; +# ``` +# which are then connected to form a single line loop for easy physical group assignment: +# ```c++ +# // airfoil +# Line Loop(9) = {5, 6}; +# ``` +# +# At the end of the file the physical groups are defined: +# ```c++ +# // labeling of the boundary parts +# Physical Line(1) = {4}; // Inflow. Label in Abaqus .inp file: PhysicalLine1 +# Physical Line(2) = {2}; // Outflow. Label in Abaqus .inp file: PhysicalLine2 +# Physical Line(3) = {1, 3}; // Upper and lower wall/farfield/... Label in Abaqus .inp file: PhysicalLine3 +# Physical Line(4) = {5, 6}; // Airfoil. Label in Abaqus .inp file: PhysicalLine4 +# ``` +# which are crucial for the correct assignment of boundary conditions in `Trixi.jl`. +# In particular, it is the responsibility of a user to keep track on the physical boundary names between the mesh generation and assignment of boundary condition functions in an elixir. +# +# After opening this file in `gmsh`, meshing the geometry and exporting to Abaqus `.inp` format, +# we can have a look at the input file: +# ``` +# *Heading +# +# *NODE +# 1, -1.25, -0.5, 0 +# 2, 1.25, -0.5, 0 +# 3, 1.25, 0.5, 0 +# 4, -1.25, 0.5, 0 +# ... +# ******* E L E M E N T S ************* +# *ELEMENT, type=T3D2, ELSET=Line1 +# 1, 1, 7 +# ... +# *ELEMENT, type=CPS4, ELSET=Surface1 +# 191, 272, 46, 263, 807 +# ... +# *NSET,NSET=PhysicalLine1 +# 1, 4, 52, 53, 54, 55, 56, 57, 58, +# *NSET,NSET=PhysicalLine2 +# 2, 3, 26, 27, 28, 29, 30, 31, 32, +# *NSET,NSET=PhysicalLine3 +# 1, 2, 3, 4, 7, 8, 9, 10, 11, 12, +# 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, +# 23, 24, 25, 33, 34, 35, 36, 37, 38, 39, +# 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, +# 50, 51, +# *NSET,NSET=PhysicalLine4 +# 5, 6, 59, 60, 61, 62, 63, 64, 65, 66, +# 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, +# 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, +# 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, +# 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, +# 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, +# 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, +# 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, +# 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, +# 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, +# 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, +# 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, +# 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, +# 187, 188, 189, 190, +# ``` +# +# First, the coordinates of the nodes are listed, followed by the elements. +# Note that `gmsh` exports also line elements of type `T3D2` which are ignored by `p4est`. +# The relevant elements in 2D which form the gridcells are of type `CPS4` which are defined by their four corner nodes. +# This is followed by the nodesets encoded via `*NSET` which are used to assign boundary conditions in Trixi.jl. 
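+# If you are unsure which node sets a given `.inp` file provides, you can list the `*NSET` lines
+# directly, e.g., with plain Julia (the file name `NACA6412.inp` below is only a placeholder for
+# your actual mesh file):
+# ```julia
+# foreach(println, filter(contains("*NSET"), readlines("NACA6412.inp")))
+# ```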
+# Trixi.jl parses the `.inp` file and assigns the edges (in 2D, surfaces in 3D) of elements to the corresponding boundary condition based on
+# the `boundary_symbols` that are supplied to the `P4estMesh` constructor:
+# ```julia
+# # boundary symbols
+# boundary_symbols = [:PhysicalLine1, :PhysicalLine2, :PhysicalLine3, :PhysicalLine4]
+# mesh = P4estMesh{2}(mesh_file, polydeg = polydeg, boundary_symbols = boundary_symbols)
+# ```
+# The same boundary symbols then also have to be supplied to the semidiscretization alongside the
+# corresponding physical boundary conditions:
+# ```julia
+# # Supersonic inflow boundary condition.
+# # Calculate the boundary flux entirely from the external solution state, i.e., set
+# # external solution state values for everything entering the domain.
+# @inline function boundary_condition_supersonic_inflow(u_inner,
+#                                                       normal_direction::AbstractVector,
+#                                                       x, t, surface_flux_function,
+#                                                       equations::CompressibleEulerEquations2D)
+#     u_boundary = initial_condition_mach2_flow(x, t, equations)
+#     flux = Trixi.flux(u_boundary, normal_direction, equations)
+#
+#     return flux
+# end
+#
+# # Supersonic outflow boundary condition.
+# # Calculate the boundary flux entirely from the internal solution state. Analogous to supersonic inflow
+# # except all the solution state values are set from the internal solution as everything leaves the domain.
+# @inline function boundary_condition_supersonic_outflow(u_inner,
+#                                                        normal_direction::AbstractVector, x,
+#                                                        t,
+#                                                        surface_flux_function,
+#                                                        equations::CompressibleEulerEquations2D)
+#     flux = Trixi.flux(u_inner, normal_direction, equations)
+#
+#     return flux
+# end
+#
+# boundary_conditions = Dict(:PhysicalLine1 => boundary_condition_supersonic_inflow, # Left boundary
+#                            :PhysicalLine2 => boundary_condition_supersonic_outflow, # Right boundary
+#                            :PhysicalLine3 => boundary_condition_supersonic_outflow, # Top and bottom boundary
+#                            :PhysicalLine4 => boundary_condition_slip_wall) # Airfoil
+#
+# semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver,
+#                                     boundary_conditions = boundary_conditions)
+# ```
+# Note that you **have to** supply the `boundary_symbols` keyword to the `P4estMesh` constructor
+# to select the boundaries from the available nodesets in the `.inp` file.
+# If the `boundary_symbols` keyword is not supplied, all boundaries will be assigned to the default set `:all`.
+
+# ## Package versions
+
+# These results were obtained using the following versions.
+ +using InteractiveUtils +versioninfo() + +using Pkg +Pkg.status(["Trixi", "OrdinaryDiffEq", "Plots", "Download"], + mode=PKGMODE_MANIFEST) diff --git a/docs/make.jl b/docs/make.jl index df8ac04be12..8427c4049bf 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -8,6 +8,7 @@ end using Trixi using Trixi2Vtk +using TrixiBase # Get Trixi.jl root directory trixi_root_dir = dirname(@__DIR__) @@ -48,6 +49,13 @@ end # "title" => ["subtitle 1" => ("folder 1", "filename 1.jl"), # "subtitle 2" => ("folder 2", "filename 2.jl")] files = [ + # Topic: introduction + "First steps in Trixi.jl" => [ + "Getting started" => ("first_steps", "getting_started.jl"), + "Create first setup" => ("first_steps", "create_first_setup.jl"), + "Changing Trixi.jl itself" => ("first_steps", "changing_trixi.jl"), + ], + "Behind the scenes of a simulation setup" => "behind_the_scenes_simulation_setup.jl", # Topic: DG semidiscretizations "Introduction to DG methods" => "scalar_linear_advection_1d.jl", "DGSEM with flux differencing" => "DGSEM_FluxDiff.jl", @@ -65,17 +73,18 @@ files = [ "Adaptive mesh refinement" => "adaptive_mesh_refinement.jl", "Structured mesh with curvilinear mapping" => "structured_mesh_mapping.jl", "Unstructured meshes with HOHQMesh.jl" => "hohqmesh_tutorial.jl", + "P4est mesh from gmsh" => "p4est_from_gmsh.jl", # Topic: other stuff "Explicit time stepping" => "time_stepping.jl", "Differentiable programming" => "differentiable_programming.jl", - "Custom semidiscretizations" => "custom_semidiscretization.jl" + "Custom semidiscretizations" => "custom_semidiscretization.jl", ] tutorials = create_tutorials(files) # Make documentation makedocs( # Specify modules for which docstrings should be shown - modules = [Trixi, Trixi2Vtk], + modules = [Trixi, TrixiBase, Trixi2Vtk], # Set sitename to Trixi.jl sitename = "Trixi.jl", # Provide additional formatting options @@ -107,6 +116,7 @@ makedocs( ], "Time integration" => "time_integration.md", "Callbacks" => "callbacks.md", + "Coupling" => "multi-physics_coupling.md" ], "Advanced topics & developers" => [ "Conventions" =>"conventions.md", @@ -120,6 +130,7 @@ makedocs( "Troubleshooting and FAQ" => "troubleshooting.md", "Reference" => [ "Trixi.jl" => "reference-trixi.md", + "TrixiBase.jl" => "reference-trixibase.md", "Trixi2Vtk.jl" => "reference-trixi2vtk.md" ], "Authors" => "authors.md", diff --git a/docs/src/conventions.md b/docs/src/conventions.md index dab1b8533a5..4f9e0ec4e67 100644 --- a/docs/src/conventions.md +++ b/docs/src/conventions.md @@ -47,10 +47,12 @@ Trixi.jl is distributed with several examples in the form of elixirs, small Julia scripts containing everything to set up and run a simulation. Working interactively from the Julia REPL with these scripts can be quite convenient while for exploratory research and development of Trixi.jl. For example, you -can use the convenience function [`trixi_include`](@ref) to `include` an elixir -with some modified arguments. To enable this, it is helpful to use a consistent -naming scheme in elixirs, since [`trixi_include`](@ref) can only perform simple -replacements. Some standard variables names are +can use the convenience function +[`trixi_include`](@ref) +to `include` an elixir with some modified arguments. To enable this, it is +helpful to use a consistent naming scheme in elixirs, since +[`trixi_include`](@ref) +can only perform simple replacements. 
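+For example, a minimal sketch of such a call could look like the following
+(the chosen elixir and keyword are only an example):
+```julia
+using Trixi
+
+# Re-run the default example elixir, but only up to time t = 0.2.
+trixi_include(default_example(), tspan = (0.0, 0.2))
+```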
Some standard variable names are
- `polydeg` for the polynomial degree of a solver
- `surface_flux` for the numerical flux at surfaces
diff --git a/docs/src/meshes/p4est_mesh.md b/docs/src/meshes/p4est_mesh.md index 1e5d782ebb6..3b35ffcad6f 100644 --- a/docs/src/meshes/p4est_mesh.md +++ b/docs/src/meshes/p4est_mesh.md
@@ -55,7 +55,7 @@ This heading is used to indicate to the mesh constructor which of the above mapp create a curvilinear mesh. If the Abaqus file header is **not** present then the `P4estMesh` is created with the first strategy above.
-#### List of corner nodes
+#### [List of corner nodes](@id corner-node-list)
Next, prefaced with `*NODE`, comes a list of the physical `(x,y,z)` coordinates of all the corners. The first integer in the list of the corners provides its id number.
@@ -71,7 +71,7 @@ Thus, for the two-dimensional example mesh this block of corner information is 7, 3.0, -1.0, 0.0 ```
-#### List of elements
+#### [List of elements](@id element-list)
The element connectivity is given after the list of corners. The header for this information block is ```
@@ -98,7 +98,9 @@ The construction of the element neighbor ids and identifying physical boundary s directly from the [`p4est`](https://github.com/cburstedde/p4est) library. For example, the neighbor connectivity is created in the mesh constructor using the wrapper `read_inp_p4est` function.
-#### HOHQMesh boundary information
+#### Encoding of boundaries
+
+##### HOHQMesh boundary information
If present, any additional information in the mesh file that was created by `HOHQMesh` is prefaced with `** ` to make it an Abaqus comment.
@@ -230,8 +232,38 @@ For completeness, we provide the entire Abaqus mesh file for the example mesh in ** Bottom --- Right --- ```
+##### Standard Abaqus format boundary information
+
+As an alternative to an Abaqus mesh generated by `HOHQMesh`, `.inp` files with boundary information encoded as nodesets `*NSET,NSET=` can be used to construct a `p4est` mesh.
+This is especially useful for reusing existing meshes (consisting of bilinear elements), which could, for instance, stem from the popular [`gmsh`](https://gmsh.info/) meshing software.
+
+In addition to the list of [nodes](@ref corner-node-list) and [elements](@ref element-list) given above, there are nodesets of the form
+```
+*NSET,NSET=PhysicalLine1
+1, 4, 52, 53, 54, 55, 56, 57, 58,
+```
+which are used to associate the edges defined by their corner nodes with a label, in this case `PhysicalLine1`.
+By looping over every element and its edges, each consisting of two nodes, we query the read-in `NSET`s to check whether the current node pair is present.
+
+To prevent every nodeset following `*NSET,NSET=` from being treated as a boundary, the user must supply the `boundary_symbols` keyword to the [`P4estMesh`](@ref) constructor:
+
+```julia
+boundary_symbols = [:PhysicalLine1]
+
+mesh = P4estMesh{2}(mesh_file, polydeg = polydeg, boundary_symbols = boundary_symbols)
+```
+By doing so, only nodesets with a label present in `boundary_symbols` are treated as physical boundaries.
+Other nodesets that could be used for diagnostics are not treated as external boundaries.
+Note the leading colon `:` compared to the label in the `.inp` mesh file.
+This is required to turn the label into a [`Symbol`](https://docs.julialang.org/en/v1/manual/metaprogramming/#Symbols).
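+The matching itself can be pictured as a simple membership test. The following sketch is not
+Trixi.jl's actual implementation, but illustrates the idea with the node ids from the example
+nodeset above:
+```julia
+# An element edge, given by the ids of its two corner nodes, lies on a labeled boundary
+# if both node ids are contained in the nodeset associated with that label.
+edge_on_boundary(edge_nodes, nodeset) = all(in(nodeset), edge_nodes)
+
+nodeset_line1 = Set([1, 4, 52, 53, 54, 55, 56, 57, 58])  # from *NSET,NSET=PhysicalLine1
+edge_on_boundary((4, 52), nodeset_line1)  # true
+edge_on_boundary((4, 100), nodeset_line1) # false
+```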
+ +A 2D example for this mesh, which is read-in for an unstructured mesh file created with `gmsh`, is presented in +`examples/p4est_2d_dgsem/elixir_euler_NACA6412airfoil_mach2.jl`. + ### Mesh in three spatial dimensions +#### `HOHQMesh`-Extended Abaqus format + The 3D Abaqus file format with high-order boundary information from `HOHQMesh` is very similar to the 2D version discussed above. There are only three changes: @@ -346,4 +378,222 @@ transfinite map of the straight sided hexahedral element to find \mathbf{X}(\boldsymbol{\xi}) = \boldsymbol\Sigma(\boldsymbol{\xi}) - \mathcal{C}_{\texttt{edge}}(\boldsymbol{\xi}) + \mathbf{X}_{linear}(\boldsymbol{\xi}). +``` + +#### Construction from standard Abaqus + +Also for a mesh in standard Abaqus format there are no qualitative changes when going from 2D to 3D. +The most notable difference is that boundaries are formed in 3D by faces defined by four nodes while in 2D boundaries are edges consisting of two elements. +A simple mesh file, which is used also in `examples/p4est_3d_dgsem/elixir_euler_free_stream_boundaries.jl`, is given below: +``` +*Heading + +*NODE +1, -2, 0, 0 +2, -1, 0, 0 +3, -1, 1, 0 +4, -2, 1, 0 +5, -2, 0, 1 +6, -1, 0, 1 +7, -1, 1, 1 +8, -2, 1, 1 +9, -1.75, 1, 0 +10, -1.5, 1, 0 +11, -1.25, 1, 0 +12, -1, 0.75000000000035, 0 +13, -1, 0.50000000000206, 0 +14, -1, 0.25000000000104, 0 +15, -1.25, 0, 0 +16, -1.5, 0, 0 +17, -1.75, 0, 0 +18, -2, 0.24999999999941, 0 +19, -2, 0.49999999999869, 0 +20, -2, 0.74999999999934, 0 +21, -1.75, 0, 1 +22, -1.5, 0, 1 +23, -1.25, 0, 1 +24, -1, 0.24999999999941, 1 +25, -1, 0.49999999999869, 1 +26, -1, 0.74999999999934, 1 +27, -1.25, 1, 1 +28, -1.5, 1, 1 +29, -1.75, 1, 1 +30, -2, 0.75000000000035, 1 +31, -2, 0.50000000000206, 1 +32, -2, 0.25000000000104, 1 +33, -2, 0, 0.24999999999941 +34, -2, 0, 0.49999999999869 +35, -2, 0, 0.74999999999934 +36, -2, 1, 0.24999999999941 +37, -2, 1, 0.49999999999869 +38, -2, 1, 0.74999999999934 +39, -1, 0, 0.24999999999941 +40, -1, 0, 0.49999999999869 +41, -1, 0, 0.74999999999934 +42, -1, 1, 0.24999999999941 +43, -1, 1, 0.49999999999869 +44, -1, 1, 0.74999999999934 +45, -1.25, 0.25000000000063, 0 +46, -1.25, 0.50000000000122, 0 +47, -1.25, 0.7500000000001, 0 +48, -1.5, 0.25000000000023, 0 +49, -1.5, 0.50000000000038, 0 +50, -1.5, 0.74999999999984, 0 +51, -1.75, 0.24999999999982, 0 +52, -1.75, 0.49999999999953, 0 +53, -1.75, 0.74999999999959, 0 +54, -1.75, 0.25000000000063, 1 +55, -1.75, 0.50000000000122, 1 +56, -1.75, 0.7500000000001, 1 +57, -1.5, 0.25000000000023, 1 +58, -1.5, 0.50000000000038, 1 +59, -1.5, 0.74999999999984, 1 +60, -1.25, 0.24999999999982, 1 +61, -1.25, 0.49999999999953, 1 +62, -1.25, 0.74999999999959, 1 +63, -2, 0.24999999999982, 0.24999999999941 +64, -2, 0.49999999999953, 0.24999999999941 +65, -2, 0.74999999999959, 0.24999999999941 +66, -2, 0.25000000000023, 0.49999999999869 +67, -2, 0.50000000000038, 0.49999999999869 +68, -2, 0.74999999999984, 0.49999999999869 +69, -2, 0.25000000000063, 0.74999999999934 +70, -2, 0.50000000000122, 0.74999999999934 +71, -2, 0.7500000000001, 0.74999999999934 +72, -1.25, 1, 0.74999999999934 +73, -1.25, 1, 0.49999999999869 +74, -1.25, 1, 0.24999999999941 +75, -1.5, 1, 0.74999999999934 +76, -1.5, 1, 0.49999999999869 +77, -1.5, 1, 0.24999999999941 +78, -1.75, 1, 0.74999999999934 +79, -1.75, 1, 0.49999999999869 +80, -1.75, 1, 0.24999999999941 +81, -1, 0.25000000000063, 0.24999999999941 +82, -1, 0.50000000000122, 0.24999999999941 +83, -1, 0.7500000000001, 0.24999999999941 +84, -1, 0.25000000000023, 0.49999999999869 +85, -1, 
0.50000000000038, 0.49999999999869 +86, -1, 0.74999999999984, 0.49999999999869 +87, -1, 0.24999999999982, 0.74999999999934 +88, -1, 0.49999999999953, 0.74999999999934 +89, -1, 0.74999999999959, 0.74999999999934 +90, -1.75, 0, 0.74999999999934 +91, -1.75, 0, 0.49999999999869 +92, -1.75, 0, 0.24999999999941 +93, -1.5, 0, 0.74999999999934 +94, -1.5, 0, 0.49999999999869 +95, -1.5, 0, 0.24999999999941 +96, -1.25, 0, 0.74999999999934 +97, -1.25, 0, 0.49999999999869 +98, -1.25, 0, 0.24999999999941 +99, -1.75, 0.25000000000043, 0.74999999999934 +100, -1.75, 0.25000000000023, 0.49999999999869 +101, -1.75, 0.25000000000002, 0.24999999999941 +102, -1.75, 0.5000000000008, 0.74999999999934 +103, -1.75, 0.50000000000038, 0.49999999999869 +104, -1.75, 0.49999999999995, 0.24999999999941 +105, -1.75, 0.74999999999997, 0.74999999999934 +106, -1.75, 0.74999999999984, 0.49999999999869 +107, -1.75, 0.74999999999972, 0.24999999999941 +108, -1.5, 0.25000000000023, 0.74999999999934 +109, -1.5, 0.25000000000023, 0.49999999999869 +110, -1.5, 0.25000000000023, 0.24999999999941 +111, -1.5, 0.50000000000038, 0.74999999999934 +112, -1.5, 0.50000000000038, 0.49999999999869 +113, -1.5, 0.50000000000038, 0.24999999999941 +114, -1.5, 0.74999999999984, 0.74999999999934 +115, -1.5, 0.74999999999984, 0.49999999999869 +116, -1.5, 0.74999999999984, 0.24999999999941 +117, -1.25, 0.25000000000002, 0.74999999999934 +118, -1.25, 0.25000000000023, 0.49999999999869 +119, -1.25, 0.25000000000043, 0.24999999999941 +120, -1.25, 0.49999999999995, 0.74999999999934 +121, -1.25, 0.50000000000038, 0.49999999999869 +122, -1.25, 0.5000000000008, 0.24999999999941 +123, -1.25, 0.74999999999972, 0.74999999999934 +124, -1.25, 0.74999999999984, 0.49999999999869 +125, -1.25, 0.74999999999997, 0.24999999999941 +******* E L E M E N T S ************* +*ELEMENT, type=C3D8, ELSET=Volume1 +153, 54, 21, 5, 32, 99, 90, 35, 69 +154, 99, 90, 35, 69, 100, 91, 34, 66 +155, 100, 91, 34, 66, 101, 92, 33, 63 +156, 101, 92, 33, 63, 51, 17, 1, 18 +157, 55, 54, 32, 31, 102, 99, 69, 70 +158, 102, 99, 69, 70, 103, 100, 66, 67 +159, 103, 100, 66, 67, 104, 101, 63, 64 +160, 104, 101, 63, 64, 52, 51, 18, 19 +161, 56, 55, 31, 30, 105, 102, 70, 71 +162, 105, 102, 70, 71, 106, 103, 67, 68 +163, 106, 103, 67, 68, 107, 104, 64, 65 +164, 107, 104, 64, 65, 53, 52, 19, 20 +165, 29, 56, 30, 8, 78, 105, 71, 38 +166, 78, 105, 71, 38, 79, 106, 68, 37 +167, 79, 106, 68, 37, 80, 107, 65, 36 +168, 80, 107, 65, 36, 9, 53, 20, 4 +169, 57, 22, 21, 54, 108, 93, 90, 99 +170, 108, 93, 90, 99, 109, 94, 91, 100 +171, 109, 94, 91, 100, 110, 95, 92, 101 +172, 110, 95, 92, 101, 48, 16, 17, 51 +173, 58, 57, 54, 55, 111, 108, 99, 102 +174, 111, 108, 99, 102, 112, 109, 100, 103 +175, 112, 109, 100, 103, 113, 110, 101, 104 +176, 113, 110, 101, 104, 49, 48, 51, 52 +177, 59, 58, 55, 56, 114, 111, 102, 105 +178, 114, 111, 102, 105, 115, 112, 103, 106 +179, 115, 112, 103, 106, 116, 113, 104, 107 +180, 116, 113, 104, 107, 50, 49, 52, 53 +181, 28, 59, 56, 29, 75, 114, 105, 78 +182, 75, 114, 105, 78, 76, 115, 106, 79 +183, 76, 115, 106, 79, 77, 116, 107, 80 +184, 77, 116, 107, 80, 10, 50, 53, 9 +185, 60, 23, 22, 57, 117, 96, 93, 108 +186, 117, 96, 93, 108, 118, 97, 94, 109 +187, 118, 97, 94, 109, 119, 98, 95, 110 +188, 119, 98, 95, 110, 45, 15, 16, 48 +189, 61, 60, 57, 58, 120, 117, 108, 111 +190, 120, 117, 108, 111, 121, 118, 109, 112 +191, 121, 118, 109, 112, 122, 119, 110, 113 +192, 122, 119, 110, 113, 46, 45, 48, 49 +193, 62, 61, 58, 59, 123, 120, 111, 114 +194, 123, 120, 111, 114, 124, 121, 112, 115 
+195, 124, 121, 112, 115, 125, 122, 113, 116 +196, 125, 122, 113, 116, 47, 46, 49, 50 +197, 27, 62, 59, 28, 72, 123, 114, 75 +198, 72, 123, 114, 75, 73, 124, 115, 76 +199, 73, 124, 115, 76, 74, 125, 116, 77 +200, 74, 125, 116, 77, 11, 47, 50, 10 +201, 24, 6, 23, 60, 87, 41, 96, 117 +202, 87, 41, 96, 117, 84, 40, 97, 118 +203, 84, 40, 97, 118, 81, 39, 98, 119 +204, 81, 39, 98, 119, 14, 2, 15, 45 +205, 25, 24, 60, 61, 88, 87, 117, 120 +206, 88, 87, 117, 120, 85, 84, 118, 121 +207, 85, 84, 118, 121, 82, 81, 119, 122 +208, 82, 81, 119, 122, 13, 14, 45, 46 +209, 26, 25, 61, 62, 89, 88, 120, 123 +210, 89, 88, 120, 123, 86, 85, 121, 124 +211, 86, 85, 121, 124, 83, 82, 122, 125 +212, 83, 82, 122, 125, 12, 13, 46, 47 +213, 7, 26, 62, 27, 44, 89, 123, 72 +214, 44, 89, 123, 72, 43, 86, 124, 73 +215, 43, 86, 124, 73, 42, 83, 125, 74 +216, 42, 83, 125, 74, 3, 12, 47, 11 +*NSET,NSET=PhysicalSurface1 +1, 2, 3, 4, 5, 6, 7, 8, 9, 10, +11, 12, 13, 14, 15, 16, 17, 18, 19, 20, +21, 22, 23, 24, 25, 26, 27, 28, 29, 30, +31, 32, 33, 34, 35, 36, 37, 38, 45, 46, +47, 48, 49, 50, 51, 52, 53, 54, 55, 56, +57, 58, 59, 60, 61, 62, 63, 64, 65, 66, +67, 68, 69, 70, 71, +*NSET,NSET=PhysicalSurface2 +1, 2, 3, 4, 5, 6, 7, 8, 9, 10, +11, 12, 13, 14, 15, 16, 17, 21, 22, 23, +24, 25, 26, 27, 28, 29, 33, 34, 35, 36, +37, 38, 39, 40, 41, 42, 43, 44, 72, 73, +74, 75, 76, 77, 78, 79, 80, 81, 82, 83, +84, 85, 86, 87, 88, 89, 90, 91, 92, 93, +94, 95, 96, 97, 98, ``` \ No newline at end of file diff --git a/docs/src/multi-physics_coupling.md b/docs/src/multi-physics_coupling.md new file mode 100644 index 00000000000..eec92bc21de --- /dev/null +++ b/docs/src/multi-physics_coupling.md @@ -0,0 +1,46 @@ +# [Multi-physics coupling](@id multi-physics-coupling) +A complex simulation can consist of different spatial domains in which +different equations are being solved, different numerical methods being used +or the grid structure is different. +One example would be a fluid in a tank and an extended hot plate attached to it. +We would then like to solve the Navier-Stokes equations in the fluid domain +and the heat conduction equations in the plate. +The coupling would happen at the interface through the exchange of thermal energy. + + +## Converter coupling +It may happen that the two systems to be coupled do not share any variables, but +share some of the physics. +In such a situation, the same physics is just represented in a different form and with +a different set of variables. +This is the case, for instance assuming two domains, if there is a fluid system in one domain +and a Vlasov system in the other domain. +In that case we would have variables representing distribution functions of +the Vlasov system on one side and variables representing the mechanical quantities, like density, +of the fluid system. +To translate the fields from one description to the other one needs to use +converter functions. +These functions need to be hand tailored by the user in the elixir file where each +pair of coupled systems requires two coupling functions, one for each direction. + +In the general case, we have a system $A$ with $m$ variables +$u_{A,i}, \: i = 1, \dots, m$ and another +system $B$ with $n$ variables $u_{B,j}, \: j = 1, \dots, n$. +We then define two coupling functions, one that transforms $u_A$ into $u_B$ +and one that goes the other way. + +In their minimal form they take the position vector $x$, state vector $u$ +and the equations of the two coupled systems +and return the transformed variables. 
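+A minimal sketch of such a pair of converter functions is given below; the anonymous-function
+signature matches the one used in the coupled advection elixir referenced next, while the second,
+rescaling converter is purely illustrative:
+```julia
+# Identity converter: both systems use the same set of variables.
+coupling_function_ab = (x, u, equations_other, equations_own) -> u
+
+# Illustrative converter that rescales the state before handing it to the other system.
+coupling_function_ba = (x, u, equations_other, equations_own) -> 2 * u
+```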
+By passing the equations, we can make use of their parameters if they are required.
+Examples can be seen in `examples/structured_2d_dgsem/elixir_advection_coupled.jl`.
+
+
+## Warning about binary compatibility
+Currently, the coordinate values on the nodes can differ by machine precision when
+simulating on the full mesh and when splitting the mesh into multiple domains.
+This is an issue coming from the coordinate interpolation on the nodes.
+As a result, running a simulation as a single system and as two coupled domains
+may result in differences on the order of machine precision.
+While this is not an issue for most practical problems, it is best to keep this in mind when comparing test runs.
+
diff --git a/docs/src/performance.md b/docs/src/performance.md index df66f451b79..82d7f501f63 100644 --- a/docs/src/performance.md +++ b/docs/src/performance.md
@@ -267,3 +267,14 @@ requires. It can thus be seen as a proxy for "energy used" and, as an extension, timing result, you need to set the analysis interval such that the `AnalysisCallback` is invoked at least once during the course of the simulation and discard the first PID value.
+
+## Performance issues with multi-threaded reductions
+[False sharing](https://en.wikipedia.org/wiki/False_sharing) is a known performance issue
+for systems with distributed caches. It also occurred for the implementation of a
+thread-parallel bounds checking routine for the subcell IDP limiting
+in [PR #1736](https://github.com/trixi-framework/Trixi.jl/pull/1736).
+After some [testing and discussion](https://github.com/trixi-framework/Trixi.jl/pull/1736#discussion_r1423881895),
+it turned out that initializing a vector of length `n * Threads.nthreads()` and only using every
+`n`-th entry instead of a vector of length `Threads.nthreads()` fixes the problem.
+Since there are no processors with cache lines longer than 128B, we use `n = 128B / sizeof(uEltype)`.
+Now, the bounds checking routine of the IDP limiting scales as hoped.
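+The following is a small, self-contained sketch of the padding idea, not Trixi.jl's actual
+bounds checking code: each thread accumulates into its own slot, and the slots are spaced one
+cache line (assumed to be at most 128B) apart so that no two threads write to the same cache line.
+```julia
+using Base.Threads
+
+function threaded_sum_padded(x::Vector{Float64})
+    n = 128 ÷ sizeof(Float64)              # number of entries per 128B cache line
+    partial = zeros(Float64, n * nthreads())
+    @threads :static for i in eachindex(x)
+        # Each thread only touches entry (threadid() - 1) * n + 1, i.e., every n-th entry.
+        partial[(threadid() - 1) * n + 1] += x[i]
+    end
+    return sum(partial)
+end
+```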
diff --git a/docs/src/reference-trixibase.md b/docs/src/reference-trixibase.md new file mode 100644 index 00000000000..c7a970f88ec --- /dev/null +++ b/docs/src/reference-trixibase.md @@ -0,0 +1,9 @@ +# TrixiBase.jl API + +```@meta +CurrentModule = TrixiBase +``` + +```@autodocs +Modules = [TrixiBase] +``` diff --git a/examples/dgmulti_2d/elixir_euler_hohqmesh.jl b/examples/dgmulti_2d/elixir_euler_hohqmesh.jl index f534b5bc8ad..9b14a5c6827 100644 --- a/examples/dgmulti_2d/elixir_euler_hohqmesh.jl +++ b/examples/dgmulti_2d/elixir_euler_hohqmesh.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -30,12 +29,8 @@ dg = DGMulti(polydeg = 8, element_type = Quad(), approximation_type = SBP(), ############################################################################### # Get the curved quad mesh from a file (downloads the file if not available locally) - -default_mesh_file = joinpath(@__DIR__, "mesh_trixi_unstructured_mesh_docs.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/52056f1487853fab63b7f4ed7f171c80/raw/9d573387dfdbb8bce2a55db7246f4207663ac07f/mesh_trixi_unstructured_mesh_docs.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/52056f1487853fab63b7f4ed7f171c80/raw/9d573387dfdbb8bce2a55db7246f4207663ac07f/mesh_trixi_unstructured_mesh_docs.mesh", + joinpath(@__DIR__, "mesh_trixi_unstructured_mesh_docs.mesh")) mesh = DGMultiMesh(dg, mesh_file) diff --git a/examples/p4est_2d_dgsem/elixir_advection_amr_unstructured_flag.jl b/examples/p4est_2d_dgsem/elixir_advection_amr_unstructured_flag.jl index 0a50b3644f0..4bfb2d3e375 100644 --- a/examples/p4est_2d_dgsem/elixir_advection_amr_unstructured_flag.jl +++ b/examples/p4est_2d_dgsem/elixir_advection_amr_unstructured_flag.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -31,10 +30,8 @@ Trixi.validate_faces(faces) mapping_flag = Trixi.transfinite_mapping(faces) # Unstructured mesh with 24 cells of the square domain [-1, 1]^n -mesh_file = joinpath(@__DIR__, "square_unstructured_2.inp") -isfile(mesh_file) || - download("https://gist.githubusercontent.com/efaulhaber/63ff2ea224409e55ee8423b3a33e316a/raw/7db58af7446d1479753ae718930741c47a3b79b7/square_unstructured_2.inp", - mesh_file) +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/63ff2ea224409e55ee8423b3a33e316a/raw/7db58af7446d1479753ae718930741c47a3b79b7/square_unstructured_2.inp", + joinpath(@__DIR__, "square_unstructured_2.inp")) mesh = P4estMesh{2}(mesh_file, polydeg = 3, mapping = mapping_flag, diff --git a/examples/p4est_2d_dgsem/elixir_advection_unstructured_flag.jl b/examples/p4est_2d_dgsem/elixir_advection_unstructured_flag.jl index 37fcc547f60..1ab96925fe6 100644 --- a/examples/p4est_2d_dgsem/elixir_advection_unstructured_flag.jl +++ b/examples/p4est_2d_dgsem/elixir_advection_unstructured_flag.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -29,10 +28,8 @@ Trixi.validate_faces(faces) mapping_flag = Trixi.transfinite_mapping(faces) # Unstructured mesh with 24 cells of the square domain [-1, 1]^n -mesh_file = joinpath(@__DIR__, "square_unstructured_2.inp") -isfile(mesh_file) || - download("https://gist.githubusercontent.com/efaulhaber/63ff2ea224409e55ee8423b3a33e316a/raw/7db58af7446d1479753ae718930741c47a3b79b7/square_unstructured_2.inp", - mesh_file) +mesh_file = 
Trixi.download("https://gist.githubusercontent.com/efaulhaber/63ff2ea224409e55ee8423b3a33e316a/raw/7db58af7446d1479753ae718930741c47a3b79b7/square_unstructured_2.inp", + joinpath(@__DIR__, "square_unstructured_2.inp")) mesh = P4estMesh{2}(mesh_file, polydeg = 3, mapping = mapping_flag, diff --git a/examples/p4est_2d_dgsem/elixir_euler_NACA6412airfoil_mach2.jl b/examples/p4est_2d_dgsem/elixir_euler_NACA6412airfoil_mach2.jl new file mode 100644 index 00000000000..7e55a259596 --- /dev/null +++ b/examples/p4est_2d_dgsem/elixir_euler_NACA6412airfoil_mach2.jl @@ -0,0 +1,109 @@ + +using Trixi +using OrdinaryDiffEq +using Downloads: download + +############################################################################### +# semidiscretization of the compressible Euler equations + +equations = CompressibleEulerEquations2D(1.4) + +@inline function initial_condition_mach2_flow(x, t, equations::CompressibleEulerEquations2D) + # set the freestream flow parameters + rho_freestream = 1.4 + v1 = 2.0 + v2 = 0.0 + p_freestream = 1.0 + + prim = SVector(rho_freestream, v1, v2, p_freestream) + return prim2cons(prim, equations) +end + +initial_condition = initial_condition_mach2_flow + +# Supersonic inflow boundary condition. +# Calculate the boundary flux entirely from the external solution state, i.e., set +# external solution state values for everything entering the domain. +@inline function boundary_condition_supersonic_inflow(u_inner, + normal_direction::AbstractVector, + x, t, surface_flux_function, + equations::CompressibleEulerEquations2D) + u_boundary = initial_condition_mach2_flow(x, t, equations) + flux = Trixi.flux(u_boundary, normal_direction, equations) + + return flux +end + +# Supersonic outflow boundary condition. +# Calculate the boundary flux entirely from the internal solution state. 
Analogous to supersonic inflow +# except all the solution state values are set from the internal solution as everything leaves the domain +@inline function boundary_condition_supersonic_outflow(u_inner, + normal_direction::AbstractVector, x, + t, + surface_flux_function, + equations::CompressibleEulerEquations2D) + flux = Trixi.flux(u_inner, normal_direction, equations) + + return flux +end + +polydeg = 3 + +surface_flux = flux_lax_friedrichs +volume_flux = flux_ranocha + +basis = LobattoLegendreBasis(polydeg) +shock_indicator = IndicatorHennemannGassner(equations, basis, + alpha_max = 0.5, + alpha_min = 0.001, + alpha_smooth = true, + variable = density_pressure) +volume_integral = VolumeIntegralShockCapturingHG(shock_indicator; + volume_flux_dg = volume_flux, + volume_flux_fv = surface_flux) + +# DG Solver +solver = DGSEM(polydeg = polydeg, surface_flux = surface_flux, + volume_integral = volume_integral) + +# Mesh generated from the following gmsh geometry input file: +# https://gist.githubusercontent.com/DanielDoehring/5ade6d93629f0d8c23a598812dbee2a9/raw/d2bc904fe92146eae1a36156e7f5c535dc1a80f1/NACA6412.geo +mesh_file = joinpath(@__DIR__, "mesh_NACA6412.inp") +isfile(mesh_file) || + download("https://gist.githubusercontent.com/DanielDoehring/e2a389f04f1e37b33819b9637e8ee4c3/raw/4bf7607a2ce4432fdb5cb87d5e264949b11bd5d7/mesh_NACA6412.inp", + mesh_file) + +boundary_symbols = [:PhysicalLine1, :PhysicalLine2, :PhysicalLine3, :PhysicalLine4] + +mesh = P4estMesh{2}(mesh_file, polydeg = polydeg, boundary_symbols = boundary_symbols) + +boundary_conditions = Dict(:PhysicalLine1 => boundary_condition_supersonic_inflow, # Left boundary + :PhysicalLine2 => boundary_condition_supersonic_outflow, # Right boundary + :PhysicalLine3 => boundary_condition_supersonic_outflow, # Top and bottom boundary + :PhysicalLine4 => boundary_condition_slip_wall) # Airfoil + +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + boundary_conditions = boundary_conditions) + +tspan = (0.0, 5.0) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 1000 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval) + +stepsize_callback = StepsizeCallback(cfl = 4.0) + +callbacks = CallbackSet(summary_callback, + analysis_callback, + stepsize_callback) + +# Run the simulation +############################################################################### +sol = solve(ode, SSPRK104(; thread = OrdinaryDiffEq.True()); + dt = 1.0, # overwritten by the `stepsize_callback` + save_everystep = false, + callback = callbacks); + +summary_callback() # print the timer summary diff --git a/examples/p4est_2d_dgsem/elixir_euler_blast_wave_amr.jl b/examples/p4est_2d_dgsem/elixir_euler_blast_wave_amr.jl index 0ca4fdc2eb7..5db5f74a686 100644 --- a/examples/p4est_2d_dgsem/elixir_euler_blast_wave_amr.jl +++ b/examples/p4est_2d_dgsem/elixir_euler_blast_wave_amr.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -50,10 +49,8 @@ volume_integral = VolumeIntegralShockCapturingHG(indicator_sc; solver = DGSEM(basis, surface_flux, volume_integral) # Unstructured mesh with 48 cells of the square domain [-1, 1]^n -mesh_file = joinpath(@__DIR__, "square_unstructured_1.inp") -isfile(mesh_file) || - download("https://gist.githubusercontent.com/efaulhaber/a075f8ec39a67fa9fad8f6f84342cbca/raw/a7206a02ed3a5d3cadacd8d9694ac154f9151db7/square_unstructured_1.inp", - mesh_file) +mesh_file = 
Trixi.download("https://gist.githubusercontent.com/efaulhaber/a075f8ec39a67fa9fad8f6f84342cbca/raw/a7206a02ed3a5d3cadacd8d9694ac154f9151db7/square_unstructured_1.inp", + joinpath(@__DIR__, "square_unstructured_1.inp")) mesh = P4estMesh{2}(mesh_file, polydeg = 3, initial_refinement_level = 1) diff --git a/examples/p4est_2d_dgsem/elixir_euler_double_mach_amr.jl b/examples/p4est_2d_dgsem/elixir_euler_double_mach_amr.jl index 92928146d7b..fbc11e89185 100644 --- a/examples/p4est_2d_dgsem/elixir_euler_double_mach_amr.jl +++ b/examples/p4est_2d_dgsem/elixir_euler_double_mach_amr.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -99,11 +98,8 @@ solver = DGSEM(polydeg = polydeg, surface_flux = surface_flux, volume_integral = volume_integral) # Get the unstructured quad mesh from a file (downloads the file if not available locally) -default_mesh_file = joinpath(@__DIR__, "abaqus_double_mach.inp") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/a0806ef0d03cf5ea221af523167b6e32/raw/61ed0eb017eb432d996ed119a52fb041fe363e8c/abaqus_double_mach.inp", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/a0806ef0d03cf5ea221af523167b6e32/raw/61ed0eb017eb432d996ed119a52fb041fe363e8c/abaqus_double_mach.inp", + joinpath(@__DIR__, "abaqus_double_mach.inp")) mesh = P4estMesh{2}(mesh_file) diff --git a/examples/p4est_2d_dgsem/elixir_euler_forward_step_amr.jl b/examples/p4est_2d_dgsem/elixir_euler_forward_step_amr.jl index 0ec9fc222f2..654efd5e209 100644 --- a/examples/p4est_2d_dgsem/elixir_euler_forward_step_amr.jl +++ b/examples/p4est_2d_dgsem/elixir_euler_forward_step_amr.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -104,11 +103,8 @@ solver = DGSEM(polydeg = polydeg, surface_flux = surface_flux, volume_integral = volume_integral) # Get the unstructured quad mesh from a file (downloads the file if not available locally) -default_mesh_file = joinpath(@__DIR__, "abaqus_forward_step.inp") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/b346ee6aa5446687f128eab8b37d52a7/raw/cd1e1d43bebd8d2631a07caec45585ec8456ca4c/abaqus_forward_step.inp", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/b346ee6aa5446687f128eab8b37d52a7/raw/cd1e1d43bebd8d2631a07caec45585ec8456ca4c/abaqus_forward_step.inp", + joinpath(@__DIR__, "abaqus_forward_step.inp")) mesh = P4estMesh{2}(mesh_file) diff --git a/examples/p4est_2d_dgsem/elixir_euler_free_stream.jl b/examples/p4est_2d_dgsem/elixir_euler_free_stream.jl index 38307a7d781..ab11dc11567 100644 --- a/examples/p4est_2d_dgsem/elixir_euler_free_stream.jl +++ b/examples/p4est_2d_dgsem/elixir_euler_free_stream.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -31,10 +30,8 @@ end # Get the uncurved mesh from a file (downloads the file if not available locally) # Unstructured mesh with 48 cells of the square domain [-1, 1]^n -mesh_file = joinpath(@__DIR__, "square_unstructured_1.inp") -isfile(mesh_file) || - download("https://gist.githubusercontent.com/efaulhaber/a075f8ec39a67fa9fad8f6f84342cbca/raw/a7206a02ed3a5d3cadacd8d9694ac154f9151db7/square_unstructured_1.inp", - mesh_file) +mesh_file = 
Trixi.download("https://gist.githubusercontent.com/efaulhaber/a075f8ec39a67fa9fad8f6f84342cbca/raw/a7206a02ed3a5d3cadacd8d9694ac154f9151db7/square_unstructured_1.inp", + joinpath(@__DIR__, "square_unstructured_1.inp")) # Map the unstructured mesh with the mapping above mesh = P4estMesh{2}(mesh_file, polydeg = 3, mapping = mapping, initial_refinement_level = 1) diff --git a/examples/p4est_2d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_flag.jl b/examples/p4est_2d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_flag.jl index 09d018309a6..084fd699b8e 100644 --- a/examples/p4est_2d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_flag.jl +++ b/examples/p4est_2d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_flag.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -31,10 +30,8 @@ mapping_flag = Trixi.transfinite_mapping(faces) # Get the uncurved mesh from a file (downloads the file if not available locally) # Unstructured mesh with 24 cells of the square domain [-1, 1]^n -mesh_file = joinpath(@__DIR__, "square_unstructured_2.inp") -isfile(mesh_file) || - download("https://gist.githubusercontent.com/efaulhaber/63ff2ea224409e55ee8423b3a33e316a/raw/7db58af7446d1479753ae718930741c47a3b79b7/square_unstructured_2.inp", - mesh_file) +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/63ff2ea224409e55ee8423b3a33e316a/raw/7db58af7446d1479753ae718930741c47a3b79b7/square_unstructured_2.inp", + joinpath(@__DIR__, "square_unstructured_2.inp")) mesh = P4estMesh{2}(mesh_file, polydeg = 3, mapping = mapping_flag, diff --git a/examples/p4est_2d_dgsem/elixir_euler_supersonic_cylinder.jl b/examples/p4est_2d_dgsem/elixir_euler_supersonic_cylinder.jl index 36c5624ba97..76ee96d4766 100644 --- a/examples/p4est_2d_dgsem/elixir_euler_supersonic_cylinder.jl +++ b/examples/p4est_2d_dgsem/elixir_euler_supersonic_cylinder.jl @@ -13,7 +13,6 @@ # # Keywords: supersonic flow, shock capturing, AMR, unstructured curved mesh, positivity preservation, compressible Euler, 2D -using Downloads: download using OrdinaryDiffEq using Trixi @@ -82,11 +81,8 @@ solver = DGSEM(polydeg = polydeg, surface_flux = surface_flux, volume_integral = volume_integral) # Get the unstructured quad mesh from a file (downloads the file if not available locally) -default_mesh_file = joinpath(@__DIR__, "abaqus_cylinder_in_channel.inp") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/a08f78f6b185b63c3baeff911a63f628/raw/addac716ea0541f588b9d2bd3f92f643eb27b88f/abaqus_cylinder_in_channel.inp", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/a08f78f6b185b63c3baeff911a63f628/raw/addac716ea0541f588b9d2bd3f92f643eb27b88f/abaqus_cylinder_in_channel.inp", + joinpath(@__DIR__, "abaqus_cylinder_in_channel.inp")) mesh = P4estMesh{2}(mesh_file) diff --git a/examples/p4est_2d_dgsem/elixir_euler_wall_bc_amr.jl b/examples/p4est_2d_dgsem/elixir_euler_wall_bc_amr.jl index 8b8d05bade8..75e60d0c78b 100644 --- a/examples/p4est_2d_dgsem/elixir_euler_wall_bc_amr.jl +++ b/examples/p4est_2d_dgsem/elixir_euler_wall_bc_amr.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -39,11 +38,8 @@ solver = DGSEM(polydeg = 5, surface_flux = flux_lax_friedrichs, volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) # Get the unstructured quad mesh from a file (downloads the file if not available locally) -default_mesh_file = 
joinpath(@__DIR__, "abaqus_gingerbread_man.inp") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/0e9e990a04b5105d1d2e3096a6e41272/raw/0d924b1d7e7d3cc1070a6cc22fe1d501687aa6dd/abaqus_gingerbread_man.inp", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/0e9e990a04b5105d1d2e3096a6e41272/raw/0d924b1d7e7d3cc1070a6cc22fe1d501687aa6dd/abaqus_gingerbread_man.inp", + joinpath(@__DIR__, "abaqus_gingerbread_man.inp")) mesh = P4estMesh{2}(mesh_file) diff --git a/examples/p4est_2d_dgsem/elixir_mhd_rotor.jl b/examples/p4est_2d_dgsem/elixir_mhd_rotor.jl index 380db487356..089e82580c9 100644 --- a/examples/p4est_2d_dgsem/elixir_mhd_rotor.jl +++ b/examples/p4est_2d_dgsem/elixir_mhd_rotor.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -69,10 +68,8 @@ function mapping_twist(xi, eta) return SVector(x, y) end -mesh_file = joinpath(@__DIR__, "square_unstructured_2.inp") -isfile(mesh_file) || - download("https://gist.githubusercontent.com/efaulhaber/63ff2ea224409e55ee8423b3a33e316a/raw/7db58af7446d1479753ae718930741c47a3b79b7/square_unstructured_2.inp", - mesh_file) +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/63ff2ea224409e55ee8423b3a33e316a/raw/7db58af7446d1479753ae718930741c47a3b79b7/square_unstructured_2.inp", + joinpath(@__DIR__, "square_unstructured_2.inp")) mesh = P4estMesh{2}(mesh_file, polydeg = 4, diff --git a/examples/p4est_3d_dgsem/elixir_advection_amr_unstructured_curved.jl b/examples/p4est_3d_dgsem/elixir_advection_amr_unstructured_curved.jl index cd280cf5bf6..33afd2e030e 100644 --- a/examples/p4est_3d_dgsem/elixir_advection_amr_unstructured_curved.jl +++ b/examples/p4est_3d_dgsem/elixir_advection_amr_unstructured_curved.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -49,10 +48,8 @@ function mapping(xi, eta, zeta) end # Unstructured mesh with 48 cells of the cube domain [-1, 1]^3 -mesh_file = joinpath(@__DIR__, "cube_unstructured_2.inp") -isfile(mesh_file) || - download("https://gist.githubusercontent.com/efaulhaber/b8df0033798e4926dec515fc045e8c2c/raw/b9254cde1d1fb64b6acc8416bc5ccdd77a240227/cube_unstructured_2.inp", - mesh_file) +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/b8df0033798e4926dec515fc045e8c2c/raw/b9254cde1d1fb64b6acc8416bc5ccdd77a240227/cube_unstructured_2.inp", + joinpath(@__DIR__, "cube_unstructured_2.inp")) # Mesh polydeg of 2 (half the solver polydeg) to ensure FSP (see above). 
mesh = P4estMesh{3}(mesh_file, polydeg = 2, diff --git a/examples/p4est_3d_dgsem/elixir_advection_unstructured_curved.jl b/examples/p4est_3d_dgsem/elixir_advection_unstructured_curved.jl index 6df9ac0b16a..83adcbf6a63 100644 --- a/examples/p4est_3d_dgsem/elixir_advection_unstructured_curved.jl +++ b/examples/p4est_3d_dgsem/elixir_advection_unstructured_curved.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -46,10 +45,8 @@ function mapping(xi, eta, zeta) end # Unstructured mesh with 68 cells of the cube domain [-1, 1]^3 -mesh_file = joinpath(@__DIR__, "cube_unstructured_1.inp") -isfile(mesh_file) || - download("https://gist.githubusercontent.com/efaulhaber/d45c8ac1e248618885fa7cc31a50ab40/raw/37fba24890ab37cfa49c39eae98b44faf4502882/cube_unstructured_1.inp", - mesh_file) +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/d45c8ac1e248618885fa7cc31a50ab40/raw/37fba24890ab37cfa49c39eae98b44faf4502882/cube_unstructured_1.inp", + joinpath(@__DIR__, "cube_unstructured_1.inp")) mesh = P4estMesh{3}(mesh_file, polydeg = 3, mapping = mapping, diff --git a/examples/p4est_3d_dgsem/elixir_euler_ec.jl b/examples/p4est_3d_dgsem/elixir_euler_ec.jl index d9d774a7ffc..91698545052 100644 --- a/examples/p4est_3d_dgsem/elixir_euler_ec.jl +++ b/examples/p4est_3d_dgsem/elixir_euler_ec.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -46,10 +45,8 @@ function mapping(xi_, eta_, zeta_) end # Unstructured mesh with 48 cells of the cube domain [-1, 1]^3 -mesh_file = joinpath(@__DIR__, "cube_unstructured_2.inp") -isfile(mesh_file) || - download("https://gist.githubusercontent.com/efaulhaber/b8df0033798e4926dec515fc045e8c2c/raw/b9254cde1d1fb64b6acc8416bc5ccdd77a240227/cube_unstructured_2.inp", - mesh_file) +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/b8df0033798e4926dec515fc045e8c2c/raw/b9254cde1d1fb64b6acc8416bc5ccdd77a240227/cube_unstructured_2.inp", + joinpath(@__DIR__, "cube_unstructured_2.inp")) mesh = P4estMesh{3}(mesh_file, polydeg = 5, mapping = mapping, diff --git a/examples/p4est_3d_dgsem/elixir_euler_free_stream.jl b/examples/p4est_3d_dgsem/elixir_euler_free_stream.jl index 24a781ca59e..6406a38186b 100644 --- a/examples/p4est_3d_dgsem/elixir_euler_free_stream.jl +++ b/examples/p4est_3d_dgsem/elixir_euler_free_stream.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -47,10 +46,8 @@ function mapping(xi_, eta_, zeta_) end # Unstructured mesh with 68 cells of the cube domain [-1, 1]^3 -mesh_file = joinpath(@__DIR__, "cube_unstructured_1.inp") -isfile(mesh_file) || - download("https://gist.githubusercontent.com/efaulhaber/d45c8ac1e248618885fa7cc31a50ab40/raw/37fba24890ab37cfa49c39eae98b44faf4502882/cube_unstructured_1.inp", - mesh_file) +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/d45c8ac1e248618885fa7cc31a50ab40/raw/37fba24890ab37cfa49c39eae98b44faf4502882/cube_unstructured_1.inp", + joinpath(@__DIR__, "cube_unstructured_1.inp")) # Mesh polydeg of 2 (half the solver polydeg) to ensure FSP (see above). 
mesh = P4estMesh{3}(mesh_file, polydeg = 2, diff --git a/examples/p4est_3d_dgsem/elixir_euler_free_stream_boundaries.jl b/examples/p4est_3d_dgsem/elixir_euler_free_stream_boundaries.jl new file mode 100644 index 00000000000..bdc4da26c1f --- /dev/null +++ b/examples/p4est_3d_dgsem/elixir_euler_free_stream_boundaries.jl @@ -0,0 +1,60 @@ + +using Downloads: download +using OrdinaryDiffEq +using Trixi + +############################################################################### +# semidiscretization of the compressible Euler equations + +equations = CompressibleEulerEquations3D(1.4) + +initial_condition = initial_condition_constant + +polydeg = 3 +solver = DGSEM(polydeg = polydeg, surface_flux = flux_lax_friedrichs) + +############################################################################### +# Get the uncurved mesh from a file (downloads the file if not available locally) + +default_mesh_file = joinpath(@__DIR__, "mesh_cube_with_boundaries.inp") +isfile(default_mesh_file) || + download("https://gist.githubusercontent.com/DanielDoehring/710eab379fe3042dc08af6f2d1076e49/raw/38e9803bc0dab9b32a61d9542feac5343c3e6f4b/mesh_cube_with_boundaries.inp", + default_mesh_file) +mesh_file = default_mesh_file + +boundary_symbols = [:PhysicalSurface1, :PhysicalSurface2] + +mesh = P4estMesh{3}(mesh_file, polydeg = polydeg, boundary_symbols = boundary_symbols) + +boundary_conditions = Dict(:PhysicalSurface1 => BoundaryConditionDirichlet(initial_condition), + :PhysicalSurface2 => BoundaryConditionDirichlet(initial_condition)) + +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + boundary_conditions = boundary_conditions) + +############################################################################### +# ODE solvers, callbacks etc. 
+ +tspan = (0.0, 1.0) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 100 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval) + +alive_callback = AliveCallback(analysis_interval = analysis_interval) + +stepsize_callback = StepsizeCallback(cfl = 1.5) + +callbacks = CallbackSet(summary_callback, + analysis_callback, alive_callback, + stepsize_callback) + +############################################################################### +# run the simulation + +sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), + dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep = false, callback = callbacks); +summary_callback() # print the timer summary diff --git a/examples/p4est_3d_dgsem/elixir_euler_free_stream_extruded.jl b/examples/p4est_3d_dgsem/elixir_euler_free_stream_extruded.jl index f56fe3a429d..08307a449a7 100644 --- a/examples/p4est_3d_dgsem/elixir_euler_free_stream_extruded.jl +++ b/examples/p4est_3d_dgsem/elixir_euler_free_stream_extruded.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -36,10 +35,8 @@ function mapping(xi, eta_, zeta_) end # Unstructured mesh with 48 cells of the cube domain [-1, 1]^3 -mesh_file = joinpath(@__DIR__, "cube_unstructured_2.inp") -isfile(mesh_file) || - download("https://gist.githubusercontent.com/efaulhaber/b8df0033798e4926dec515fc045e8c2c/raw/b9254cde1d1fb64b6acc8416bc5ccdd77a240227/cube_unstructured_2.inp", - mesh_file) +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/b8df0033798e4926dec515fc045e8c2c/raw/b9254cde1d1fb64b6acc8416bc5ccdd77a240227/cube_unstructured_2.inp", + joinpath(@__DIR__, "cube_unstructured_2.inp")) mesh = P4estMesh{3}(mesh_file, polydeg = 3, mapping = mapping, diff --git a/examples/p4est_3d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_curved.jl b/examples/p4est_3d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_curved.jl index 0de22eaea40..e7ca0cad4ba 100644 --- a/examples/p4est_3d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_curved.jl +++ b/examples/p4est_3d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_curved.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -49,10 +48,8 @@ function mapping(xi, eta, zeta) end # Unstructured mesh with 68 cells of the cube domain [-1, 1]^3 -mesh_file = joinpath(@__DIR__, "cube_unstructured_1.inp") -isfile(mesh_file) || - download("https://gist.githubusercontent.com/efaulhaber/d45c8ac1e248618885fa7cc31a50ab40/raw/37fba24890ab37cfa49c39eae98b44faf4502882/cube_unstructured_1.inp", - mesh_file) +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/d45c8ac1e248618885fa7cc31a50ab40/raw/37fba24890ab37cfa49c39eae98b44faf4502882/cube_unstructured_1.inp", + joinpath(@__DIR__, "cube_unstructured_1.inp")) # Mesh polydeg of 2 (half the solver polydeg) to ensure FSP (see above). 
mesh = P4estMesh{3}(mesh_file, polydeg = 2, diff --git a/examples/p4est_3d_dgsem/elixir_euler_source_terms_nonperiodic_hohqmesh.jl b/examples/p4est_3d_dgsem/elixir_euler_source_terms_nonperiodic_hohqmesh.jl index 0fa3a28fe8b..7d81d6739bf 100644 --- a/examples/p4est_3d_dgsem/elixir_euler_source_terms_nonperiodic_hohqmesh.jl +++ b/examples/p4est_3d_dgsem/elixir_euler_source_terms_nonperiodic_hohqmesh.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -19,11 +18,8 @@ boundary_conditions = Dict(:Bottom => boundary_condition, solver = DGSEM(polydeg = 4, surface_flux = flux_lax_friedrichs) # Unstructured 3D half circle mesh from HOHQMesh -default_mesh_file = joinpath(@__DIR__, "abaqus_half_circle_3d.inp") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/11461efbfb02c42e06aca338b3d0b645/raw/81deeb1ebc4945952c30af5bb75fe222a18d975c/abaqus_half_circle_3d.inp", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/11461efbfb02c42e06aca338b3d0b645/raw/81deeb1ebc4945952c30af5bb75fe222a18d975c/abaqus_half_circle_3d.inp", + joinpath(@__DIR__, "abaqus_half_circle_3d.inp")) mesh = P4estMesh{3}(mesh_file, initial_refinement_level = 0) diff --git a/examples/structured_1d_dgsem/elixir_traffic_flow_lwr_greenlight.jl b/examples/structured_1d_dgsem/elixir_traffic_flow_lwr_greenlight.jl new file mode 100644 index 00000000000..e5badf14451 --- /dev/null +++ b/examples/structured_1d_dgsem/elixir_traffic_flow_lwr_greenlight.jl @@ -0,0 +1,80 @@ + +using OrdinaryDiffEq +using Trixi + +############################################################################### + +equations = TrafficFlowLWREquations1D() + +solver = DGSEM(polydeg = 3, surface_flux = FluxHLL(min_max_speed_davis)) + +coordinates_min = (-1.0,) # minimum coordinate +coordinates_max = (1.0,) # maximum coordinate +cells_per_dimension = (64,) + +mesh = StructuredMesh(cells_per_dimension, coordinates_min, coordinates_max, + periodicity = false) + +# Example inspired from http://www.clawpack.org/riemann_book/html/Traffic_flow.html#Example:-green-light +# Green light that at x = 0 which switches at t = 0 from red to green. +# To the left there are cars bumper to bumper, to the right there are no cars. +function initial_condition_greenlight(x, t, equation::TrafficFlowLWREquations1D) + scalar = x[1] < 0.0 ? 
1.0 : 0.0 + + return SVector(scalar) +end + +############################################################################### +# Specify non-periodic boundary conditions + +# Assume that there are always cars waiting at the left +function inflow(x, t, equations::TrafficFlowLWREquations1D) + return initial_condition_greenlight(coordinates_min, t, equations) +end +boundary_condition_inflow = BoundaryConditionDirichlet(inflow) + +# Cars may leave the modeled domain +function boundary_condition_outflow(u_inner, orientation, normal_direction, x, t, + surface_flux_function, + equations::TrafficFlowLWREquations1D) + # Calculate the boundary flux entirely from the internal solution state + flux = Trixi.flux(u_inner, orientation, equations) + + return flux +end + +boundary_conditions = (x_neg = boundary_condition_inflow, + x_pos = boundary_condition_outflow) + +initial_condition = initial_condition_greenlight + +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + boundary_conditions = boundary_conditions) + +############################################################################### +# ODE solvers, callbacks etc. + +tspan = (0.0, 0.5) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 100 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval) + +alive_callback = AliveCallback(analysis_interval = analysis_interval) + +stepsize_callback = StepsizeCallback(cfl = 1.2) + +callbacks = CallbackSet(summary_callback, + analysis_callback, alive_callback, + stepsize_callback) + +############################################################################### +# run the simulation + +sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), + dt = 42, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep = false, callback = callbacks); + +summary_callback() # print the timer summary diff --git a/examples/structured_2d_dgsem/elixir_advection_coupled.jl b/examples/structured_2d_dgsem/elixir_advection_coupled.jl index 2a56d23f4c0..43b68f21b03 100644 --- a/examples/structured_2d_dgsem/elixir_advection_coupled.jl +++ b/examples/structured_2d_dgsem/elixir_advection_coupled.jl @@ -2,31 +2,38 @@ using OrdinaryDiffEq using Trixi ############################################################################### -# Coupled semidiscretization of two linear advection systems, which are connected periodically +# Coupled semidiscretization of four linear advection systems using converter functions such that +# they are also coupled across the domain boundaries to generate a periodic system. # -# In this elixir, we have a square domain that is divided into a left half and a right half. On each -# half of the domain, a completely independent SemidiscretizationHyperbolic is created for the -# linear advection equations. The two systems are coupled in the x-direction and have periodic -# boundaries in the y-direction. For a high-level overview, see also the figure below: +# In this elixir, we have a square domain that is divided into a upper-left, lower-left, +# upper-right and lower-right quarter. On each quarter +# of the domain, a completely independent SemidiscretizationHyperbolic is created for the +# linear advection equations. The four systems are coupled in the x and y-direction. 
+# For a high-level overview, see also the figure below: # # (-1, 1) ( 1, 1) # ┌────────────────────┬────────────────────┐ -# │ ↑ periodic ↑ │ ↑ periodic ↑ │ -# │ │ │ +# │ ↑ coupled ↑ │ ↑ coupled ↑ │ # │ │ │ # │ ========= │ ========= │ # │ system #1 │ system #2 │ # │ ========= │ ========= │ # │ │ │ +# │<-- coupled │<-- coupled │ +# │ coupled -->│ coupled -->│ # │ │ │ +# │ ↓ coupled ↓ │ ↓ coupled ↓ │ +# ├────────────────────┼────────────────────┤ +# │ ↑ coupled ↑ │ ↑ coupled ↑ │ # │ │ │ +# │ ========= │ ========= │ +# │ system #3 │ system #4 │ +# │ ========= │ ========= │ # │ │ │ -# │ coupled -->│<-- coupled │ -# │ │ │ -# │<-- coupled │ coupled -->│ -# │ │ │ +# │<-- coupled │<-- coupled │ +# │ coupled -->│ coupled -->│ # │ │ │ -# │ ↓ periodic ↓ │ ↓ periodic ↓ │ +# │ ↓ coupled ↓ │ ↓ coupled ↓ │ # └────────────────────┴────────────────────┘ # (-1, -1) ( 1, -1) @@ -36,60 +43,135 @@ equations = LinearScalarAdvectionEquation2D(advection_velocity) # Create DG solver with polynomial degree = 3 and (local) Lax-Friedrichs/Rusanov flux as surface flux solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) -# First mesh is the left half of a [-1,1]^2 square -coordinates_min1 = (-1.0, -1.0) # minimum coordinates (min(x), min(y)) +# This will be the number of elements for each quarter/semidiscretization. +cells_per_dimension = (8, 8) + +########### +# system #1 +########### + +coordinates_min1 = (-1.0, 0.0) # minimum coordinates (min(x), min(y)) coordinates_max1 = (0.0, 1.0) # maximum coordinates (max(x), max(y)) -# Define identical resolution as a variable such that it is easier to change from `trixi_include` -cells_per_dimension = (8, 16) +mesh1 = StructuredMesh(cells_per_dimension, coordinates_min1, coordinates_max1) -cells_per_dimension1 = cells_per_dimension +# Define the coupling functions +coupling_function12 = (x, u, equations_other, equations_own) -> u +coupling_function13 = (x, u, equations_other, equations_own) -> u -mesh1 = StructuredMesh(cells_per_dimension1, coordinates_min1, coordinates_max1) +# Define the coupling boundary conditions and the system it is coupled to. 
+boundary_conditions_x_neg1 = BoundaryConditionCoupled(2, (:end, :i_forward), Float64, + coupling_function12) +boundary_conditions_x_pos1 = BoundaryConditionCoupled(2, (:begin, :i_forward), Float64, + coupling_function12) +boundary_conditions_y_neg1 = BoundaryConditionCoupled(3, (:i_forward, :end), Float64, + coupling_function13) +boundary_conditions_y_pos1 = BoundaryConditionCoupled(3, (:i_forward, :begin), Float64, + coupling_function13) # A semidiscretization collects data structures and functions for the spatial discretization semi1 = SemidiscretizationHyperbolic(mesh1, equations, initial_condition_convergence_test, solver, - boundary_conditions = ( - # Connect left boundary with right boundary of right mesh - x_neg = BoundaryConditionCoupled(2, - (:end, - :i_forward), - Float64), - # Connect right boundary with left boundary of right mesh - x_pos = BoundaryConditionCoupled(2, - (:begin, - :i_forward), - Float64), - y_neg = boundary_condition_periodic, - y_pos = boundary_condition_periodic)) - -# Second mesh is the right half of a [-1,1]^2 square -coordinates_min2 = (0.0, -1.0) # minimum coordinates (min(x), min(y)) + boundary_conditions = (x_neg = boundary_conditions_x_neg1, + x_pos = boundary_conditions_x_pos1, + y_neg = boundary_conditions_y_neg1, + y_pos = boundary_conditions_y_pos1)) + +########### +# system #2 +########### + +coordinates_min2 = (0.0, 0.0) # minimum coordinates (min(x), min(y)) coordinates_max2 = (1.0, 1.0) # maximum coordinates (max(x), max(y)) -cells_per_dimension2 = cells_per_dimension +mesh2 = StructuredMesh(cells_per_dimension, coordinates_min2, coordinates_max2) -mesh2 = StructuredMesh(cells_per_dimension2, coordinates_min2, coordinates_max2) +# Define the coupling functions +coupling_function21 = (x, u, equations_other, equations_own) -> u +coupling_function24 = (x, u, equations_other, equations_own) -> u +# Define the coupling boundary conditions and the system it is coupled to. 
+boundary_conditions_x_neg2 = BoundaryConditionCoupled(1, (:end, :i_forward), Float64, + coupling_function21) +boundary_conditions_x_pos2 = BoundaryConditionCoupled(1, (:begin, :i_forward), Float64, + coupling_function21) +boundary_conditions_y_neg2 = BoundaryConditionCoupled(4, (:i_forward, :end), Float64, + coupling_function24) +boundary_conditions_y_pos2 = BoundaryConditionCoupled(4, (:i_forward, :begin), Float64, + coupling_function24) + +# A semidiscretization collects data structures and functions for the spatial discretization semi2 = SemidiscretizationHyperbolic(mesh2, equations, initial_condition_convergence_test, solver, - boundary_conditions = ( - # Connect left boundary with right boundary of left mesh - x_neg = BoundaryConditionCoupled(1, - (:end, - :i_forward), - Float64), - # Connect right boundary with left boundary of left mesh - x_pos = BoundaryConditionCoupled(1, - (:begin, - :i_forward), - Float64), - y_neg = boundary_condition_periodic, - y_pos = boundary_condition_periodic)) - -# Create a semidiscretization that bundles semi1 and semi2 -semi = SemidiscretizationCoupled(semi1, semi2) + boundary_conditions = (x_neg = boundary_conditions_x_neg2, + x_pos = boundary_conditions_x_pos2, + y_neg = boundary_conditions_y_neg2, + y_pos = boundary_conditions_y_pos2)) + +########### +# system #3 +########### + +coordinates_min3 = (-1.0, -1.0) # minimum coordinates (min(x), min(y)) +coordinates_max3 = (0.0, 0.0) # maximum coordinates (max(x), max(y)) + +mesh3 = StructuredMesh(cells_per_dimension, coordinates_min3, coordinates_max3) + +# Define the coupling functions +coupling_function34 = (x, u, equations_other, equations_own) -> u +coupling_function31 = (x, u, equations_other, equations_own) -> u + +# Define the coupling boundary conditions and the system it is coupled to. +boundary_conditions_x_neg3 = BoundaryConditionCoupled(4, (:end, :i_forward), Float64, + coupling_function34) +boundary_conditions_x_pos3 = BoundaryConditionCoupled(4, (:begin, :i_forward), Float64, + coupling_function34) +boundary_conditions_y_neg3 = BoundaryConditionCoupled(1, (:i_forward, :end), Float64, + coupling_function31) +boundary_conditions_y_pos3 = BoundaryConditionCoupled(1, (:i_forward, :begin), Float64, + coupling_function31) + +# A semidiscretization collects data structures and functions for the spatial discretization +semi3 = SemidiscretizationHyperbolic(mesh3, equations, initial_condition_convergence_test, + solver, + boundary_conditions = (x_neg = boundary_conditions_x_neg3, + x_pos = boundary_conditions_x_pos3, + y_neg = boundary_conditions_y_neg3, + y_pos = boundary_conditions_y_pos3)) + +########### +# system #4 +########### + +coordinates_min4 = (0.0, -1.0) # minimum coordinates (min(x), min(y)) +coordinates_max4 = (1.0, 0.0) # maximum coordinates (max(x), max(y)) + +mesh4 = StructuredMesh(cells_per_dimension, coordinates_min4, coordinates_max4) + +# Define the coupling functions +coupling_function43 = (x, u, equations_other, equations_own) -> u +coupling_function42 = (x, u, equations_other, equations_own) -> u + +# Define the coupling boundary conditions and the system it is coupled to. 
+boundary_conditions_x_neg4 = BoundaryConditionCoupled(3, (:end, :i_forward), Float64, + coupling_function43) +boundary_conditions_x_pos4 = BoundaryConditionCoupled(3, (:begin, :i_forward), Float64, + coupling_function43) +boundary_conditions_y_neg4 = BoundaryConditionCoupled(2, (:i_forward, :end), Float64, + coupling_function42) +boundary_conditions_y_pos4 = BoundaryConditionCoupled(2, (:i_forward, :begin), Float64, + coupling_function42) + +# A semidiscretization collects data structures and functions for the spatial discretization +semi4 = SemidiscretizationHyperbolic(mesh4, equations, initial_condition_convergence_test, + solver, + boundary_conditions = (x_neg = boundary_conditions_x_neg4, + x_pos = boundary_conditions_x_pos4, + y_neg = boundary_conditions_y_neg4, + y_pos = boundary_conditions_y_pos4)) + +# Create a semidiscretization that bundles all the semidiscretizations. +semi = SemidiscretizationCoupled(semi1, semi2, semi3, semi4) ############################################################################### # ODE solvers, callbacks etc. @@ -104,7 +186,10 @@ summary_callback = SummaryCallback() # The AnalysisCallback allows to analyse the solution in regular intervals and prints the results analysis_callback1 = AnalysisCallback(semi1, interval = 100) analysis_callback2 = AnalysisCallback(semi2, interval = 100) -analysis_callback = AnalysisCallbackCoupled(semi, analysis_callback1, analysis_callback2) +analysis_callback3 = AnalysisCallback(semi3, interval = 100) +analysis_callback4 = AnalysisCallback(semi4, interval = 100) +analysis_callback = AnalysisCallbackCoupled(semi, analysis_callback1, analysis_callback2, + analysis_callback3, analysis_callback4) # The SaveSolutionCallback allows to save the solution to a file in regular intervals save_solution = SaveSolutionCallback(interval = 100, diff --git a/examples/structured_2d_dgsem/elixir_euler_warm_bubble.jl b/examples/structured_2d_dgsem/elixir_euler_warm_bubble.jl new file mode 100644 index 00000000000..05c09d57530 --- /dev/null +++ b/examples/structured_2d_dgsem/elixir_euler_warm_bubble.jl @@ -0,0 +1,146 @@ +using OrdinaryDiffEq +using Trixi + +# Warm bubble test case from +# - Wicker, L. J., and Skamarock, W. C. 
(1998) +# A time-splitting scheme for the elastic equations incorporating +# second-order Runge–Kutta time differencing +# [DOI: 10.1175/1520-0493(1998)126%3C1992:ATSSFT%3E2.0.CO;2](https://doi.org/10.1175/1520-0493(1998)126%3C1992:ATSSFT%3E2.0.CO;2) +# See also +# - Bryan and Fritsch (2002) +# A Benchmark Simulation for Moist Nonhydrostatic Numerical Models +# [DOI: 10.1175/1520-0493(2002)130<2917:ABSFMN>2.0.CO;2](https://doi.org/10.1175/1520-0493(2002)130<2917:ABSFMN>2.0.CO;2) +# - Carpenter, Droegemeier, Woodward, Hane (1990) +# Application of the Piecewise Parabolic Method (PPM) to +# Meteorological Modeling +# [DOI: 10.1175/1520-0493(1990)118<0586:AOTPPM>2.0.CO;2](https://doi.org/10.1175/1520-0493(1990)118<0586:AOTPPM>2.0.CO;2) +struct WarmBubbleSetup + # Physical constants + g::Float64 # gravity of earth + c_p::Float64 # heat capacity for constant pressure (dry air) + c_v::Float64 # heat capacity for constant volume (dry air) + gamma::Float64 # heat capacity ratio (dry air) + + function WarmBubbleSetup(; g = 9.81, c_p = 1004.0, c_v = 717.0, gamma = c_p / c_v) + new(g, c_p, c_v, gamma) + end +end + +# Initial condition +function (setup::WarmBubbleSetup)(x, t, equations::CompressibleEulerEquations2D) + @unpack g, c_p, c_v = setup + + # center of perturbation + center_x = 10000.0 + center_z = 2000.0 + # radius of perturbation + radius = 2000.0 + # distance of current x to center of perturbation + r = sqrt((x[1] - center_x)^2 + (x[2] - center_z)^2) + + # perturbation in potential temperature + potential_temperature_ref = 300.0 + potential_temperature_perturbation = 0.0 + if r <= radius + potential_temperature_perturbation = 2 * cospi(0.5 * r / radius)^2 + end + potential_temperature = potential_temperature_ref + potential_temperature_perturbation + + # Exner pressure, solves hydrostatic equation for x[2] + exner = 1 - g / (c_p * potential_temperature) * x[2] + + # pressure + p_0 = 100_000.0 # reference pressure + R = c_p - c_v # gas constant (dry air) + p = p_0 * exner^(c_p / R) + + # temperature + T = potential_temperature * exner + + # density + rho = p / (R * T) + + v1 = 20.0 + v2 = 0.0 + E = c_v * T + 0.5 * (v1^2 + v2^2) + return SVector(rho, rho * v1, rho * v2, rho * E) +end + +# Source terms +@inline function (setup::WarmBubbleSetup)(u, x, t, equations::CompressibleEulerEquations2D) + @unpack g = setup + rho, _, rho_v2, _ = u + return SVector(zero(eltype(u)), zero(eltype(u)), -g * rho, -g * rho_v2) +end + +############################################################################### +# semidiscretization of the compressible Euler equations +warm_bubble_setup = WarmBubbleSetup() + +equations = CompressibleEulerEquations2D(warm_bubble_setup.gamma) + +boundary_conditions = (x_neg = boundary_condition_periodic, + x_pos = boundary_condition_periodic, + y_neg = boundary_condition_slip_wall, + y_pos = boundary_condition_slip_wall) + +polydeg = 3 +basis = LobattoLegendreBasis(polydeg) + +# This is a good estimate for the speed of sound in this example. +# Other values between 300 and 400 should work as well. 
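+# For a rough estimate based on the dry-air constants of `WarmBubbleSetup` above:
+# with R = c_p - c_v = 1004.0 - 717.0 = 287.0 and a background temperature of about 300 K,
+#   c = sqrt(gamma * R * T) ≈ sqrt(1.4 * 287.0 * 300.0) ≈ 347 m/s,
+# which is consistent with the value of 340 used here.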
+surface_flux = FluxLMARS(340.0) + +volume_flux = flux_kennedy_gruber +volume_integral = VolumeIntegralFluxDifferencing(volume_flux) + +solver = DGSEM(basis, surface_flux, volume_integral) + +coordinates_min = (0.0, 0.0) +coordinates_max = (20_000.0, 10_000.0) + +cells_per_dimension = (64, 32) +mesh = StructuredMesh(cells_per_dimension, coordinates_min, coordinates_max) + +semi = SemidiscretizationHyperbolic(mesh, equations, warm_bubble_setup, solver, + source_terms = warm_bubble_setup, + boundary_conditions = boundary_conditions) + +############################################################################### +# ODE solvers, callbacks etc. + +tspan = (0.0, 1000.0) # 1000 seconds final time + +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 1000 + +analysis_callback = AnalysisCallback(semi, interval = analysis_interval, + extra_analysis_errors = (:entropy_conservation_error,)) + +alive_callback = AliveCallback(analysis_interval = analysis_interval) + +save_solution = SaveSolutionCallback(interval = analysis_interval, + save_initial_solution = true, + save_final_solution = true, + output_directory = "out", + solution_variables = cons2prim) + +stepsize_callback = StepsizeCallback(cfl = 1.0) + +callbacks = CallbackSet(summary_callback, + analysis_callback, + alive_callback, + save_solution, + stepsize_callback) + +############################################################################### +# run the simulation +sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), + maxiters = 1.0e7, + dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep = false, callback = callbacks); + +summary_callback() diff --git a/examples/structured_2d_dgsem/elixir_shallowwater_well_balanced.jl b/examples/structured_2d_dgsem/elixir_shallowwater_well_balanced.jl index 61dd252fd83..a6a56aa807c 100644 --- a/examples/structured_2d_dgsem/elixir_shallowwater_well_balanced.jl +++ b/examples/structured_2d_dgsem/elixir_shallowwater_well_balanced.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi diff --git a/examples/t8code_2d_dgsem/elixir_advection_amr_solution_independent.jl b/examples/t8code_2d_dgsem/elixir_advection_amr_solution_independent.jl index 653bab41e2d..0589e76a6a9 100644 --- a/examples/t8code_2d_dgsem/elixir_advection_amr_solution_independent.jl +++ b/examples/t8code_2d_dgsem/elixir_advection_amr_solution_independent.jl @@ -93,12 +93,10 @@ solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) coordinates_min = (-5.0, -5.0) coordinates_max = (5.0, 5.0) -mapping = Trixi.coordinates2mapping(coordinates_min, coordinates_max) - trees_per_dimension = (1, 1) mesh = T8codeMesh(trees_per_dimension, polydeg = 3, - mapping = mapping, + coordinates_min = coordinates_min, coordinates_max = coordinates_max, initial_refinement_level = 1) semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) diff --git a/examples/t8code_2d_dgsem/elixir_advection_amr_unstructured_flag.jl b/examples/t8code_2d_dgsem/elixir_advection_amr_unstructured_flag.jl index adf1d009a59..0923e328487 100644 --- a/examples/t8code_2d_dgsem/elixir_advection_amr_unstructured_flag.jl +++ b/examples/t8code_2d_dgsem/elixir_advection_amr_unstructured_flag.jl @@ -1,4 +1,3 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -31,19 +30,17 @@ Trixi.validate_faces(faces) mapping_flag = Trixi.transfinite_mapping(faces) # Unstructured mesh with 24 cells of the square domain 
[-1, 1]^n -mesh_file = joinpath(@__DIR__, "square_unstructured_2.inp") -isfile(mesh_file) || - download("https://gist.githubusercontent.com/efaulhaber/63ff2ea224409e55ee8423b3a33e316a/raw/7db58af7446d1479753ae718930741c47a3b79b7/square_unstructured_2.inp", - mesh_file) +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/63ff2ea224409e55ee8423b3a33e316a/raw/7db58af7446d1479753ae718930741c47a3b79b7/square_unstructured_2.inp", + joinpath(@__DIR__, "square_unstructured_2.inp")) # INP mesh files are only support by p4est. Hence, we # create a p4est connecvity object first from which # we can create a t8code mesh. conn = Trixi.read_inp_p4est(mesh_file, Val(2)) -mesh = T8codeMesh{2}(conn, polydeg = 3, - mapping = mapping_flag, - initial_refinement_level = 1) +mesh = T8codeMesh(conn, polydeg = 3, + mapping = mapping_flag, + initial_refinement_level = 1) semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, boundary_conditions = boundary_conditions) diff --git a/examples/t8code_2d_dgsem/elixir_advection_basic.jl b/examples/t8code_2d_dgsem/elixir_advection_basic.jl index efc51226586..26ced0970fe 100644 --- a/examples/t8code_2d_dgsem/elixir_advection_basic.jl +++ b/examples/t8code_2d_dgsem/elixir_advection_basic.jl @@ -16,12 +16,10 @@ solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) coordinates_min = (-1.0, -1.0) # minimum coordinates (min(x), min(y)) coordinates_max = (1.0, 1.0) # maximum coordinates (max(x), max(y)) -mapping = Trixi.coordinates2mapping(coordinates_min, coordinates_max) - trees_per_dimension = (8, 8) mesh = T8codeMesh(trees_per_dimension, polydeg = 3, - mapping = mapping, + coordinates_min = coordinates_min, coordinates_max = coordinates_max, initial_refinement_level = 1) # A semidiscretization collects data structures and functions for the spatial discretization diff --git a/examples/t8code_2d_dgsem/elixir_advection_nonconforming_flag.jl b/examples/t8code_2d_dgsem/elixir_advection_nonconforming_flag.jl index 31a8bc93697..a39f3a7e195 100644 --- a/examples/t8code_2d_dgsem/elixir_advection_nonconforming_flag.jl +++ b/examples/t8code_2d_dgsem/elixir_advection_nonconforming_flag.jl @@ -20,31 +20,28 @@ f4(s) = SVector(s, 1.0 + sin(0.5 * pi * s)) faces = (f1, f2, f3, f4) mapping = Trixi.transfinite_mapping(faces) -# Create P4estMesh with 3 x 2 trees and 6 x 4 elements, +# Create T8codeMesh with 3 x 2 trees and 6 x 4 elements, # approximate the geometry with a smaller polydeg for testing. trees_per_dimension = (3, 2) mesh = T8codeMesh(trees_per_dimension, polydeg = 3, mapping = mapping, initial_refinement_level = 1) -function adapt_callback(forest, - forest_from, - which_tree, - lelement_id, - ts, - is_family, - num_elements, - elements_ptr)::Cint - vertex = Vector{Cdouble}(undef, 3) - - elements = unsafe_wrap(Array, elements_ptr, num_elements) - - Trixi.t8_element_vertex_reference_coords(ts, elements[1], 0, pointer(vertex)) +# Note: This is actually a `p4est_quadrant_t` which is much bigger than the +# following struct. But we only need the first three fields for our purpose. +struct t8_dquad_t + x::Int32 + y::Int32 + level::Int8 + # [...] # See `p4est.h` in `p4est` for more info. +end - level = Trixi.t8_element_level(ts, elements[1]) +# Refine quadrants of each tree at lower left edge to level 4. +function adapt_callback(forest, ltreeid, eclass_scheme, lelemntid, elements, is_family, + user_data) + el = unsafe_load(Ptr{t8_dquad_t}(elements[1])) - # TODO: Make this condition more general. 
- if vertex[1] < 1e-8 && vertex[2] < 1e-8 && level < 4 + if el.x == 0 && el.y == 0 && el.level < 4 # return true (refine) return 1 else @@ -53,26 +50,7 @@ function adapt_callback(forest, end end -Trixi.@T8_ASSERT(Trixi.t8_forest_is_committed(mesh.forest)!=0); - -# Init new forest. -new_forest_ref = Ref{Trixi.t8_forest_t}() -Trixi.t8_forest_init(new_forest_ref); -new_forest = new_forest_ref[] - -# Check out `examples/t8_step4_partition_balance_ghost.jl` in -# https://github.com/DLR-AMR/T8code.jl for detailed explanations. -let set_from = C_NULL, recursive = 1, set_for_coarsening = 0, no_repartition = 0 - Trixi.t8_forest_set_user_data(new_forest, C_NULL) - Trixi.t8_forest_set_adapt(new_forest, mesh.forest, - Trixi.@t8_adapt_callback(adapt_callback), recursive) - Trixi.t8_forest_set_balance(new_forest, set_from, no_repartition) - Trixi.t8_forest_set_partition(new_forest, set_from, set_for_coarsening) - Trixi.t8_forest_set_ghost(new_forest, 1, Trixi.T8_GHOST_FACES) - Trixi.t8_forest_commit(new_forest) -end - -mesh.forest = new_forest +Trixi.adapt!(mesh, adapt_callback) # A semidiscretization collects data structures and functions for the spatial discretization semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition_convergence_test, diff --git a/examples/t8code_2d_dgsem/elixir_advection_unstructured_flag.jl b/examples/t8code_2d_dgsem/elixir_advection_unstructured_flag.jl index df9cbc26f6e..ba8f1b59b80 100644 --- a/examples/t8code_2d_dgsem/elixir_advection_unstructured_flag.jl +++ b/examples/t8code_2d_dgsem/elixir_advection_unstructured_flag.jl @@ -1,4 +1,3 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -28,19 +27,17 @@ Trixi.validate_faces(faces) mapping_flag = Trixi.transfinite_mapping(faces) # Unstructured mesh with 24 cells of the square domain [-1, 1]^n. -mesh_file = joinpath(@__DIR__, "square_unstructured_2.inp") -isfile(mesh_file) || - download("https://gist.githubusercontent.com/efaulhaber/63ff2ea224409e55ee8423b3a33e316a/raw/7db58af7446d1479753ae718930741c47a3b79b7/square_unstructured_2.inp", - mesh_file) +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/63ff2ea224409e55ee8423b3a33e316a/raw/7db58af7446d1479753ae718930741c47a3b79b7/square_unstructured_2.inp", + joinpath(@__DIR__, "square_unstructured_2.inp")) # INP mesh files are only support by p4est. Hence, we # create a p4est connecvity object first from which # we can create a t8code mesh. conn = Trixi.read_inp_p4est(mesh_file, Val(2)) -mesh = T8codeMesh{2}(conn, polydeg = 3, - mapping = mapping_flag, - initial_refinement_level = 2) +mesh = T8codeMesh(conn, polydeg = 3, + mapping = mapping_flag, + initial_refinement_level = 2) # A semidiscretization collects data structures and functions for the spatial discretization. 
semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, diff --git a/examples/t8code_2d_dgsem/elixir_euler_free_stream.jl b/examples/t8code_2d_dgsem/elixir_euler_free_stream.jl index 01e0449c67e..5e6c4193c50 100644 --- a/examples/t8code_2d_dgsem/elixir_euler_free_stream.jl +++ b/examples/t8code_2d_dgsem/elixir_euler_free_stream.jl @@ -1,4 +1,3 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -30,35 +29,25 @@ end # Get the uncurved mesh from a file (downloads the file if not available locally) # Unstructured mesh with 48 cells of the square domain [-1, 1]^n -mesh_file = joinpath(@__DIR__, "square_unstructured_1.inp") -isfile(mesh_file) || - download("https://gist.githubusercontent.com/efaulhaber/a075f8ec39a67fa9fad8f6f84342cbca/raw/a7206a02ed3a5d3cadacd8d9694ac154f9151db7/square_unstructured_1.inp", - mesh_file) +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/a075f8ec39a67fa9fad8f6f84342cbca/raw/a7206a02ed3a5d3cadacd8d9694ac154f9151db7/square_unstructured_1.inp", + joinpath(@__DIR__, "square_unstructured_1.inp")) # INP mesh files are only support by p4est. Hence, we # create a p4est connecvity object first from which # we can create a t8code mesh. conn = Trixi.read_inp_p4est(mesh_file, Val(2)) -mesh = T8codeMesh{2}(conn, polydeg = 3, - mapping = mapping, - initial_refinement_level = 1) - -function adapt_callback(forest, - forest_from, - which_tree, - lelement_id, - ts, - is_family, - num_elements, - elements_ptr)::Cint - vertex = Vector{Cdouble}(undef, 3) +mesh = T8codeMesh(conn, polydeg = 3, + mapping = mapping, + initial_refinement_level = 1) - elements = unsafe_wrap(Array, elements_ptr, num_elements) +function adapt_callback(forest, ltreeid, eclass_scheme, lelemntid, elements, is_family, + user_data) + vertex = Vector{Cdouble}(undef, 3) - Trixi.t8_element_vertex_reference_coords(ts, elements[1], 0, pointer(vertex)) + Trixi.t8_element_vertex_reference_coords(eclass_scheme, elements[1], 0, vertex) - level = Trixi.t8_element_level(ts, elements[1]) + level = Trixi.t8_element_level(eclass_scheme, elements[1]) # TODO: Make this condition more general. if vertex[1] < 1e-8 && vertex[2] < 1e-8 && level < 3 @@ -70,26 +59,7 @@ function adapt_callback(forest, end end -Trixi.@T8_ASSERT(Trixi.t8_forest_is_committed(mesh.forest)!=0); - -# Init new forest. -new_forest_ref = Ref{Trixi.t8_forest_t}() -Trixi.t8_forest_init(new_forest_ref); -new_forest = new_forest_ref[] - -# Check out `examples/t8_step4_partition_balance_ghost.jl` in -# https://github.com/DLR-AMR/T8code.jl for detailed explanations. 
-let set_from = C_NULL, recursive = 1, set_for_coarsening = 0, no_repartition = 0 - Trixi.t8_forest_set_user_data(new_forest, C_NULL) - Trixi.t8_forest_set_adapt(new_forest, mesh.forest, - Trixi.@t8_adapt_callback(adapt_callback), recursive) - Trixi.t8_forest_set_balance(new_forest, set_from, no_repartition) - Trixi.t8_forest_set_partition(new_forest, set_from, set_for_coarsening) - Trixi.t8_forest_set_ghost(new_forest, 1, Trixi.T8_GHOST_FACES) - Trixi.t8_forest_commit(new_forest) -end - -mesh.forest = new_forest +Trixi.adapt!(mesh, adapt_callback) semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, boundary_conditions = Dict(:all => BoundaryConditionDirichlet(initial_condition))) diff --git a/examples/t8code_2d_dgsem/elixir_euler_sedov.jl b/examples/t8code_2d_dgsem/elixir_euler_sedov.jl index 965d794f8dc..82770a4050b 100644 --- a/examples/t8code_2d_dgsem/elixir_euler_sedov.jl +++ b/examples/t8code_2d_dgsem/elixir_euler_sedov.jl @@ -58,12 +58,10 @@ solver = DGSEM(polydeg = polydeg, surface_flux = surface_flux, coordinates_min = (-1.0, -1.0) coordinates_max = (1.0, 1.0) -mapping = Trixi.coordinates2mapping(coordinates_min, coordinates_max) - trees_per_dimension = (4, 4) mesh = T8codeMesh(trees_per_dimension, polydeg = 4, - mapping = mapping, + coordinates_min = coordinates_min, coordinates_max = coordinates_max, initial_refinement_level = 2, periodicity = true) semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) diff --git a/examples/t8code_2d_dgsem/elixir_euler_shockcapturing_ec.jl b/examples/t8code_2d_dgsem/elixir_euler_shockcapturing_ec.jl index 55a9063a001..9ebbd1d28c4 100644 --- a/examples/t8code_2d_dgsem/elixir_euler_shockcapturing_ec.jl +++ b/examples/t8code_2d_dgsem/elixir_euler_shockcapturing_ec.jl @@ -29,12 +29,10 @@ solver = DGSEM(polydeg = polydeg, surface_flux = surface_flux, coordinates_min = (-1.0, -1.0) coordinates_max = (1.0, 1.0) -mapping = Trixi.coordinates2mapping(coordinates_min, coordinates_max) - trees_per_dimension = (4, 4) mesh = T8codeMesh(trees_per_dimension, polydeg = 4, - mapping = mapping, + coordinates_min = coordinates_min, coordinates_max = coordinates_max, initial_refinement_level = 2, periodicity = true) semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) diff --git a/examples/t8code_2d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_flag.jl b/examples/t8code_2d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_flag.jl index 21f26d79ba8..e496eb76729 100644 --- a/examples/t8code_2d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_flag.jl +++ b/examples/t8code_2d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_flag.jl @@ -1,4 +1,3 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -30,35 +29,25 @@ mapping_flag = Trixi.transfinite_mapping(faces) # Get the uncurved mesh from a file (downloads the file if not available locally) # Unstructured mesh with 24 cells of the square domain [-1, 1]^n -mesh_file = joinpath(@__DIR__, "square_unstructured_2.inp") -isfile(mesh_file) || - download("https://gist.githubusercontent.com/efaulhaber/63ff2ea224409e55ee8423b3a33e316a/raw/7db58af7446d1479753ae718930741c47a3b79b7/square_unstructured_2.inp", - mesh_file) +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/63ff2ea224409e55ee8423b3a33e316a/raw/7db58af7446d1479753ae718930741c47a3b79b7/square_unstructured_2.inp", + joinpath(@__DIR__, "square_unstructured_2.inp")) # INP mesh files are only support by p4est. 
Hence, we # create a p4est connecvity object first from which # we can create a t8code mesh. conn = Trixi.read_inp_p4est(mesh_file, Val(2)) -mesh = T8codeMesh{2}(conn, polydeg = 3, - mapping = mapping_flag, - initial_refinement_level = 1) - -function adapt_callback(forest, - forest_from, - which_tree, - lelement_id, - ts, - is_family, - num_elements, - elements_ptr)::Cint - vertex = Vector{Cdouble}(undef, 3) +mesh = T8codeMesh(conn, polydeg = 3, + mapping = mapping_flag, + initial_refinement_level = 1) - elements = unsafe_wrap(Array, elements_ptr, num_elements) +function adapt_callback(forest, ltreeid, eclass_scheme, lelemntid, elements, is_family, + user_data) + vertex = Vector{Cdouble}(undef, 3) - Trixi.t8_element_vertex_reference_coords(ts, elements[1], 0, pointer(vertex)) + Trixi.t8_element_vertex_reference_coords(eclass_scheme, elements[1], 0, pointer(vertex)) - level = Trixi.t8_element_level(ts, elements[1]) + level = Trixi.t8_element_level(eclass_scheme, elements[1]) # TODO: Make this condition more general. if vertex[1] < 1e-8 && vertex[2] < 1e-8 && level < 2 @@ -70,26 +59,7 @@ function adapt_callback(forest, end end -@assert(Trixi.t8_forest_is_committed(mesh.forest)!=0); - -# Init new forest. -new_forest_ref = Ref{Trixi.t8_forest_t}() -Trixi.t8_forest_init(new_forest_ref); -new_forest = new_forest_ref[] - -# Check out `examples/t8_step4_partition_balance_ghost.jl` in -# https://github.com/DLR-AMR/T8code.jl for detailed explanations. -let set_from = C_NULL, recursive = 1, set_for_coarsening = 0, no_repartition = 0 - Trixi.t8_forest_set_user_data(new_forest, C_NULL) - Trixi.t8_forest_set_adapt(new_forest, mesh.forest, - Trixi.@t8_adapt_callback(adapt_callback), recursive) - Trixi.t8_forest_set_balance(new_forest, set_from, no_repartition) - Trixi.t8_forest_set_partition(new_forest, set_from, set_for_coarsening) - Trixi.t8_forest_set_ghost(new_forest, 1, Trixi.T8_GHOST_FACES) - Trixi.t8_forest_commit(new_forest) -end - -mesh.forest = new_forest +Trixi.adapt!(mesh, adapt_callback) semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, source_terms = source_terms, diff --git a/examples/t8code_2d_dgsem/elixir_eulergravity_convergence.jl b/examples/t8code_2d_dgsem/elixir_eulergravity_convergence.jl index 6d6bb27e0c3..98a9a5521a9 100644 --- a/examples/t8code_2d_dgsem/elixir_eulergravity_convergence.jl +++ b/examples/t8code_2d_dgsem/elixir_eulergravity_convergence.jl @@ -16,10 +16,8 @@ coordinates_max = (2.0, 2.0) trees_per_dimension = (1, 1) -mapping = Trixi.coordinates2mapping(coordinates_min, coordinates_max) - mesh = T8codeMesh(trees_per_dimension, polydeg = 1, - mapping = mapping, + coordinates_min = coordinates_min, coordinates_max = coordinates_max, initial_refinement_level = 2) semi_euler = SemidiscretizationHyperbolic(mesh, equations_euler, initial_condition, diff --git a/examples/t8code_2d_dgsem/elixir_mhd_alfven_wave.jl b/examples/t8code_2d_dgsem/elixir_mhd_alfven_wave.jl index 1e2362a123c..e184cb3fd05 100644 --- a/examples/t8code_2d_dgsem/elixir_mhd_alfven_wave.jl +++ b/examples/t8code_2d_dgsem/elixir_mhd_alfven_wave.jl @@ -11,18 +11,17 @@ initial_condition = initial_condition_convergence_test # Get the DG approximation space volume_flux = (flux_central, flux_nonconservative_powell) + solver = DGSEM(polydeg = 4, surface_flux = (flux_hlle, flux_nonconservative_powell), volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) coordinates_min = (0.0, 0.0) coordinates_max = (sqrt(2.0), sqrt(2.0)) -mapping = Trixi.coordinates2mapping(coordinates_min, 
coordinates_max) - trees_per_dimension = (8, 8) mesh = T8codeMesh(trees_per_dimension, polydeg = 3, - mapping = mapping, + coordinates_min = coordinates_min, coordinates_max = coordinates_max, initial_refinement_level = 0, periodicity = true) semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) diff --git a/examples/t8code_2d_dgsem/elixir_mhd_rotor.jl b/examples/t8code_2d_dgsem/elixir_mhd_rotor.jl index 9a4bd99e444..ff2e40ae607 100644 --- a/examples/t8code_2d_dgsem/elixir_mhd_rotor.jl +++ b/examples/t8code_2d_dgsem/elixir_mhd_rotor.jl @@ -1,4 +1,3 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -68,19 +67,17 @@ function mapping_twist(xi, eta) return SVector(x, y) end -mesh_file = joinpath(@__DIR__, "square_unstructured_2.inp") -isfile(mesh_file) || - download("https://gist.githubusercontent.com/efaulhaber/63ff2ea224409e55ee8423b3a33e316a/raw/7db58af7446d1479753ae718930741c47a3b79b7/square_unstructured_2.inp", - mesh_file) +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/63ff2ea224409e55ee8423b3a33e316a/raw/7db58af7446d1479753ae718930741c47a3b79b7/square_unstructured_2.inp", + joinpath(@__DIR__, "square_unstructured_2.inp")) # INP mesh files are only support by p4est. Hence, we # create a p4est connecvity object first from which # we can create a t8code mesh. conn = Trixi.read_inp_p4est(mesh_file, Val(2)) -mesh = T8codeMesh{2}(conn, polydeg = 4, - mapping = mapping_twist, - initial_refinement_level = 1) +mesh = T8codeMesh(conn, polydeg = 4, + mapping = mapping_twist, + initial_refinement_level = 1) boundary_condition = BoundaryConditionDirichlet(initial_condition) boundary_conditions = Dict(:all => boundary_condition) diff --git a/examples/t8code_2d_dgsem/elixir_shallowwater_source_terms.jl b/examples/t8code_2d_dgsem/elixir_shallowwater_source_terms.jl index b2d5097036f..3610639d554 100644 --- a/examples/t8code_2d_dgsem/elixir_shallowwater_source_terms.jl +++ b/examples/t8code_2d_dgsem/elixir_shallowwater_source_terms.jl @@ -22,12 +22,10 @@ solver = DGSEM(polydeg = 3, coordinates_min = (0.0, 0.0) # minimum coordinates (min(x), min(y)) coordinates_max = (sqrt(2.0), sqrt(2.0)) # maximum coordinates (max(x), max(y)) -mapping = Trixi.coordinates2mapping(coordinates_min, coordinates_max) - trees_per_dimension = (8, 8) mesh = T8codeMesh(trees_per_dimension, polydeg = 3, - mapping = mapping, + coordinates_min = coordinates_min, coordinates_max = coordinates_max, initial_refinement_level = 1) # A semidiscretization collects data structures and functions for the spatial discretization diff --git a/examples/t8code_3d_dgsem/elixir_advection_amr.jl b/examples/t8code_3d_dgsem/elixir_advection_amr.jl new file mode 100644 index 00000000000..5a4b2218d57 --- /dev/null +++ b/examples/t8code_3d_dgsem/elixir_advection_amr.jl @@ -0,0 +1,66 @@ +# The same setup as tree_3d_dgsem/elixir_advection_amr.jl +# to verify the T8codeMesh implementation against TreeMesh. 
+ +using OrdinaryDiffEq +using Trixi + +############################################################################### +# semidiscretization of the linear advection equation + +advection_velocity = (0.2, -0.7, 0.5) +equations = LinearScalarAdvectionEquation3D(advection_velocity) + +initial_condition = initial_condition_gauss +solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) + +coordinates_min = (-5.0, -5.0, -5.0) +coordinates_max = (5.0, 5.0, 5.0) +trees_per_dimension = (1, 1, 1) + +# Note that it is not necessary to use mesh polydeg lower than the solver polydeg +# on a Cartesian mesh. +# See https://doi.org/10.1007/s10915-018-00897-9, Section 6. +mesh = T8codeMesh(trees_per_dimension, polydeg = 1, + coordinates_min = coordinates_min, coordinates_max = coordinates_max, + initial_refinement_level = 4) + +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) + +############################################################################### +# ODE solvers, callbacks etc. + +tspan = (0.0, 0.3) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 100 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval, + extra_analysis_integrals = (entropy,)) + +alive_callback = AliveCallback(analysis_interval = analysis_interval) + +amr_controller = ControllerThreeLevel(semi, IndicatorMax(semi, variable = first), + base_level = 4, + med_level = 5, med_threshold = 0.1, + max_level = 6, max_threshold = 0.6) +amr_callback = AMRCallback(semi, amr_controller, + interval = 5, + adapt_initial_condition = true, + adapt_initial_condition_only_refine = true) + +stepsize_callback = StepsizeCallback(cfl = 1.2) + +callbacks = CallbackSet(summary_callback, + analysis_callback, + alive_callback, + amr_callback, + stepsize_callback) + +############################################################################### +# run the simulation + +sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), + dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep = false, callback = callbacks); +summary_callback() # print the timer summary diff --git a/examples/t8code_3d_dgsem/elixir_advection_amr_unstructured_curved.jl b/examples/t8code_3d_dgsem/elixir_advection_amr_unstructured_curved.jl new file mode 100644 index 00000000000..e7c0f4b7318 --- /dev/null +++ b/examples/t8code_3d_dgsem/elixir_advection_amr_unstructured_curved.jl @@ -0,0 +1,102 @@ +using OrdinaryDiffEq +using Trixi + +############################################################################### +# semidiscretization of the linear advection equation + +advection_velocity = (0.2, -0.7, 0.5) +equations = LinearScalarAdvectionEquation3D(advection_velocity) + +# Solver with polydeg=4 to ensure free stream preservation (FSP) on non-conforming meshes. +# The polydeg of the solver must be at least twice as big as the polydeg of the mesh. +# See https://doi.org/10.1007/s10915-018-00897-9, Section 6. +solver = DGSEM(polydeg = 4, surface_flux = flux_lax_friedrichs) + +initial_condition = initial_condition_gauss +boundary_condition = BoundaryConditionDirichlet(initial_condition) + +boundary_conditions = Dict(:all => boundary_condition) + +# Mapping as described in https://arxiv.org/abs/2012.12040 but with less warping. +# The mapping will be interpolated at tree level, and then refined without changing +# the geometry interpolant. 
The original mapping applied to this unstructured mesh +# causes some Jacobians to be negative, which makes the mesh invalid. +function mapping(xi, eta, zeta) + # Don't transform input variables between -1 and 1 onto [0,3] to obtain curved boundaries + # xi = 1.5 * xi_ + 1.5 + # eta = 1.5 * eta_ + 1.5 + # zeta = 1.5 * zeta_ + 1.5 + + y = eta + + 1 / 4 * (cos(1.5 * pi * (2 * xi - 3) / 3) * + cos(0.5 * pi * (2 * eta - 3) / 3) * + cos(0.5 * pi * (2 * zeta - 3) / 3)) + + x = xi + + 1 / 4 * (cos(0.5 * pi * (2 * xi - 3) / 3) * + cos(2 * pi * (2 * y - 3) / 3) * + cos(0.5 * pi * (2 * zeta - 3) / 3)) + + z = zeta + + 1 / 4 * (cos(0.5 * pi * (2 * x - 3) / 3) * + cos(pi * (2 * y - 3) / 3) * + cos(0.5 * pi * (2 * zeta - 3) / 3)) + + # Transform the weird deformed cube to be approximately the size of [-5,5]^3 to match IC + return SVector(5 * x, 5 * y, 5 * z) +end + +# Unstructured mesh with 48 cells of the cube domain [-1, 1]^3. +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/b8df0033798e4926dec515fc045e8c2c/raw/b9254cde1d1fb64b6acc8416bc5ccdd77a240227/cube_unstructured_2.inp", + joinpath(@__DIR__, "cube_unstructured_2.inp")) + +# INP mesh files are only support by p4est. Hence, we +# create a p4est connectivity object first from which +# we can create a t8code mesh. +conn = Trixi.read_inp_p4est(mesh_file, Val(3)) + +mesh = T8codeMesh(conn, polydeg = 2, + mapping = mapping, + initial_refinement_level = 1) + +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + boundary_conditions = boundary_conditions) + +############################################################################### +# ODE solvers, callbacks etc. + +tspan = (0.0, 8.0) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 100 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval, + extra_analysis_integrals = (entropy,)) + +alive_callback = AliveCallback(analysis_interval = analysis_interval) + +amr_controller = ControllerThreeLevel(semi, IndicatorMax(semi, variable = first), + base_level = 1, + med_level = 2, med_threshold = 0.1, + max_level = 3, max_threshold = 0.6) +amr_callback = AMRCallback(semi, amr_controller, + interval = 5, + adapt_initial_condition = true, + adapt_initial_condition_only_refine = true) + +stepsize_callback = StepsizeCallback(cfl = 1.2) + +callbacks = CallbackSet(summary_callback, + analysis_callback, + alive_callback, + amr_callback, + stepsize_callback) + +############################################################################### +# run the simulation + +sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), + dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep = false, callback = callbacks); +summary_callback() # print the timer summary diff --git a/examples/t8code_3d_dgsem/elixir_advection_basic.jl b/examples/t8code_3d_dgsem/elixir_advection_basic.jl new file mode 100644 index 00000000000..f49462035aa --- /dev/null +++ b/examples/t8code_3d_dgsem/elixir_advection_basic.jl @@ -0,0 +1,59 @@ +# The same setup as tree_3d_dgsem/elixir_advection_basic.jl +# to verify the T8codeMesh implementation against TreeMesh + +using OrdinaryDiffEq +using Trixi + +############################################################################### +# semidiscretization of the linear advection equation + +advection_velocity = (0.2, -0.7, 0.5) +equations = LinearScalarAdvectionEquation3D(advection_velocity) + +# Create DG solver with 
polynomial degree = 3 and (local) Lax-Friedrichs/Rusanov flux as surface flux +solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) + +coordinates_min = (-1.0, -1.0, -1.0) # minimum coordinates (min(x), min(y), min(z)) +coordinates_max = (1.0, 1.0, 1.0) # maximum coordinates (max(x), max(y), max(z)) + +# Create P4estMesh with 8 x 8 x 8 elements (note `refinement_level=1`) +trees_per_dimension = (4, 4, 4) +mesh = T8codeMesh(trees_per_dimension, polydeg = 3, + coordinates_min = coordinates_min, coordinates_max = coordinates_max, + initial_refinement_level = 1) + +# A semidiscretization collects data structures and functions for the spatial discretization +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition_convergence_test, + solver) + +############################################################################### +# ODE solvers, callbacks etc. + +# Create ODE problem with time span from 0.0 to 1.0 +tspan = (0.0, 1.0) +ode = semidiscretize(semi, tspan) + +# At the beginning of the main loop, the SummaryCallback prints a summary of the simulation setup +# and resets the timers +summary_callback = SummaryCallback() + +# The AnalysisCallback allows to analyse the solution in regular intervals and prints the results +analysis_callback = AnalysisCallback(semi, interval = 100) + +# The StepsizeCallback handles the re-calculation of the maximum Δt after each time step +stepsize_callback = StepsizeCallback(cfl = 1.2) + +# Create a CallbackSet to collect all callbacks such that they can be passed to the ODE solver +callbacks = CallbackSet(summary_callback, analysis_callback, + stepsize_callback) + +############################################################################### +# run the simulation + +# OrdinaryDiffEq's `solve` method evolves the solution in time and executes the passed callbacks +sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), + dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep = false, callback = callbacks); + +# Print the timer summary +summary_callback() diff --git a/examples/t8code_3d_dgsem/elixir_advection_nonconforming.jl b/examples/t8code_3d_dgsem/elixir_advection_nonconforming.jl new file mode 100644 index 00000000000..8d7a48370f5 --- /dev/null +++ b/examples/t8code_3d_dgsem/elixir_advection_nonconforming.jl @@ -0,0 +1,85 @@ +using OrdinaryDiffEq +using Trixi + +############################################################################### +# Semidiscretization of the linear advection equation. + +advection_velocity = (0.2, -0.7, 0.5) +equations = LinearScalarAdvectionEquation3D(advection_velocity) + +# Create DG solver with polynomial degree = 3 and (local) Lax-Friedrichs/Rusanov flux as surface flux. +solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) + +coordinates_min = (-1.0, -1.0, -1.0) # minimum coordinates (min(x), min(y), min(z)) +coordinates_max = (1.0, 1.0, 1.0) # maximum coordinates (max(x), max(y), max(z)) +trees_per_dimension = (1, 1, 1) + +# Note that it is not necessary to use mesh polydeg lower than the solver polydeg +# on a Cartesian mesh. +# See https://doi.org/10.1007/s10915-018-00897-9, Section 6. +mesh = T8codeMesh(trees_per_dimension, polydeg = 3, + coordinates_min = coordinates_min, coordinates_max = coordinates_max, + initial_refinement_level = 2) + +# Note: This is actually a `p8est_quadrant_t` which is much bigger than the +# following struct. But we only need the first four fields for our purpose. 
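+# (The adapt callback below reinterprets the element pointer via
+# `unsafe_load(Ptr{t8_dhex_t}(elements[1]))`, which only reads the leading bytes of the
+# underlying `p8est_quadrant_t`; this relies on the order and types of the declared fields
+# `x`, `y`, `z`, `level` matching the beginning of the C struct.)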
+struct t8_dhex_t + x::Int32 + y::Int32 + z::Int32 + level::Int8 + # [...] # See `p8est.h` in `p4est` for more info. +end + +# Refine bottom left quadrant of each second tree to level 2 +function adapt_callback(forest, ltreeid, eclass_scheme, lelemntid, elements, is_family, + user_data) + el = unsafe_load(Ptr{t8_dhex_t}(elements[1])) + + if iseven(convert(Int, ltreeid)) && el.x == 0 && el.y == 0 && el.z == 0 && + el.level < 3 + # return true (refine) + return 1 + else + # return false (don't refine) + return 0 + end +end + +Trixi.adapt!(mesh, adapt_callback) + +# A semidiscretization collects data structures and functions for the spatial discretization +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition_convergence_test, + solver) + +############################################################################### +# ODE solvers, callbacks etc. + +# Create ODE problem with time span from 0.0 to 1.0 +tspan = (0.0, 1.0) +ode = semidiscretize(semi, tspan) + +# At the beginning of the main loop, the SummaryCallback prints a summary of the simulation setup +# and resets the timers +summary_callback = SummaryCallback() + +# The AnalysisCallback allows to analyse the solution in regular intervals and prints the results +analysis_callback = AnalysisCallback(semi, interval = 100) + +# The StepsizeCallback handles the re-calculation of the maximum Δt after each time step +stepsize_callback = StepsizeCallback(cfl = 1.6) + +# Create a CallbackSet to collect all callbacks such that they can be passed to the ODE solver +callbacks = CallbackSet(summary_callback, analysis_callback, + stepsize_callback) + +############################################################################### +# run the simulation + +# OrdinaryDiffEq's `solve` method evolves the solution in time and executes the passed callbacks +sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), + dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep = false, callback = callbacks); + +# Print the timer summary +summary_callback() diff --git a/examples/t8code_3d_dgsem/elixir_advection_unstructured_curved.jl b/examples/t8code_3d_dgsem/elixir_advection_unstructured_curved.jl new file mode 100644 index 00000000000..ee27ee117fe --- /dev/null +++ b/examples/t8code_3d_dgsem/elixir_advection_unstructured_curved.jl @@ -0,0 +1,95 @@ +using OrdinaryDiffEq +using Trixi + +############################################################################### +# semidiscretization of the linear advection equation + +advection_velocity = (0.2, -0.7, 0.5) +equations = LinearScalarAdvectionEquation3D(advection_velocity) + +# Create DG solver with polynomial degree = 3 and (local) Lax-Friedrichs/Rusanov flux as surface flux +solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs) + +initial_condition = initial_condition_convergence_test + +boundary_condition = BoundaryConditionDirichlet(initial_condition) +boundary_conditions = Dict(:all => boundary_condition) + +# Mapping as described in https://arxiv.org/abs/2012.12040 but with less warping. +# The mapping will be interpolated at tree level, and then refined without changing +# the geometry interpolant. The original mapping applied to this unstructured mesh +# causes some Jacobians to be negative, which makes the mesh invalid. 
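+# ("Less warping" means the perturbation amplitude is reduced to 1/6 here, compared to the
+# 3/8 used for the full mapping in, e.g., `elixir_euler_ec.jl` in this directory.)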
+function mapping(xi, eta, zeta) + # Don't transform input variables between -1 and 1 onto [0,3] to obtain curved boundaries + # xi = 1.5 * xi_ + 1.5 + # eta = 1.5 * eta_ + 1.5 + # zeta = 1.5 * zeta_ + 1.5 + + y = eta + + 1 / 6 * (cos(1.5 * pi * (2 * xi - 3) / 3) * + cos(0.5 * pi * (2 * eta - 3) / 3) * + cos(0.5 * pi * (2 * zeta - 3) / 3)) + + x = xi + + 1 / 6 * (cos(0.5 * pi * (2 * xi - 3) / 3) * + cos(2 * pi * (2 * y - 3) / 3) * + cos(0.5 * pi * (2 * zeta - 3) / 3)) + + z = zeta + + 1 / 6 * (cos(0.5 * pi * (2 * x - 3) / 3) * + cos(pi * (2 * y - 3) / 3) * + cos(0.5 * pi * (2 * zeta - 3) / 3)) + + return SVector(x, y, z) +end + +# Unstructured mesh with 68 cells of the cube domain [-1, 1]^3 +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/d45c8ac1e248618885fa7cc31a50ab40/raw/37fba24890ab37cfa49c39eae98b44faf4502882/cube_unstructured_1.inp", + joinpath(@__DIR__, "cube_unstructured_1.inp")) + +# INP mesh files are only support by p4est. Hence, we +# create a p4est connectivity object first from which +# we can create a t8code mesh. +conn = Trixi.read_inp_p4est(mesh_file, Val(3)) + +mesh = T8codeMesh(conn, polydeg = 3, + mapping = mapping, + initial_refinement_level = 2) + +# A semidiscretization collects data structures and functions for the spatial discretization +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + boundary_conditions = boundary_conditions) + +############################################################################### +# ODE solvers, callbacks etc. + +# Create ODE problem with time span from 0.0 to 0.1 +ode = semidiscretize(semi, (0.0, 0.1)); + +# At the beginning of the main loop, the SummaryCallback prints a summary of the simulation setup +# and resets the timers +summary_callback = SummaryCallback() + +# The AnalysisCallback allows to analyse the solution in regular intervals and prints the results +analysis_interval = 100 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval) + +alive_callback = AliveCallback(analysis_interval = analysis_interval) + +# The StepsizeCallback handles the re-calculation of the maximum Δt after each time step +stepsize_callback = StepsizeCallback(cfl = 1.2) + +# Create a CallbackSet to collect all callbacks such that they can be passed to the ODE solver +callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, + stepsize_callback) + +############################################################################### +# run the simulation + +# OrdinaryDiffEq's `solve` method evolves the solution in time and executes the passed callbacks +sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), + dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep = false, callback = callbacks); + +# Print the timer summary +summary_callback() diff --git a/examples/t8code_3d_dgsem/elixir_euler_ec.jl b/examples/t8code_3d_dgsem/elixir_euler_ec.jl new file mode 100644 index 00000000000..b720bfcd375 --- /dev/null +++ b/examples/t8code_3d_dgsem/elixir_euler_ec.jl @@ -0,0 +1,89 @@ +using OrdinaryDiffEq +using Trixi + +############################################################################### +# semidiscretization of the compressible Euler equations + +equations = CompressibleEulerEquations3D(5 / 3) + +initial_condition = initial_condition_weak_blast_wave + +boundary_conditions = Dict(:all => boundary_condition_slip_wall) + +# Get the DG approximation space + +volume_flux = flux_ranocha +solver = 
DGSEM(polydeg = 5, surface_flux = flux_ranocha, + volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) + +# Get the curved quad mesh from a file + +# Mapping as described in https://arxiv.org/abs/2012.12040 +function mapping(xi_, eta_, zeta_) + # Transform input variables between -1 and 1 onto [0,3] + xi = 1.5 * xi_ + 1.5 + eta = 1.5 * eta_ + 1.5 + zeta = 1.5 * zeta_ + 1.5 + + y = eta + + 3 / 8 * (cos(1.5 * pi * (2 * xi - 3) / 3) * + cos(0.5 * pi * (2 * eta - 3) / 3) * + cos(0.5 * pi * (2 * zeta - 3) / 3)) + + x = xi + + 3 / 8 * (cos(0.5 * pi * (2 * xi - 3) / 3) * + cos(2 * pi * (2 * y - 3) / 3) * + cos(0.5 * pi * (2 * zeta - 3) / 3)) + + z = zeta + + 3 / 8 * (cos(0.5 * pi * (2 * x - 3) / 3) * + cos(pi * (2 * y - 3) / 3) * + cos(0.5 * pi * (2 * zeta - 3) / 3)) + + return SVector(x, y, z) +end + +# Unstructured mesh with 48 cells of the cube domain [-1, 1]^3 +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/b8df0033798e4926dec515fc045e8c2c/raw/b9254cde1d1fb64b6acc8416bc5ccdd77a240227/cube_unstructured_2.inp", + joinpath(@__DIR__, "cube_unstructured_2.inp")) + +# INP mesh files are only support by p4est. Hence, we +# create a p4est connectivity object first from which +# we can create a t8code mesh. +conn = Trixi.read_inp_p4est(mesh_file, Val(3)) + +mesh = T8codeMesh(conn, polydeg = 5, + mapping = mapping, + initial_refinement_level = 0) + +# Create the semidiscretization object. +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + boundary_conditions = boundary_conditions) + +############################################################################### +# ODE solvers, callbacks etc. + +tspan = (0.0, 2.0) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 100 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval) + +alive_callback = AliveCallback(analysis_interval = analysis_interval) + +stepsize_callback = StepsizeCallback(cfl = 1.0) + +callbacks = CallbackSet(summary_callback, + analysis_callback, + alive_callback, + stepsize_callback) + +############################################################################### +# run the simulation + +sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), + dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep = false, callback = callbacks); +summary_callback() # print the timer summary diff --git a/examples/t8code_3d_dgsem/elixir_euler_free_stream.jl b/examples/t8code_3d_dgsem/elixir_euler_free_stream.jl new file mode 100644 index 00000000000..b70a6091adf --- /dev/null +++ b/examples/t8code_3d_dgsem/elixir_euler_free_stream.jl @@ -0,0 +1,115 @@ +using OrdinaryDiffEq +using Trixi + +############################################################################### +# semidiscretization of the compressible Euler equations + +equations = CompressibleEulerEquations3D(1.4) + +initial_condition = initial_condition_constant + +boundary_conditions = Dict(:all => BoundaryConditionDirichlet(initial_condition)) + +# Solver with polydeg=4 to ensure free stream preservation (FSP) on non-conforming meshes. +# The polydeg of the solver must be at least twice as big as the polydeg of the mesh. +# See https://doi.org/10.1007/s10915-018-00897-9, Section 6. +solver = DGSEM(polydeg = 4, surface_flux = flux_lax_friedrichs, + volume_integral = VolumeIntegralWeakForm()) + +# Mapping as described in https://arxiv.org/abs/2012.12040 but with less warping. 
+# The mapping will be interpolated at tree level, and then refined without changing +# the geometry interpolant. This can yield problematic geometries if the unrefined mesh +# is not fine enough. +function mapping(xi_, eta_, zeta_) + # Transform input variables between -1 and 1 onto [0,3] + xi = 1.5 * xi_ + 1.5 + eta = 1.5 * eta_ + 1.5 + zeta = 1.5 * zeta_ + 1.5 + + y = eta + + 1 / 6 * (cos(1.5 * pi * (2 * xi - 3) / 3) * + cos(0.5 * pi * (2 * eta - 3) / 3) * + cos(0.5 * pi * (2 * zeta - 3) / 3)) + + x = xi + + 1 / 6 * (cos(0.5 * pi * (2 * xi - 3) / 3) * + cos(2 * pi * (2 * y - 3) / 3) * + cos(0.5 * pi * (2 * zeta - 3) / 3)) + + z = zeta + + 1 / 6 * (cos(0.5 * pi * (2 * x - 3) / 3) * + cos(pi * (2 * y - 3) / 3) * + cos(0.5 * pi * (2 * zeta - 3) / 3)) + + return SVector(x, y, z) +end + +# Unstructured mesh with 68 cells of the cube domain [-1, 1]^3 +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/d45c8ac1e248618885fa7cc31a50ab40/raw/37fba24890ab37cfa49c39eae98b44faf4502882/cube_unstructured_1.inp", + joinpath(@__DIR__, "cube_unstructured_1.inp")) + +# INP mesh files are only support by p4est. Hence, we +# create a p4est connectivity object first from which +# we can create a t8code mesh. +conn = Trixi.read_inp_p4est(mesh_file, Val(3)) + +mesh = T8codeMesh(conn, polydeg = 2, + mapping = mapping, + initial_refinement_level = 0) + +# Note: This is actually a `p8est_quadrant_t` which is much bigger than the +# following struct. But we only need the first four fields for our purpose. +struct t8_dhex_t + x::Int32 + y::Int32 + z::Int32 + level::Int8 + # [...] # See `p8est.h` in `p4est` for more info. +end + +# Refine bottom left quadrant of each second tree to level 2 +function adapt_callback(forest, ltreeid, eclass_scheme, lelemntid, elements, is_family, + user_data) + el = unsafe_load(Ptr{t8_dhex_t}(elements[1])) + + if iseven(convert(Int, ltreeid)) && el.x == 0 && el.y == 0 && el.z == 0 && + el.level < 2 + # return true (refine) + return 1 + else + # return false (don't refine) + return 0 + end +end + +Trixi.adapt!(mesh, adapt_callback) + +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + boundary_conditions = boundary_conditions) + +############################################################################### +# ODE solvers, callbacks etc. 
+ +tspan = (0.0, 1.0) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 100 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval) + +alive_callback = AliveCallback(analysis_interval = analysis_interval) + +stepsize_callback = StepsizeCallback(cfl = 1.2) + +callbacks = CallbackSet(summary_callback, + analysis_callback, alive_callback, + stepsize_callback) + +############################################################################### +# run the simulation + +sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), + dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep = false, callback = callbacks); +summary_callback() # print the timer summary diff --git a/examples/t8code_3d_dgsem/elixir_euler_free_stream_extruded.jl b/examples/t8code_3d_dgsem/elixir_euler_free_stream_extruded.jl new file mode 100644 index 00000000000..6ae38d20b5a --- /dev/null +++ b/examples/t8code_3d_dgsem/elixir_euler_free_stream_extruded.jl @@ -0,0 +1,103 @@ +using OrdinaryDiffEq +using Trixi + +############################################################################### +# semidiscretization of the compressible Euler equations + +equations = CompressibleEulerEquations3D(1.4) + +initial_condition = initial_condition_constant + +boundary_conditions = Dict(:all => BoundaryConditionDirichlet(initial_condition)) + +solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs, + volume_integral = VolumeIntegralWeakForm()) + +# Mapping as described in https://arxiv.org/abs/2012.12040 but reduced to 2D. +# This particular mesh is unstructured in the yz-plane, but extruded in x-direction. +# Apply the warping mapping in the yz-plane to get a curved 2D mesh that is extruded +# in x-direction to ensure free stream preservation on a non-conforming mesh. +# See https://doi.org/10.1007/s10915-018-00897-9, Section 6. +function mapping(xi, eta_, zeta_) + # Transform input variables between -1 and 1 onto [0,3] + eta = 1.5 * eta_ + 1.5 + zeta = 1.5 * zeta_ + 1.5 + + z = zeta + + 1 / 6 * (cos(1.5 * pi * (2 * eta - 3) / 3) * + cos(0.5 * pi * (2 * zeta - 3) / 3)) + + y = eta + 1 / 6 * (cos(0.5 * pi * (2 * eta - 3) / 3) * + cos(2 * pi * (2 * z - 3) / 3)) + + return SVector(xi, y, z) +end + +# Unstructured mesh with 48 cells of the cube domain [-1, 1]^3 +mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/b8df0033798e4926dec515fc045e8c2c/raw/b9254cde1d1fb64b6acc8416bc5ccdd77a240227/cube_unstructured_2.inp", + joinpath(@__DIR__, "cube_unstructured_2.inp")) + +# INP mesh files are only support by p4est. Hence, we +# create a p4est connecvity object first from which +# we can create a t8code mesh. +conn = Trixi.read_inp_p4est(mesh_file, Val(3)) + +mesh = T8codeMesh(conn, polydeg = 3, + mapping = mapping, + initial_refinement_level = 0) + +# Note: This is actually a `p8est_quadrant_t` which is much bigger than the +# following struct. But we only need the first four fields for our purpose. +struct t8_dhex_t + x::Int32 + y::Int32 + z::Int32 + level::Int8 + # [...] # See `p8est.h` in `p4est` for more info. 
+end + +# Refine quadrants in y-direction of each tree at one edge to level 2 +function adapt_callback(forest, ltreeid, eclass_scheme, lelemntid, elements, is_family, + user_data) + el = unsafe_load(Ptr{t8_dhex_t}(elements[1])) + + if convert(Int, ltreeid) < 4 && el.x == 0 && el.y == 0 && el.level < 2 + # return true (refine) + return 1 + else + # return false (don't refine) + return 0 + end +end + +Trixi.adapt!(mesh, adapt_callback) + +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + boundary_conditions = boundary_conditions) + +############################################################################### +# ODE solvers, callbacks etc. + +tspan = (0.0, 1.0) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 100 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval) + +alive_callback = AliveCallback(analysis_interval = analysis_interval) + +stepsize_callback = StepsizeCallback(cfl = 1.2) + +callbacks = CallbackSet(summary_callback, + analysis_callback, alive_callback, + stepsize_callback) + +############################################################################### +# run the simulation + +sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), #maxiters=1, + dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep = false, callback = callbacks); +summary_callback() # print the timer summary diff --git a/examples/t8code_3d_dgsem/elixir_euler_sedov.jl b/examples/t8code_3d_dgsem/elixir_euler_sedov.jl new file mode 100644 index 00000000000..618b170b661 --- /dev/null +++ b/examples/t8code_3d_dgsem/elixir_euler_sedov.jl @@ -0,0 +1,97 @@ +using OrdinaryDiffEq +using Trixi + +############################################################################### +# semidiscretization of the compressible Euler equations + +equations = CompressibleEulerEquations3D(1.4) + +""" + initial_condition_medium_sedov_blast_wave(x, t, equations::CompressibleEulerEquations3D) + +The Sedov blast wave setup based on Flash +- https://flash.rochester.edu/site/flashcode/user_support/flash_ug_devel/node187.html#SECTION010114000000000000000 +with smaller strength of the initial discontinuity. +""" +function initial_condition_medium_sedov_blast_wave(x, t, + equations::CompressibleEulerEquations3D) + # Set up polar coordinates + inicenter = SVector(0.0, 0.0, 0.0) + x_norm = x[1] - inicenter[1] + y_norm = x[2] - inicenter[2] + z_norm = x[3] - inicenter[3] + r = sqrt(x_norm^2 + y_norm^2 + z_norm^2) + + # Setup based on https://flash.rochester.edu/site/flashcode/user_support/flash_ug_devel/node187.html#SECTION010114000000000000000 + r0 = 0.21875 # = 3.5 * smallest dx (for domain length=4 and max-ref=6) + E = 1.0 + p0_inner = 3 * (equations.gamma - 1) * E / (4 * pi * r0^2) + p0_outer = 1.0e-3 + + # Calculate primitive variables + rho = 1.0 + v1 = 0.0 + v2 = 0.0 + v3 = 0.0 + p = r > r0 ? 
p0_outer : p0_inner + + return prim2cons(SVector(rho, v1, v2, v3, p), equations) +end + +initial_condition = initial_condition_medium_sedov_blast_wave + +surface_flux = flux_lax_friedrichs +volume_flux = flux_ranocha +polydeg = 5 +basis = LobattoLegendreBasis(polydeg) +indicator_sc = IndicatorHennemannGassner(equations, basis, + alpha_max = 1.0, + alpha_min = 0.001, + alpha_smooth = true, + variable = density_pressure) +volume_integral = VolumeIntegralShockCapturingHG(indicator_sc; + volume_flux_dg = volume_flux, + volume_flux_fv = surface_flux) + +solver = DGSEM(polydeg = polydeg, surface_flux = surface_flux, + volume_integral = volume_integral) + +coordinates_min = (-1.0, -1.0, -1.0) +coordinates_max = (1.0, 1.0, 1.0) + +trees_per_dimension = (4, 4, 4) +mesh = T8codeMesh(trees_per_dimension, + polydeg = 4, initial_refinement_level = 0, + coordinates_min = coordinates_min, coordinates_max = coordinates_max, + periodicity = true) + +# create the semi discretization object +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) + +############################################################################### +# ODE solvers, callbacks etc. + +tspan = (0.0, 12.5) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 100 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval) + +alive_callback = AliveCallback(analysis_interval = analysis_interval) + +stepsize_callback = StepsizeCallback(cfl = 0.5) + +callbacks = CallbackSet(summary_callback, + analysis_callback, + alive_callback, + stepsize_callback) + +############################################################################### +# run the simulation + +sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), + dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep = false, callback = callbacks); +summary_callback() # print the timer summary diff --git a/examples/t8code_3d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_curved.jl b/examples/t8code_3d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_curved.jl new file mode 100644 index 00000000000..6856be36ea1 --- /dev/null +++ b/examples/t8code_3d_dgsem/elixir_euler_source_terms_nonconforming_unstructured_curved.jl @@ -0,0 +1,117 @@ +using OrdinaryDiffEq +using Trixi + +############################################################################### +# semidiscretization of the compressible Euler equations + +equations = CompressibleEulerEquations3D(1.4) + +initial_condition = initial_condition_convergence_test + +boundary_condition = BoundaryConditionDirichlet(initial_condition) +boundary_conditions = Dict(:all => boundary_condition) + +# Solver with polydeg=4 to ensure free stream preservation (FSP) on non-conforming meshes. +# The polydeg of the solver must be at least twice as big as the polydeg of the mesh. +# See https://doi.org/10.1007/s10915-018-00897-9, Section 6. +solver = DGSEM(polydeg = 4, surface_flux = flux_lax_friedrichs, + volume_integral = VolumeIntegralWeakForm()) + +# Mapping as described in https://arxiv.org/abs/2012.12040 but with less warping. +# The mapping will be interpolated at tree level, and then refined without changing +# the geometry interpolant. The original mapping applied to this unstructured mesh +# causes some Jacobians to be negative, which makes the mesh invalid. 
+function mapping(xi, eta, zeta)
+    # Don't transform input variables between -1 and 1 onto [0,3] to obtain curved boundaries
+    # xi = 1.5 * xi_ + 1.5
+    # eta = 1.5 * eta_ + 1.5
+    # zeta = 1.5 * zeta_ + 1.5
+
+    y = eta +
+        1 / 6 * (cos(1.5 * pi * (2 * xi - 3) / 3) *
+                 cos(0.5 * pi * (2 * eta - 3) / 3) *
+                 cos(0.5 * pi * (2 * zeta - 3) / 3))
+
+    x = xi +
+        1 / 6 * (cos(0.5 * pi * (2 * xi - 3) / 3) *
+                 cos(2 * pi * (2 * y - 3) / 3) *
+                 cos(0.5 * pi * (2 * zeta - 3) / 3))
+
+    z = zeta +
+        1 / 6 * (cos(0.5 * pi * (2 * x - 3) / 3) *
+                 cos(pi * (2 * y - 3) / 3) *
+                 cos(0.5 * pi * (2 * zeta - 3) / 3))
+
+    # Shift the warped geometry so that it approximately covers the cube [0,2]^3
+    return SVector(x + 1, y + 1, z + 1)
+end
+
+# Unstructured mesh with 68 cells of the cube domain [-1, 1]^3
+mesh_file = Trixi.download("https://gist.githubusercontent.com/efaulhaber/d45c8ac1e248618885fa7cc31a50ab40/raw/37fba24890ab37cfa49c39eae98b44faf4502882/cube_unstructured_1.inp",
+                           joinpath(@__DIR__, "cube_unstructured_1.inp"))
+
+# INP mesh files are only supported by p4est. Hence, we
+# create a p4est connectivity object first from which
+# we can create a t8code mesh.
+conn = Trixi.read_inp_p4est(mesh_file, Val(3))
+
+# Mesh polydeg of 2 (half the solver polydeg) to ensure FSP (see above).
+mesh = T8codeMesh(conn, polydeg = 2,
+                  mapping = mapping,
+                  initial_refinement_level = 0)
+
+# Note: This is actually a `p8est_quadrant_t` which is much bigger than the
+# following struct. But we only need the first four fields for our purpose.
+struct t8_dhex_t
+    x::Int32
+    y::Int32
+    z::Int32
+    level::Int8
+    # [...] # See `p8est.h` in `p4est` for more info.
+end
+
+function adapt_callback(forest, ltreeid, eclass_scheme, lelementid, elements, is_family,
+                        user_data)
+    el = unsafe_load(Ptr{t8_dhex_t}(elements[1]))
+
+    if el.x == 0 && el.y == 0 && el.z == 0 && el.level < 2
+        # return true (refine)
+        return 1
+    else
+        # return false (don't refine)
+        return 0
+    end
+end
+
+Trixi.adapt!(mesh, adapt_callback)
+
+semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver,
+                                    source_terms = source_terms_convergence_test,
+                                    boundary_conditions = boundary_conditions)
+
+###############################################################################
+# ODE solvers, callbacks etc.
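The comment above notes that the original mapping produces negative Jacobians on this particular unstructured mesh. A rough way to screen a candidate mapping before building the mesh is to sample the Jacobian determinant of the analytic map on the reference cube. This is only a sketch: it assumes ForwardDiff and LinearAlgebra are available in the active environment and it checks only the analytic map, not the composed tree geometry where the actual failure occurs.

using ForwardDiff, LinearAlgebra

# Sample det(J) of the analytic mapping on a coarse grid of the reference cube [-1, 1]^3.
function mapping_jacobian_positive(mapping; n = 5)
    nodes = range(-1.0, 1.0, length = n)
    for xi in nodes, eta in nodes, zeta in nodes
        J = ForwardDiff.jacobian(v -> mapping(v[1], v[2], v[3]), [xi, eta, zeta])
        det(J) > 0 || return false
    end
    return true
end

mapping_jacobian_positive(mapping)  # samples the modified mapping defined above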
+ +tspan = (0.0, 0.045) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 100 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval) + +alive_callback = AliveCallback(analysis_interval = analysis_interval) + +stepsize_callback = StepsizeCallback(cfl = 0.6) + +callbacks = CallbackSet(summary_callback, + analysis_callback, alive_callback, + stepsize_callback); + +############################################################################### +# run the simulation + +sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), + dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep = false, callback = callbacks); +summary_callback() # print the timer summary diff --git a/examples/t8code_3d_dgsem/elixir_euler_source_terms_nonperiodic.jl b/examples/t8code_3d_dgsem/elixir_euler_source_terms_nonperiodic.jl new file mode 100644 index 00000000000..7cb03bb312d --- /dev/null +++ b/examples/t8code_3d_dgsem/elixir_euler_source_terms_nonperiodic.jl @@ -0,0 +1,62 @@ +using OrdinaryDiffEq +using Trixi + +############################################################################### +# semidiscretization of the compressible Euler equations + +equations = CompressibleEulerEquations3D(1.4) + +initial_condition = initial_condition_convergence_test + +boundary_condition = BoundaryConditionDirichlet(initial_condition) +boundary_conditions = Dict(:x_neg => boundary_condition, + :x_pos => boundary_condition, + :y_neg => boundary_condition, + :y_pos => boundary_condition, + :z_neg => boundary_condition, + :z_pos => boundary_condition) + +solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs, + volume_integral = VolumeIntegralWeakForm()) + +coordinates_min = (0.0, 0.0, 0.0) +coordinates_max = (2.0, 2.0, 2.0) + +trees_per_dimension = (2, 2, 2) + +mapping = Trixi.coordinates2mapping(coordinates_min, coordinates_max) + +mesh = T8codeMesh(trees_per_dimension, polydeg = 1, + mapping = mapping, + periodicity = false, initial_refinement_level = 1) + +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + source_terms = source_terms_convergence_test, + boundary_conditions = boundary_conditions) + +############################################################################### +# ODE solvers, callbacks etc. 
+ +tspan = (0.0, 5.0) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 100 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval) + +alive_callback = AliveCallback(analysis_interval = analysis_interval) + +stepsize_callback = StepsizeCallback(cfl = 0.6) + +callbacks = CallbackSet(summary_callback, + analysis_callback, alive_callback, + stepsize_callback) + +############################################################################### +# run the simulation + +sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), + dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep = false, callback = callbacks); +summary_callback() # print the timer summary diff --git a/examples/tree_1d_dgsem/elixir_traffic_flow_lwr_convergence.jl b/examples/tree_1d_dgsem/elixir_traffic_flow_lwr_convergence.jl new file mode 100644 index 00000000000..59258018f8c --- /dev/null +++ b/examples/tree_1d_dgsem/elixir_traffic_flow_lwr_convergence.jl @@ -0,0 +1,54 @@ + +using OrdinaryDiffEq +using Trixi + +############################################################################### + +equations = TrafficFlowLWREquations1D() + +# Use first order finite volume to prevent oscillations at the shock +solver = DGSEM(polydeg = 3, surface_flux = flux_hll) + +coordinates_min = 0.0 # minimum coordinate +coordinates_max = 2.0 # maximum coordinate + +# Create a uniformly refined mesh with periodic boundaries +mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 4, + n_cells_max = 30_000) + +############################################################################### +# Specify non-periodic boundary conditions + +initial_condition = initial_condition_convergence_test +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + source_terms = source_terms_convergence_test) + +############################################################################### +# ODE solvers, callbacks etc. 
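Because this elixir pairs `initial_condition_convergence_test` with `source_terms_convergence_test`, it is a manufactured-solution setup, so experimental orders of convergence can be measured by rerunning it at several refinement levels. A minimal sketch using Trixi's `convergence_test` helper, assuming the elixir is available under `examples_dir()` once this change is merged (otherwise pass its local path); the iteration count 4 is an arbitrary choice for illustration.

using Trixi

# Re-run the elixir above on 4 successively refined meshes and report the EOCs.
convergence_test(joinpath(examples_dir(), "tree_1d_dgsem",
                          "elixir_traffic_flow_lwr_convergence.jl"), 4)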
+ +tspan = (0.0, 2.0) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 100 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval) + +alive_callback = AliveCallback(analysis_interval = analysis_interval) + +stepsize_callback = StepsizeCallback(cfl = 1.6) + +callbacks = CallbackSet(summary_callback, + analysis_callback, + alive_callback, + stepsize_callback) + +############################################################################### +# run the simulation + +sol = solve(ode, CarpenterKennedy2N54(), + dt = 42, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep = false, callback = callbacks); + +summary_callback() # print the timer summary diff --git a/examples/tree_1d_dgsem/elixir_traffic_flow_lwr_trafficjam.jl b/examples/tree_1d_dgsem/elixir_traffic_flow_lwr_trafficjam.jl new file mode 100644 index 00000000000..d3a17b513fc --- /dev/null +++ b/examples/tree_1d_dgsem/elixir_traffic_flow_lwr_trafficjam.jl @@ -0,0 +1,82 @@ + +using OrdinaryDiffEq +using Trixi + +############################################################################### + +equations = TrafficFlowLWREquations1D() + +# Use first order finite volume to prevent oscillations at the shock +solver = DGSEM(polydeg = 0, surface_flux = flux_lax_friedrichs) + +coordinates_min = -1.0 # minimum coordinate +coordinates_max = 1.0 # maximum coordinate + +mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 9, + n_cells_max = 30_000, + periodicity = false) + +# Example taken from http://www.clawpack.org/riemann_book/html/Traffic_flow.html#Example:-Traffic-jam +# Discontinuous initial condition (Riemann Problem) leading to a shock that moves to the left. +# The shock corresponds to the traffic congestion. +function initial_condition_traffic_jam(x, t, equation::TrafficFlowLWREquations1D) + scalar = x[1] < 0.0 ? 0.5 : 1.0 + + return SVector(scalar) +end + +############################################################################### +# Specify non-periodic boundary conditions + +function outflow(x, t, equations::TrafficFlowLWREquations1D) + return initial_condition_traffic_jam(coordinates_min, t, equations) +end +boundary_condition_outflow = BoundaryConditionDirichlet(outflow) + +function boundary_condition_inflow(u_inner, orientation, normal_direction, x, t, + surface_flux_function, + equations::TrafficFlowLWREquations1D) + # Calculate the boundary flux entirely from the internal solution state + flux = Trixi.flux(u_inner, orientation, equations) + + return flux +end + +boundary_conditions = (x_neg = boundary_condition_outflow, + x_pos = boundary_condition_inflow) + +initial_condition = initial_condition_traffic_jam + +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + boundary_conditions = boundary_conditions) + +############################################################################### +# ODE solvers, callbacks etc. 
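The Riemann data above (density 0.5 for x < 0, density 1.0 for x >= 0) indeed yields a left-moving shock: assuming the normalized LWR flux f(u) = u (1 - u) (an assumption here; see the `TrafficFlowLWREquations1D` docstring for the exact scaling), the Rankine-Hugoniot condition gives the front speed directly, and the Lax condition f'(0.5) = 0 > s > f'(1.0) = -1 confirms that the shock is admissible. A short check:

# Rankine-Hugoniot speed of the traffic-jam front, assuming f(u) = u * (1 - u)
f(u) = u * (1 - u)
u_left, u_right = 0.5, 1.0
shock_speed = (f(u_right) - f(u_left)) / (u_right - u_left)  # = -0.5, i.e. moving left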
+ +tspan = (0.0, 0.5) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 100 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval) + +alive_callback = AliveCallback(analysis_interval = analysis_interval) + +stepsize_callback = StepsizeCallback(cfl = 1.0) + +callbacks = CallbackSet(summary_callback, + analysis_callback, alive_callback, + stepsize_callback) + +############################################################################### +# run the simulation + +# Note: Be careful when increasing the polynomial degree and switching from first order finite volume +# to some actual DG method - in that case, you should also exchange the ODE solver. +sol = solve(ode, Euler(), + dt = 42, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep = false, callback = callbacks); + +summary_callback() # print the timer summary diff --git a/examples/tree_2d_dgsem/elixir_euler_kelvin_helmholtz_instability_sc_subcell.jl b/examples/tree_2d_dgsem/elixir_euler_kelvin_helmholtz_instability_sc_subcell.jl new file mode 100644 index 00000000000..1817672778a --- /dev/null +++ b/examples/tree_2d_dgsem/elixir_euler_kelvin_helmholtz_instability_sc_subcell.jl @@ -0,0 +1,91 @@ + +using OrdinaryDiffEq +using Trixi + +############################################################################### +# semidiscretization of the compressible Euler equations +gamma = 1.4 +equations = CompressibleEulerEquations2D(gamma) + +""" + initial_condition_kelvin_helmholtz_instability(x, t, equations::CompressibleEulerEquations2D) + +A version of the classical Kelvin-Helmholtz instability based on +- Andrés M. Rueda-Ramírez, Gregor J. Gassner (2021) + A Subcell Finite Volume Positivity-Preserving Limiter for DGSEM Discretizations + of the Euler Equations + [arXiv: 2102.06017](https://arxiv.org/abs/2102.06017) +""" +function initial_condition_kelvin_helmholtz_instability(x, t, + equations::CompressibleEulerEquations2D) + # change discontinuity to tanh + # typical resolution 128^2, 256^2 + # domain size is [-1,+1]^2 + slope = 15 + amplitude = 0.02 + B = tanh(slope * x[2] + 7.5) - tanh(slope * x[2] - 7.5) + rho = 0.5 + 0.75 * B + v1 = 0.5 * (B - 1) + v2 = 0.1 * sin(2 * pi * x[1]) + p = 1.0 + return prim2cons(SVector(rho, v1, v2, p), equations) +end +initial_condition = initial_condition_kelvin_helmholtz_instability + +surface_flux = flux_lax_friedrichs +volume_flux = flux_ranocha +polydeg = 3 +basis = LobattoLegendreBasis(polydeg) + +limiter_idp = SubcellLimiterIDP(equations, basis; + positivity_variables_cons = ["rho"], + positivity_variables_nonlinear = [pressure]) +volume_integral = VolumeIntegralSubcellLimiting(limiter_idp; + volume_flux_dg = volume_flux, + volume_flux_fv = surface_flux) +solver = DGSEM(basis, surface_flux, volume_integral) + +coordinates_min = (-1.0, -1.0) +coordinates_max = (1.0, 1.0) +mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 5, + n_cells_max = 100_000) +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) + +############################################################################### +# ODE solvers, callbacks etc. 
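When debugging the positivity limiting configured above, the bounds-check stage callback used further below can also record the observed deviations from the imposed bounds. A sketch of that variant, differing from the elixir only in the `save_errors` flag:

# Same stage callbacks as in the run below, but additionally record IDP bound deviations
stage_callbacks = (SubcellLimiterIDPCorrection(),
                   BoundsCheckCallback(save_errors = true))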
+ +tspan = (0.0, 3.7) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 1000 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval) + +alive_callback = AliveCallback(analysis_interval = analysis_interval) + +save_solution = SaveSolutionCallback(interval = 100, + save_initial_solution = true, + save_final_solution = true, + solution_variables = cons2prim) + +save_restart = SaveRestartCallback(interval = 1000, + save_final_restart = true) + +stepsize_callback = StepsizeCallback(cfl = 0.7) + +callbacks = CallbackSet(summary_callback, + analysis_callback, alive_callback, + stepsize_callback, + save_restart, save_solution) + +############################################################################### +# run the simulation + +stage_callbacks = (SubcellLimiterIDPCorrection(), BoundsCheckCallback(save_errors = false)) + +sol = Trixi.solve(ode, Trixi.SimpleSSPRK33(stage_callbacks = stage_callbacks); + dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback + callback = callbacks); +summary_callback() # print the timer summary diff --git a/examples/tree_2d_dgsem/elixir_euler_warm_bubble.jl b/examples/tree_2d_dgsem/elixir_euler_warm_bubble.jl new file mode 100644 index 00000000000..f2e14273ae7 --- /dev/null +++ b/examples/tree_2d_dgsem/elixir_euler_warm_bubble.jl @@ -0,0 +1,150 @@ +using OrdinaryDiffEq +using Trixi + +# Warm bubble test case from +# - Wicker, L. J., and Skamarock, W. C. (1998) +# A time-splitting scheme for the elastic equations incorporating +# second-order Runge–Kutta time differencing +# [DOI: 10.1175/1520-0493(1998)126%3C1992:ATSSFT%3E2.0.CO;2](https://doi.org/10.1175/1520-0493(1998)126%3C1992:ATSSFT%3E2.0.CO;2) +# See also +# - Bryan and Fritsch (2002) +# A Benchmark Simulation for Moist Nonhydrostatic Numerical Models +# [DOI: 10.1175/1520-0493(2002)130<2917:ABSFMN>2.0.CO;2](https://doi.org/10.1175/1520-0493(2002)130<2917:ABSFMN>2.0.CO;2) +# - Carpenter, Droegemeier, Woodward, Hane (1990) +# Application of the Piecewise Parabolic Method (PPM) to +# Meteorological Modeling +# [DOI: 10.1175/1520-0493(1990)118<0586:AOTPPM>2.0.CO;2](https://doi.org/10.1175/1520-0493(1990)118<0586:AOTPPM>2.0.CO;2) +struct WarmBubbleSetup + # Physical constants + g::Float64 # gravity of earth + c_p::Float64 # heat capacity for constant pressure (dry air) + c_v::Float64 # heat capacity for constant volume (dry air) + gamma::Float64 # heat capacity ratio (dry air) + + function WarmBubbleSetup(; g = 9.81, c_p = 1004.0, c_v = 717.0, gamma = c_p / c_v) + new(g, c_p, c_v, gamma) + end +end + +# Initial condition +function (setup::WarmBubbleSetup)(x, t, equations::CompressibleEulerEquations2D) + @unpack g, c_p, c_v = setup + + # center of perturbation + center_x = 10000.0 + center_z = 2000.0 + # radius of perturbation + radius = 2000.0 + # distance of current x to center of perturbation + r = sqrt((x[1] - center_x)^2 + (x[2] - center_z)^2) + + # perturbation in potential temperature + potential_temperature_ref = 300.0 + potential_temperature_perturbation = 0.0 + if r <= radius + potential_temperature_perturbation = 2 * cospi(0.5 * r / radius)^2 + end + potential_temperature = potential_temperature_ref + potential_temperature_perturbation + + # Exner pressure, solves hydrostatic equation for x[2] + exner = 1 - g / (c_p * potential_temperature) * x[2] + + # pressure + p_0 = 100_000.0 # reference pressure + R = c_p - c_v # gas constant (dry air) + p = p_0 * exner^(c_p / R) + + # temperature + T = 
potential_temperature * exner + + # density + rho = p / (R * T) + + v1 = 20.0 + v2 = 0.0 + E = c_v * T + 0.5 * (v1^2 + v2^2) + return SVector(rho, rho * v1, rho * v2, rho * E) +end + +# Source terms +@inline function (setup::WarmBubbleSetup)(u, x, t, equations::CompressibleEulerEquations2D) + @unpack g = setup + rho, _, rho_v2, _ = u + return SVector(zero(eltype(u)), zero(eltype(u)), -g * rho, -g * rho_v2) +end + +############################################################################### +# semidiscretization of the compressible Euler equations +warm_bubble_setup = WarmBubbleSetup() + +equations = CompressibleEulerEquations2D(warm_bubble_setup.gamma) + +boundary_conditions = (x_neg = boundary_condition_periodic, + x_pos = boundary_condition_periodic, + y_neg = boundary_condition_slip_wall, + y_pos = boundary_condition_slip_wall) + +polydeg = 3 +basis = LobattoLegendreBasis(polydeg) + +# This is a good estimate for the speed of sound in this example. +# Other values between 300 and 400 should work as well. +surface_flux = FluxLMARS(340.0) + +volume_flux = flux_kennedy_gruber +volume_integral = VolumeIntegralFluxDifferencing(volume_flux) + +solver = DGSEM(basis, surface_flux, volume_integral) + +coordinates_min = (0.0, 0.0) +coordinates_max = (20_000.0, 10_000.0) + +# Same coordinates as in examples/structured_2d_dgsem/elixir_euler_warm_bubble.jl +# However TreeMesh will generate a 20_000 x 20_000 square domain instead +mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 6, + n_cells_max = 10_000, + periodicity = (true, false)) + +semi = SemidiscretizationHyperbolic(mesh, equations, warm_bubble_setup, solver, + source_terms = warm_bubble_setup, + boundary_conditions = boundary_conditions) + +############################################################################### +# ODE solvers, callbacks etc. 
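The constants above imply a dry-air gas constant R = c_p - c_v = 287 J/(kg K). Away from the bubble at ground level the Exner pressure equals one, so the background state reduces to p = p_0 = 10^5 Pa, T = 300 K and rho = p_0 / (R T) of roughly 1.16 kg/m^3, advected by a constant horizontal wind of 20 m/s. A quick sanity check of these numbers, assuming the definitions above have been evaluated:

# Evaluate the initial condition at ground level, far away from the bubble center
u0 = warm_bubble_setup(SVector(0.0, 0.0), 0.0, equations)
rho0 = u0[1]         # approximately 1.16 kg/m^3
v1_0 = u0[2] / rho0  # 20.0 m/s background wind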
+ +tspan = (0.0, 1000.0) # 1000 seconds final time + +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 1000 + +analysis_callback = AnalysisCallback(semi, interval = analysis_interval, + extra_analysis_errors = (:entropy_conservation_error,)) + +alive_callback = AliveCallback(analysis_interval = analysis_interval) + +save_solution = SaveSolutionCallback(interval = analysis_interval, + save_initial_solution = true, + save_final_solution = true, + output_directory = "out", + solution_variables = cons2prim) + +stepsize_callback = StepsizeCallback(cfl = 1.0) + +callbacks = CallbackSet(summary_callback, + analysis_callback, + alive_callback, + save_solution, + stepsize_callback) + +############################################################################### +# run the simulation +sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), + maxiters = 1.0e7, + dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep = false, callback = callbacks); + +summary_callback() diff --git a/examples/tree_2d_dgsem/elixir_mhd_shockcapturing_subcell.jl b/examples/tree_2d_dgsem/elixir_mhd_shockcapturing_subcell.jl index fe9ad92467f..74d0370647a 100644 --- a/examples/tree_2d_dgsem/elixir_mhd_shockcapturing_subcell.jl +++ b/examples/tree_2d_dgsem/elixir_mhd_shockcapturing_subcell.jl @@ -22,7 +22,7 @@ function initial_condition_blast_wave(x, t, equations::IdealGlmMhdEquations2D) r = sqrt(x[1]^2 + x[2]^2) pmax = 10.0 - pmin = 1.0 + pmin = 0.01 rhomax = 1.0 rhomin = 0.01 if r <= 0.09 @@ -52,7 +52,8 @@ basis = LobattoLegendreBasis(3) limiter_idp = SubcellLimiterIDP(equations, basis; positivity_variables_cons = ["rho"], - positivity_correction_factor = 0.5) + positivity_variables_nonlinear = [pressure], + positivity_correction_factor = 0.1) volume_integral = VolumeIntegralSubcellLimiting(limiter_idp; volume_flux_dg = volume_flux, volume_flux_fv = surface_flux) @@ -84,7 +85,7 @@ save_solution = SaveSolutionCallback(interval = 100, save_final_solution = true, solution_variables = cons2prim) -cfl = 0.5 +cfl = 0.4 stepsize_callback = StepsizeCallback(cfl = cfl) glm_speed_callback = GlmSpeedCallback(glm_scale = 0.5, cfl = cfl) diff --git a/examples/unstructured_2d_dgsem/elixir_acoustics_gauss_wall.jl b/examples/unstructured_2d_dgsem/elixir_acoustics_gauss_wall.jl index 8f8e867dca8..9741430d11c 100644 --- a/examples/unstructured_2d_dgsem/elixir_acoustics_gauss_wall.jl +++ b/examples/unstructured_2d_dgsem/elixir_acoustics_gauss_wall.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -14,11 +13,8 @@ equations = AcousticPerturbationEquations2D(v_mean_global = (0.0, -0.5), solver = DGSEM(polydeg = 4, surface_flux = flux_lax_friedrichs) # Create unstructured quadrilateral mesh from a file -default_mesh_file = joinpath(@__DIR__, "mesh_five_circles_in_circle.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/3c79baad6b4d73bb26ec6420b5d16f45/raw/22aefc4ec2107cf0bffc40e81dfbc52240c625b1/mesh_five_circles_in_circle.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/3c79baad6b4d73bb26ec6420b5d16f45/raw/22aefc4ec2107cf0bffc40e81dfbc52240c625b1/mesh_five_circles_in_circle.mesh", + joinpath(@__DIR__, "mesh_five_circles_in_circle.mesh")) mesh = UnstructuredMesh2D(mesh_file) diff --git a/examples/unstructured_2d_dgsem/elixir_advection_basic.jl 
b/examples/unstructured_2d_dgsem/elixir_advection_basic.jl index afef6c2c38f..c0ee453344d 100644 --- a/examples/unstructured_2d_dgsem/elixir_advection_basic.jl +++ b/examples/unstructured_2d_dgsem/elixir_advection_basic.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -16,12 +15,8 @@ solver = DGSEM(polydeg = 6, surface_flux = flux_lax_friedrichs) ############################################################################### # Get the curved quad mesh from a file (downloads the file if not available locally) - -default_mesh_file = joinpath(@__DIR__, "mesh_periodic_square_with_twist.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/12ce661d7c354c3d94c74b964b0f1c96/raw/8275b9a60c6e7ebbdea5fc4b4f091c47af3d5273/mesh_periodic_square_with_twist.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/12ce661d7c354c3d94c74b964b0f1c96/raw/8275b9a60c6e7ebbdea5fc4b4f091c47af3d5273/mesh_periodic_square_with_twist.mesh", + joinpath(@__DIR__, "mesh_periodic_square_with_twist.mesh")) mesh = UnstructuredMesh2D(mesh_file, periodicity = true) diff --git a/examples/unstructured_2d_dgsem/elixir_euler_basic.jl b/examples/unstructured_2d_dgsem/elixir_euler_basic.jl index cd6a1995757..f8976120d53 100644 --- a/examples/unstructured_2d_dgsem/elixir_euler_basic.jl +++ b/examples/unstructured_2d_dgsem/elixir_euler_basic.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -25,12 +24,8 @@ solver = DGSEM(polydeg = 8, surface_flux = flux_lax_friedrichs) ############################################################################### # Get the curved quad mesh from a file (downloads the file if not available locally) - -default_mesh_file = joinpath(@__DIR__, "mesh_trixi_unstructured_mesh_docs.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/52056f1487853fab63b7f4ed7f171c80/raw/9d573387dfdbb8bce2a55db7246f4207663ac07f/mesh_trixi_unstructured_mesh_docs.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/52056f1487853fab63b7f4ed7f171c80/raw/9d573387dfdbb8bce2a55db7246f4207663ac07f/mesh_trixi_unstructured_mesh_docs.mesh", + joinpath(@__DIR__, "mesh_trixi_unstructured_mesh_docs.mesh")) mesh = UnstructuredMesh2D(mesh_file) diff --git a/examples/unstructured_2d_dgsem/elixir_euler_ec.jl b/examples/unstructured_2d_dgsem/elixir_euler_ec.jl index 0f53aa62a18..58b4d9a1dd2 100644 --- a/examples/unstructured_2d_dgsem/elixir_euler_ec.jl +++ b/examples/unstructured_2d_dgsem/elixir_euler_ec.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -19,12 +18,8 @@ solver = DGSEM(polydeg = 6, surface_flux = flux_ranocha, ############################################################################### # Get the curved quad mesh from a file - -default_mesh_file = joinpath(@__DIR__, "mesh_periodic_square_with_twist.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/12ce661d7c354c3d94c74b964b0f1c96/raw/8275b9a60c6e7ebbdea5fc4b4f091c47af3d5273/mesh_periodic_square_with_twist.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/12ce661d7c354c3d94c74b964b0f1c96/raw/8275b9a60c6e7ebbdea5fc4b4f091c47af3d5273/mesh_periodic_square_with_twist.mesh", + 
joinpath(@__DIR__, "mesh_periodic_square_with_twist.mesh")) mesh = UnstructuredMesh2D(mesh_file, periodicity = true) diff --git a/examples/unstructured_2d_dgsem/elixir_euler_free_stream.jl b/examples/unstructured_2d_dgsem/elixir_euler_free_stream.jl index a2fec1a320a..f266a3de0b2 100644 --- a/examples/unstructured_2d_dgsem/elixir_euler_free_stream.jl +++ b/examples/unstructured_2d_dgsem/elixir_euler_free_stream.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -26,12 +25,8 @@ solver = DGSEM(polydeg = 6, surface_flux = flux_hll) ############################################################################### # Get the curved quad mesh from a file (downloads the file if not available locally) - -default_mesh_file = joinpath(@__DIR__, "mesh_gingerbread_man.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/2c6440b5f8a57db131061ad7aa78ee2b/raw/1f89fdf2c874ff678c78afb6fe8dc784bdfd421f/mesh_gingerbread_man.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/2c6440b5f8a57db131061ad7aa78ee2b/raw/1f89fdf2c874ff678c78afb6fe8dc784bdfd421f/mesh_gingerbread_man.mesh", + joinpath(@__DIR__, "mesh_gingerbread_man.mesh")) mesh = UnstructuredMesh2D(mesh_file) diff --git a/examples/unstructured_2d_dgsem/elixir_euler_periodic.jl b/examples/unstructured_2d_dgsem/elixir_euler_periodic.jl index afd177f0740..e640001ad7f 100644 --- a/examples/unstructured_2d_dgsem/elixir_euler_periodic.jl +++ b/examples/unstructured_2d_dgsem/elixir_euler_periodic.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -19,12 +18,8 @@ solver = DGSEM(polydeg = 6, surface_flux = FluxRotated(flux_hll)) ############################################################################### # Get the curved quad mesh from a file (downloads the file if not available locally) - -default_mesh_file = joinpath(@__DIR__, "mesh_periodic_square_with_twist.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/12ce661d7c354c3d94c74b964b0f1c96/raw/8275b9a60c6e7ebbdea5fc4b4f091c47af3d5273/mesh_periodic_square_with_twist.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/12ce661d7c354c3d94c74b964b0f1c96/raw/8275b9a60c6e7ebbdea5fc4b4f091c47af3d5273/mesh_periodic_square_with_twist.mesh", + joinpath(@__DIR__, "mesh_periodic_square_with_twist.mesh")) mesh = UnstructuredMesh2D(mesh_file, periodicity = true) diff --git a/examples/unstructured_2d_dgsem/elixir_euler_sedov.jl b/examples/unstructured_2d_dgsem/elixir_euler_sedov.jl index e1cc0932969..06053273b74 100644 --- a/examples/unstructured_2d_dgsem/elixir_euler_sedov.jl +++ b/examples/unstructured_2d_dgsem/elixir_euler_sedov.jl @@ -55,11 +55,8 @@ solver = DGSEM(polydeg = polydeg, surface_flux = surface_flux, volume_integral = volume_integral) # Get the curved quad mesh from a file -default_mesh_file = joinpath(@__DIR__, "mesh_periodic_square_with_twist.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/12ce661d7c354c3d94c74b964b0f1c96/raw/8275b9a60c6e7ebbdea5fc4b4f091c47af3d5273/mesh_periodic_square_with_twist.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = 
Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/12ce661d7c354c3d94c74b964b0f1c96/raw/8275b9a60c6e7ebbdea5fc4b4f091c47af3d5273/mesh_periodic_square_with_twist.mesh", + joinpath(@__DIR__, "mesh_periodic_square_with_twist.mesh")) mesh = UnstructuredMesh2D(mesh_file, periodicity = true) diff --git a/examples/unstructured_2d_dgsem/elixir_euler_wall_bc.jl b/examples/unstructured_2d_dgsem/elixir_euler_wall_bc.jl index b2abefe7eeb..65e5eb51ce6 100644 --- a/examples/unstructured_2d_dgsem/elixir_euler_wall_bc.jl +++ b/examples/unstructured_2d_dgsem/elixir_euler_wall_bc.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -40,12 +39,8 @@ solver = DGSEM(polydeg = 4, surface_flux = flux_hll) ############################################################################### # Get the curved quad mesh from a file - -default_mesh_file = joinpath(@__DIR__, "mesh_box_around_circle.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/8b9b11a1eedfa54b215c122c3d17b271/raw/0d2b5d98c87e67a6f384693a8b8e54b4c9fcbf3d/mesh_box_around_circle.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/8b9b11a1eedfa54b215c122c3d17b271/raw/0d2b5d98c87e67a6f384693a8b8e54b4c9fcbf3d/mesh_box_around_circle.mesh", + joinpath(@__DIR__, "mesh_box_around_circle.mesh")) mesh = UnstructuredMesh2D(mesh_file) diff --git a/examples/unstructured_2d_dgsem/elixir_mhd_alfven_wave.jl b/examples/unstructured_2d_dgsem/elixir_mhd_alfven_wave.jl index 3ed3e828ca8..0c7152a6ea0 100644 --- a/examples/unstructured_2d_dgsem/elixir_mhd_alfven_wave.jl +++ b/examples/unstructured_2d_dgsem/elixir_mhd_alfven_wave.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -17,12 +16,9 @@ solver = DGSEM(polydeg = 7, volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) # Get the unstructured quad mesh from a file (downloads the file if not available locally) -default_mesh_file = joinpath(@__DIR__, "mesh_alfven_wave_with_twist_and_flip.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/8f8cd23df27fcd494553f2a89f3c1ba4/raw/85e3c8d976bbe57ca3d559d653087b0889535295/mesh_alfven_wave_with_twist_and_flip.mesh", - default_mesh_file) +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/8f8cd23df27fcd494553f2a89f3c1ba4/raw/85e3c8d976bbe57ca3d559d653087b0889535295/mesh_alfven_wave_with_twist_and_flip.mesh", + joinpath(@__DIR__, "mesh_alfven_wave_with_twist_and_flip.mesh")) -mesh_file = default_mesh_file mesh = UnstructuredMesh2D(mesh_file, periodicity = true) semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) diff --git a/examples/unstructured_2d_dgsem/elixir_mhd_ec.jl b/examples/unstructured_2d_dgsem/elixir_mhd_ec.jl index a40f92cac02..805934e305d 100644 --- a/examples/unstructured_2d_dgsem/elixir_mhd_ec.jl +++ b/examples/unstructured_2d_dgsem/elixir_mhd_ec.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -35,11 +34,8 @@ solver = DGSEM(polydeg = 6, volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) # Get the unstructured quad mesh from a file (downloads the file if not available locally) -default_mesh_file = joinpath(@__DIR__, "mesh_alfven_wave_with_twist_and_flip.mesh") -isfile(default_mesh_file) || - 
download("https://gist.githubusercontent.com/andrewwinters5000/8f8cd23df27fcd494553f2a89f3c1ba4/raw/85e3c8d976bbe57ca3d559d653087b0889535295/mesh_alfven_wave_with_twist_and_flip.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/8f8cd23df27fcd494553f2a89f3c1ba4/raw/85e3c8d976bbe57ca3d559d653087b0889535295/mesh_alfven_wave_with_twist_and_flip.mesh", + joinpath(@__DIR__, "mesh_alfven_wave_with_twist_and_flip.mesh")) mesh = UnstructuredMesh2D(mesh_file, periodicity = true) diff --git a/examples/unstructured_2d_dgsem/elixir_shallowwater_dirichlet.jl b/examples/unstructured_2d_dgsem/elixir_shallowwater_dirichlet.jl index 1148f25fae3..df1a69192ce 100644 --- a/examples/unstructured_2d_dgsem/elixir_shallowwater_dirichlet.jl +++ b/examples/unstructured_2d_dgsem/elixir_shallowwater_dirichlet.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -38,11 +37,8 @@ solver = DGSEM(polydeg = 4, surface_flux = (flux_hll, flux_nonconservative_fjord # This setup is for the curved, split form well-balancedness testing # Get the unstructured quad mesh from a file (downloads the file if not available locally) -default_mesh_file = joinpath(@__DIR__, "mesh_outer_circle.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/9beddd9cd00e2a0a15865129eeb24928/raw/be71e67fa48bc4e1e97f5f6cd77c3ed34c6ba9be/mesh_outer_circle.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/9beddd9cd00e2a0a15865129eeb24928/raw/be71e67fa48bc4e1e97f5f6cd77c3ed34c6ba9be/mesh_outer_circle.mesh", + joinpath(@__DIR__, "mesh_outer_circle.mesh")) mesh = UnstructuredMesh2D(mesh_file) diff --git a/examples/unstructured_2d_dgsem/elixir_shallowwater_ec.jl b/examples/unstructured_2d_dgsem/elixir_shallowwater_ec.jl index 8e9d396d826..9122fb8287d 100644 --- a/examples/unstructured_2d_dgsem/elixir_shallowwater_ec.jl +++ b/examples/unstructured_2d_dgsem/elixir_shallowwater_ec.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -25,11 +24,8 @@ solver = DGSEM(polydeg = 6, # This setup is for the curved, split form entropy conservation testing (needs periodic BCs) # Get the unstructured quad mesh from a file (downloads the file if not available locally) -default_mesh_file = joinpath(@__DIR__, "mesh_alfven_wave_with_twist_and_flip.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/8f8cd23df27fcd494553f2a89f3c1ba4/raw/85e3c8d976bbe57ca3d559d653087b0889535295/mesh_alfven_wave_with_twist_and_flip.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/8f8cd23df27fcd494553f2a89f3c1ba4/raw/85e3c8d976bbe57ca3d559d653087b0889535295/mesh_alfven_wave_with_twist_and_flip.mesh", + joinpath(@__DIR__, "mesh_alfven_wave_with_twist_and_flip.mesh")) mesh = UnstructuredMesh2D(mesh_file, periodicity = true) diff --git a/examples/unstructured_2d_dgsem/elixir_shallowwater_ec_shockcapturing.jl b/examples/unstructured_2d_dgsem/elixir_shallowwater_ec_shockcapturing.jl index 94202b81df0..98408db5a78 100644 --- a/examples/unstructured_2d_dgsem/elixir_shallowwater_ec_shockcapturing.jl +++ b/examples/unstructured_2d_dgsem/elixir_shallowwater_ec_shockcapturing.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -34,11 +33,8 @@ solver = 
DGSEM(basis, surface_flux, volume_integral) # This setup is for the curved, split form entropy conservation testing (needs periodic BCs) # Get the unstructured quad mesh from a file (downloads the file if not available locally) -default_mesh_file = joinpath(@__DIR__, "mesh_alfven_wave_with_twist_and_flip.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/8f8cd23df27fcd494553f2a89f3c1ba4/raw/85e3c8d976bbe57ca3d559d653087b0889535295/mesh_alfven_wave_with_twist_and_flip.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/8f8cd23df27fcd494553f2a89f3c1ba4/raw/85e3c8d976bbe57ca3d559d653087b0889535295/mesh_alfven_wave_with_twist_and_flip.mesh", + joinpath(@__DIR__, "mesh_alfven_wave_with_twist_and_flip.mesh")) mesh = UnstructuredMesh2D(mesh_file, periodicity = true) diff --git a/examples/unstructured_2d_dgsem/elixir_shallowwater_source_terms.jl b/examples/unstructured_2d_dgsem/elixir_shallowwater_source_terms.jl index 07668688406..a7aa5808955 100644 --- a/examples/unstructured_2d_dgsem/elixir_shallowwater_source_terms.jl +++ b/examples/unstructured_2d_dgsem/elixir_shallowwater_source_terms.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -23,11 +22,8 @@ solver = DGSEM(polydeg = 6, surface_flux = surface_flux, # This setup is for the curved, split form convergence test on a periodic domain # Get the unstructured quad mesh from a file (downloads the file if not available locally) -default_mesh_file = joinpath(@__DIR__, "mesh_alfven_wave_with_twist_and_flip.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/8f8cd23df27fcd494553f2a89f3c1ba4/raw/85e3c8d976bbe57ca3d559d653087b0889535295/mesh_alfven_wave_with_twist_and_flip.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/8f8cd23df27fcd494553f2a89f3c1ba4/raw/85e3c8d976bbe57ca3d559d653087b0889535295/mesh_alfven_wave_with_twist_and_flip.mesh", + joinpath(@__DIR__, "mesh_alfven_wave_with_twist_and_flip.mesh")) mesh = UnstructuredMesh2D(mesh_file, periodicity = true) diff --git a/examples/unstructured_2d_dgsem/elixir_shallowwater_three_mound_dam_break.jl b/examples/unstructured_2d_dgsem/elixir_shallowwater_three_mound_dam_break.jl index 6164f9d4a55..df321aad267 100644 --- a/examples/unstructured_2d_dgsem/elixir_shallowwater_three_mound_dam_break.jl +++ b/examples/unstructured_2d_dgsem/elixir_shallowwater_three_mound_dam_break.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -93,16 +92,10 @@ solver = DGSEM(basis, surface_flux, volume_integral) ############################################################################### # Get the unstructured quad mesh from a file (downloads the file if not available locally) +mesh_file = Trixi.download("https://gist.githubusercontent.com/svengoldberg/c3c87fecb3fc6e46be7f0d1c7cb35f83/raw/e817ecd9e6c4686581d63c46128f9b6468d396d3/mesh_three_mound.mesh", + joinpath(@__DIR__, "mesh_three_mound.mesh")) -default_meshfile = joinpath(@__DIR__, "mesh_three_mound.mesh") - -isfile(default_meshfile) || - download("https://gist.githubusercontent.com/svengoldberg/c3c87fecb3fc6e46be7f0d1c7cb35f83/raw/e817ecd9e6c4686581d63c46128f9b6468d396d3/mesh_three_mound.mesh", - default_meshfile) - -meshfile = default_meshfile - -mesh = UnstructuredMesh2D(meshfile) +mesh = UnstructuredMesh2D(mesh_file) # 
Create the semi discretization object semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver; diff --git a/examples/unstructured_2d_dgsem/elixir_shallowwater_twolayer_convergence.jl b/examples/unstructured_2d_dgsem/elixir_shallowwater_twolayer_convergence.jl index 0b86095663a..fcc08b6f991 100644 --- a/examples/unstructured_2d_dgsem/elixir_shallowwater_twolayer_convergence.jl +++ b/examples/unstructured_2d_dgsem/elixir_shallowwater_twolayer_convergence.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -24,11 +23,8 @@ solver = DGSEM(polydeg = 6, surface_flux = surface_flux, # This setup is for the curved, split form convergence test on a periodic domain # Get the unstructured quad mesh from a file (downloads the file if not available locally) -default_mesh_file = joinpath(@__DIR__, "mesh_alfven_wave_with_twist_and_flip.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/8f8cd23df27fcd494553f2a89f3c1ba4/raw/85e3c8d976bbe57ca3d559d653087b0889535295/mesh_alfven_wave_with_twist_and_flip.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/8f8cd23df27fcd494553f2a89f3c1ba4/raw/85e3c8d976bbe57ca3d559d653087b0889535295/mesh_alfven_wave_with_twist_and_flip.mesh", + joinpath(@__DIR__, "mesh_alfven_wave_with_twist_and_flip.mesh")) mesh = UnstructuredMesh2D(mesh_file, periodicity = true) diff --git a/examples/unstructured_2d_dgsem/elixir_shallowwater_twolayer_dam_break.jl b/examples/unstructured_2d_dgsem/elixir_shallowwater_twolayer_dam_break.jl index 4ad5f7e3201..821f31c52ac 100644 --- a/examples/unstructured_2d_dgsem/elixir_shallowwater_twolayer_dam_break.jl +++ b/examples/unstructured_2d_dgsem/elixir_shallowwater_twolayer_dam_break.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -48,11 +47,8 @@ solver = DGSEM(polydeg = 6, surface_flux = surface_flux, ############################################################################### # Get the unstructured quad mesh from a file (downloads the file if not available locally) -default_mesh_file = joinpath(@__DIR__, "mesh_alfven_wave_with_twist_and_flip.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/8f8cd23df27fcd494553f2a89f3c1ba4/raw/85e3c8d976bbe57ca3d559d653087b0889535295/mesh_alfven_wave_with_twist_and_flip.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/8f8cd23df27fcd494553f2a89f3c1ba4/raw/85e3c8d976bbe57ca3d559d653087b0889535295/mesh_alfven_wave_with_twist_and_flip.mesh", + joinpath(@__DIR__, "mesh_alfven_wave_with_twist_and_flip.mesh")) mesh = UnstructuredMesh2D(mesh_file, periodicity = false) diff --git a/examples/unstructured_2d_dgsem/elixir_shallowwater_twolayer_well_balanced.jl b/examples/unstructured_2d_dgsem/elixir_shallowwater_twolayer_well_balanced.jl index 6a727df2502..ca1f54595bb 100644 --- a/examples/unstructured_2d_dgsem/elixir_shallowwater_twolayer_well_balanced.jl +++ b/examples/unstructured_2d_dgsem/elixir_shallowwater_twolayer_well_balanced.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -42,11 +41,8 @@ solver = DGSEM(polydeg = 6, surface_flux = surface_flux, # This setup is for the curved, split form well-balancedness testing # Get the unstructured quad mesh from a file (downloads the file if not available locally) 
-default_mesh_file = joinpath(@__DIR__, "mesh_alfven_wave_with_twist_and_flip.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/8f8cd23df27fcd494553f2a89f3c1ba4/raw/85e3c8d976bbe57ca3d559d653087b0889535295/mesh_alfven_wave_with_twist_and_flip.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/8f8cd23df27fcd494553f2a89f3c1ba4/raw/85e3c8d976bbe57ca3d559d653087b0889535295/mesh_alfven_wave_with_twist_and_flip.mesh", + joinpath(@__DIR__, "mesh_alfven_wave_with_twist_and_flip.mesh")) mesh = UnstructuredMesh2D(mesh_file, periodicity = true) diff --git a/examples/unstructured_2d_dgsem/elixir_shallowwater_wall_bc_shockcapturing.jl b/examples/unstructured_2d_dgsem/elixir_shallowwater_wall_bc_shockcapturing.jl index 76b9642d595..f115113ed27 100644 --- a/examples/unstructured_2d_dgsem/elixir_shallowwater_wall_bc_shockcapturing.jl +++ b/examples/unstructured_2d_dgsem/elixir_shallowwater_wall_bc_shockcapturing.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -55,12 +54,8 @@ solver = DGSEM(polydeg = polydeg, surface_flux = surface_flux, ############################################################################### # Get the unstructured quad mesh from a file (downloads the file if not available locally) - -default_mesh_file = joinpath(@__DIR__, "mesh_outer_circle.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/9beddd9cd00e2a0a15865129eeb24928/raw/be71e67fa48bc4e1e97f5f6cd77c3ed34c6ba9be/mesh_outer_circle.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/9beddd9cd00e2a0a15865129eeb24928/raw/be71e67fa48bc4e1e97f5f6cd77c3ed34c6ba9be/mesh_outer_circle.mesh", + joinpath(@__DIR__, "mesh_outer_circle.mesh")) mesh = UnstructuredMesh2D(mesh_file) diff --git a/examples/unstructured_2d_dgsem/elixir_shallowwater_well_balanced.jl b/examples/unstructured_2d_dgsem/elixir_shallowwater_well_balanced.jl index bf4d0be682a..6bad3a77f03 100644 --- a/examples/unstructured_2d_dgsem/elixir_shallowwater_well_balanced.jl +++ b/examples/unstructured_2d_dgsem/elixir_shallowwater_well_balanced.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -36,13 +35,9 @@ solver = DGSEM(polydeg = 6, surface_flux = surface_flux, ############################################################################### # This setup is for the curved, split form well-balancedness testing - # Get the unstructured quad mesh from a file (downloads the file if not available locally) -default_mesh_file = joinpath(@__DIR__, "mesh_alfven_wave_with_twist_and_flip.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/8f8cd23df27fcd494553f2a89f3c1ba4/raw/85e3c8d976bbe57ca3d559d653087b0889535295/mesh_alfven_wave_with_twist_and_flip.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/8f8cd23df27fcd494553f2a89f3c1ba4/raw/85e3c8d976bbe57ca3d559d653087b0889535295/mesh_alfven_wave_with_twist_and_flip.mesh", + joinpath(@__DIR__, "mesh_alfven_wave_with_twist_and_flip.mesh")) mesh = UnstructuredMesh2D(mesh_file, periodicity = true) diff --git a/examples/unstructured_2d_fdsbp/elixir_advection_basic.jl b/examples/unstructured_2d_fdsbp/elixir_advection_basic.jl index c181203e7a4..fe7e708f3b3 100644 
--- a/examples/unstructured_2d_fdsbp/elixir_advection_basic.jl +++ b/examples/unstructured_2d_fdsbp/elixir_advection_basic.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -21,12 +20,8 @@ solver = FDSBP(D_SBP, ############################################################################### # Get the curved quad mesh from a file (downloads the file if not available locally) - -default_mesh_file = joinpath(@__DIR__, "mesh_periodic_square_with_twist.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/12ce661d7c354c3d94c74b964b0f1c96/raw/8275b9a60c6e7ebbdea5fc4b4f091c47af3d5273/mesh_periodic_square_with_twist.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/12ce661d7c354c3d94c74b964b0f1c96/raw/8275b9a60c6e7ebbdea5fc4b4f091c47af3d5273/mesh_periodic_square_with_twist.mesh", + joinpath(@__DIR__, "mesh_periodic_square_with_twist.mesh")) mesh = UnstructuredMesh2D(mesh_file, periodicity = true) diff --git a/examples/unstructured_2d_fdsbp/elixir_euler_free_stream.jl b/examples/unstructured_2d_fdsbp/elixir_euler_free_stream.jl index 7ada50c0c65..25a81c16bf9 100644 --- a/examples/unstructured_2d_fdsbp/elixir_euler_free_stream.jl +++ b/examples/unstructured_2d_fdsbp/elixir_euler_free_stream.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -33,12 +32,8 @@ solver = FDSBP(D_SBP, ############################################################################### # Get the curved quad mesh from a file (downloads the file if not available locally) - -default_mesh_file = joinpath(@__DIR__, "mesh_gingerbread_man.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/2c6440b5f8a57db131061ad7aa78ee2b/raw/1f89fdf2c874ff678c78afb6fe8dc784bdfd421f/mesh_gingerbread_man.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/2c6440b5f8a57db131061ad7aa78ee2b/raw/1f89fdf2c874ff678c78afb6fe8dc784bdfd421f/mesh_gingerbread_man.mesh", + joinpath(@__DIR__, "mesh_gingerbread_man.mesh")) mesh = UnstructuredMesh2D(mesh_file) diff --git a/examples/unstructured_2d_fdsbp/elixir_euler_source_terms.jl b/examples/unstructured_2d_fdsbp/elixir_euler_source_terms.jl index edcd221bf59..5f11d41ad5c 100644 --- a/examples/unstructured_2d_fdsbp/elixir_euler_source_terms.jl +++ b/examples/unstructured_2d_fdsbp/elixir_euler_source_terms.jl @@ -1,5 +1,4 @@ -using Downloads: download using OrdinaryDiffEq using Trixi @@ -22,12 +21,8 @@ solver = FDSBP(D_SBP, ############################################################################### # Get the curved quad mesh from a file (downloads the file if not available locally) - -default_mesh_file = joinpath(@__DIR__, "mesh_periodic_square_with_twist.mesh") -isfile(default_mesh_file) || - download("https://gist.githubusercontent.com/andrewwinters5000/12ce661d7c354c3d94c74b964b0f1c96/raw/8275b9a60c6e7ebbdea5fc4b4f091c47af3d5273/mesh_periodic_square_with_twist.mesh", - default_mesh_file) -mesh_file = default_mesh_file +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/12ce661d7c354c3d94c74b964b0f1c96/raw/8275b9a60c6e7ebbdea5fc4b4f091c47af3d5273/mesh_periodic_square_with_twist.mesh", + joinpath(@__DIR__, "mesh_periodic_square_with_twist.mesh")) mesh = UnstructuredMesh2D(mesh_file, periodicity = true) diff --git a/src/Trixi.jl b/src/Trixi.jl index 
e18b2f6415c..bf0986084af 100644 --- a/src/Trixi.jl +++ b/src/Trixi.jl @@ -39,6 +39,7 @@ import SciMLBase: get_du, get_tmp_cache, u_modified!, get_proposed_dt, set_proposed_dt!, terminate!, remake, add_tstop!, has_tstop, first_tstop +using Downloads: Downloads using CodeTracking: CodeTracking using ConstructionBase: ConstructionBase using DiffEqCallbacks: PeriodicCallback, PeriodicCallbackAffect @@ -69,6 +70,8 @@ using Triangulate: Triangulate, TriangulateIO, triangulate export TriangulateIO # for type parameter in DGMultiMesh using TriplotBase: TriplotBase using TriplotRecipes: DGTriPseudocolor +@reexport using TrixiBase: trixi_include +using TrixiBase: TrixiBase @reexport using SimpleUnPack: @unpack using SimpleUnPack: @pack! using DataStructures: BinaryHeap, FasterForward, extract_all! @@ -128,7 +131,7 @@ include("callbacks_step/callbacks_step.jl") include("callbacks_stage/callbacks_stage.jl") include("semidiscretization/semidiscretization_euler_gravity.jl") -# `trixi_include` and special elixirs such as `convergence_test` +# Special elixirs such as `convergence_test` include("auxiliary/special_elixirs.jl") # Plot recipes and conversion functions to visualize results with Plots.jl @@ -154,7 +157,8 @@ export AcousticPerturbationEquations2D, ShallowWaterTwoLayerEquations1D, ShallowWaterTwoLayerEquations2D, ShallowWaterEquationsQuasi1D, LinearizedEulerEquations2D, - PolytropicEulerEquations2D + PolytropicEulerEquations2D, + TrafficFlowLWREquations1D export LaplaceDiffusion1D, LaplaceDiffusion2D, LaplaceDiffusion3D, CompressibleNavierStokesDiffusion1D, CompressibleNavierStokesDiffusion2D, diff --git a/src/auxiliary/auxiliary.jl b/src/auxiliary/auxiliary.jl index 1f7d30d6aa8..92da9a5ba8b 100644 --- a/src/auxiliary/auxiliary.jl +++ b/src/auxiliary/auxiliary.jl @@ -345,4 +345,27 @@ function register_error_hints() return nothing end + +""" + Trixi.download(src_url, file_path) + +Download a file from given `src_url` to given `file_path` if +`file_path` is not already a file. This function just returns +`file_path`. +This is a small wrapper of `Downloads.download(src_url, file_path)` +that avoids race conditions when multiple MPI ranks are used. +""" +function download(src_url, file_path) + # Note that `mpi_isroot()` is also `true` if running + # in serial (without MPI). + if mpi_isroot() + isfile(file_path) || Downloads.download(src_url, file_path) + end + + if mpi_isparallel() + MPI.Barrier(mpi_comm()) + end + + return file_path +end end # @muladd diff --git a/src/auxiliary/precompile.jl b/src/auxiliary/precompile.jl index 9cec502f6cb..4d5399b5ba3 100644 --- a/src/auxiliary/precompile.jl +++ b/src/auxiliary/precompile.jl @@ -577,9 +577,6 @@ function _precompile_manual_() @assert Base.precompile(Tuple{typeof(show), Base.TTY, lbm_collision_callback_type}) @assert Base.precompile(Tuple{typeof(show), IOContext{Base.TTY}, MIME"text/plain", lbm_collision_callback_type}) - - # infrastructure, special elixirs - @assert Base.precompile(Tuple{typeof(trixi_include), String}) end @assert Base.precompile(Tuple{typeof(init_mpi)}) diff --git a/src/auxiliary/special_elixirs.jl b/src/auxiliary/special_elixirs.jl index 5fdd9aea0c5..d71a27aa96a 100644 --- a/src/auxiliary/special_elixirs.jl +++ b/src/auxiliary/special_elixirs.jl @@ -5,58 +5,6 @@ @muladd begin #! format: noindent -# Note: We can't call the method below `Trixi.include` since that is created automatically -# inside `module Trixi` to `include` source files and evaluate them within the global scope -# of `Trixi`. 
However, users will want to evaluate in the global scope of `Main` or something -# similar to manage dependencies on their own. -""" - trixi_include([mod::Module=Main,] elixir::AbstractString; kwargs...) - -`include` the file `elixir` and evaluate its content in the global scope of module `mod`. -You can override specific assignments in `elixir` by supplying keyword arguments. -It's basic purpose is to make it easier to modify some parameters while running Trixi.jl from the -REPL. Additionally, this is used in tests to reduce the computational burden for CI while still -providing examples with sensible default values for users. - -Before replacing assignments in `elixir`, the keyword argument `maxiters` is inserted -into calls to `solve` and `Trixi.solve` with it's default value used in the SciML ecosystem -for ODEs, see the "Miscellaneous" section of the -[documentation](https://docs.sciml.ai/DiffEqDocs/stable/basics/common_solver_opts/). - -# Examples - -```jldoctest -julia> redirect_stdout(devnull) do - trixi_include(@__MODULE__, joinpath(examples_dir(), "tree_1d_dgsem", "elixir_advection_extended.jl"), - tspan=(0.0, 0.1)) - sol.t[end] - end -[ Info: You just called `trixi_include`. Julia may now compile the code, please be patient. -0.1 -``` -""" -function trixi_include(mod::Module, elixir::AbstractString; kwargs...) - # Check that all kwargs exist as assignments - code = read(elixir, String) - expr = Meta.parse("begin \n$code \nend") - expr = insert_maxiters(expr) - - for (key, val) in kwargs - # This will throw an error when `key` is not found - find_assignment(expr, key) - end - - # Print information on potential wait time only in non-parallel case - if !mpi_isparallel() - @info "You just called `trixi_include`. Julia may now compile the code, please be patient." - end - Base.include(ex -> replace_assignments(insert_maxiters(ex); kwargs...), mod, elixir) -end - -function trixi_include(elixir::AbstractString; kwargs...) - trixi_include(Main, elixir; kwargs...) -end - """ convergence_test([mod::Module=Main,] elixir::AbstractString, iterations; kwargs...) @@ -177,112 +125,15 @@ end # Helper methods used in the functions defined above -# Apply the function `f` to `expr` and all sub-expressions recursively. -walkexpr(f, expr::Expr) = f(Expr(expr.head, (walkexpr(f, arg) for arg in expr.args)...)) -walkexpr(f, x) = f(x) - -# Insert the keyword argument `maxiters` into calls to `solve` and `Trixi.solve` -# with default value `10^5` if it is not already present. -function insert_maxiters(expr) - maxiters_default = 10^5 - - expr = walkexpr(expr) do x - if x isa Expr - is_plain_solve = x.head === Symbol("call") && x.args[1] === Symbol("solve") - is_trixi_solve = (x.head === Symbol("call") && x.args[1] isa Expr && - x.args[1].head === Symbol(".") && - x.args[1].args[1] === Symbol("Trixi") && - x.args[1].args[2] isa QuoteNode && - x.args[1].args[2].value === Symbol("solve")) - - if is_plain_solve || is_trixi_solve - # Do nothing if `maxiters` is already set as keyword argument... 
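Since `trixi_include` is now provided by TrixiBase and re-exported from Trixi.jl, the user-facing workflow documented here stays the same; a minimal sketch, assuming the extended 1D advection elixir shipped with Trixi.jl:

```julia
using Trixi

# Override a single assignment of the elixir via a keyword argument; `maxiters`
# is still inserted into the `solve` call with its SciML default if not set.
trixi_include(joinpath(examples_dir(), "tree_1d_dgsem", "elixir_advection_extended.jl"),
              tspan = (0.0, 0.1))
```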
- for arg in x.args - # This detects the case where `maxiters` is set as keyword argument - # without or before a semicolon - if (arg isa Expr && arg.head === Symbol("kw") && - arg.args[1] === Symbol("maxiters")) - return x - end - - # This detects the case where maxiters is set as keyword argument - # after a semicolon - if (arg isa Expr && arg.head === Symbol("parameters")) - # We need to check each keyword argument listed here - for nested_arg in arg.args - if (nested_arg isa Expr && - nested_arg.head === Symbol("kw") && - nested_arg.args[1] === Symbol("maxiters")) - return x - end - end - end - end - - # ...and insert it otherwise. - push!(x.args, Expr(Symbol("kw"), Symbol("maxiters"), maxiters_default)) - end - end - return x - end - - return expr -end - -# Replace assignments to `key` in `expr` by `key = val` for all `(key,val)` in `kwargs`. -function replace_assignments(expr; kwargs...) - # replace explicit and keyword assignments - expr = walkexpr(expr) do x - if x isa Expr - for (key, val) in kwargs - if (x.head === Symbol("=") || x.head === :kw) && - x.args[1] === Symbol(key) - x.args[2] = :($val) - # dump(x) - end - end - end - return x - end - - return expr -end - -# find a (keyword or common) assignment to `destination` in `expr` -# and return the assigned value -function find_assignment(expr, destination) - # declare result to be able to assign to it in the closure - local result - found = false - - # find explicit and keyword assignments - walkexpr(expr) do x - if x isa Expr - if (x.head === Symbol("=") || x.head === :kw) && - x.args[1] === Symbol(destination) - result = x.args[2] - found = true - # dump(x) - end - end - return x - end - - if !found - throw(ArgumentError("assignment `$destination` not found in expression")) - end - - result -end - -# searches the parameter that specifies the mesh reslution in the elixir +# Searches for the assignment that specifies the mesh resolution in the elixir function extract_initial_resolution(elixir, kwargs) code = read(elixir, String) expr = Meta.parse("begin \n$code \nend") try # get the initial_refinement_level from the elixir - initial_refinement_level = find_assignment(expr, :initial_refinement_level) + initial_refinement_level = TrixiBase.find_assignment(expr, + :initial_refinement_level) if haskey(kwargs, :initial_refinement_level) return kwargs[:initial_refinement_level] @@ -294,7 +145,8 @@ function extract_initial_resolution(elixir, kwargs) if isa(e, ArgumentError) try # get cells_per_dimension from the elixir - cells_per_dimension = eval(find_assignment(expr, :cells_per_dimension)) + cells_per_dimension = eval(TrixiBase.find_assignment(expr, + :cells_per_dimension)) if haskey(kwargs, :cells_per_dimension) return kwargs[:cells_per_dimension] diff --git a/src/auxiliary/t8code.jl b/src/auxiliary/t8code.jl index bd781b21c1e..7c1399fc803 100644 --- a/src/auxiliary/t8code.jl +++ b/src/auxiliary/t8code.jl @@ -35,7 +35,7 @@ function init_t8code() # production runs this is not mandatory, but is helpful during # development. Hence, this option is only activated when environment # variable TRIXI_T8CODE_SC_FINALIZE exists. - @warn "T8code.jl: sc_finalize will be called during shutdown of Trixi.jl." + @info "T8code.jl: `sc_finalize` will be called during shutdown of Trixi.jl." 
MPI.add_finalize_hook!(T8code.Libt8.sc_finalize) end else @@ -46,301 +46,6 @@ function init_t8code() return nothing end -function trixi_t8_unref_forest(forest) - t8_forest_unref(Ref(forest)) -end - -function t8_free(ptr) - T8code.Libt8.sc_free(t8_get_package_id(), ptr) -end - -function trixi_t8_count_interfaces(forest) - # Check that forest is a committed, that is valid and usable, forest. - @assert t8_forest_is_committed(forest) != 0 - - # Get the number of local elements of forest. - num_local_elements = t8_forest_get_local_num_elements(forest) - # Get the number of ghost elements of forest. - num_ghost_elements = t8_forest_get_num_ghosts(forest) - # Get the number of trees that have elements of this process. - num_local_trees = t8_forest_get_num_local_trees(forest) - - current_index = t8_locidx_t(0) - - local_num_conform = 0 - local_num_mortars = 0 - local_num_boundary = 0 - - for itree in 0:(num_local_trees - 1) - tree_class = t8_forest_get_tree_class(forest, itree) - eclass_scheme = t8_forest_get_eclass_scheme(forest, tree_class) - - # Get the number of elements of this tree. - num_elements_in_tree = t8_forest_get_tree_num_elements(forest, itree) - - for ielement in 0:(num_elements_in_tree - 1) - element = t8_forest_get_element_in_tree(forest, itree, ielement) - - level = t8_element_level(eclass_scheme, element) - - num_faces = t8_element_num_faces(eclass_scheme, element) - - for iface in 0:(num_faces - 1) - pelement_indices_ref = Ref{Ptr{t8_locidx_t}}() - pneighbor_leafs_ref = Ref{Ptr{Ptr{t8_element}}}() - pneigh_scheme_ref = Ref{Ptr{t8_eclass_scheme}}() - - dual_faces_ref = Ref{Ptr{Cint}}() - num_neighbors_ref = Ref{Cint}() - - forest_is_balanced = Cint(1) - - t8_forest_leaf_face_neighbors(forest, itree, element, - pneighbor_leafs_ref, iface, dual_faces_ref, - num_neighbors_ref, - pelement_indices_ref, pneigh_scheme_ref, - forest_is_balanced) - - num_neighbors = num_neighbors_ref[] - neighbor_ielements = unsafe_wrap(Array, pelement_indices_ref[], - num_neighbors) - neighbor_leafs = unsafe_wrap(Array, pneighbor_leafs_ref[], num_neighbors) - neighbor_scheme = pneigh_scheme_ref[] - - if num_neighbors > 0 - neighbor_level = t8_element_level(neighbor_scheme, neighbor_leafs[1]) - - # Conforming interface: The second condition ensures we only visit the interface once. - if level == neighbor_level && current_index <= neighbor_ielements[1] - local_num_conform += 1 - elseif level < neighbor_level - local_num_mortars += 1 - end - - else - local_num_boundary += 1 - end - - t8_free(dual_faces_ref[]) - t8_free(pneighbor_leafs_ref[]) - t8_free(pelement_indices_ref[]) - end # for - - current_index += 1 - end # for - end # for - - return (interfaces = local_num_conform, - mortars = local_num_mortars, - boundaries = local_num_boundary) -end - -function trixi_t8_fill_mesh_info(forest, elements, interfaces, mortars, boundaries, - boundary_names) - # Check that forest is a committed, that is valid and usable, forest. - @assert t8_forest_is_committed(forest) != 0 - - # Get the number of local elements of forest. - num_local_elements = t8_forest_get_local_num_elements(forest) - # Get the number of ghost elements of forest. - num_ghost_elements = t8_forest_get_num_ghosts(forest) - # Get the number of trees that have elements of this process. 
- num_local_trees = t8_forest_get_num_local_trees(forest) - - current_index = t8_locidx_t(0) - - local_num_conform = 0 - local_num_mortars = 0 - local_num_boundary = 0 - - for itree in 0:(num_local_trees - 1) - tree_class = t8_forest_get_tree_class(forest, itree) - eclass_scheme = t8_forest_get_eclass_scheme(forest, tree_class) - - # Get the number of elements of this tree. - num_elements_in_tree = t8_forest_get_tree_num_elements(forest, itree) - - for ielement in 0:(num_elements_in_tree - 1) - element = t8_forest_get_element_in_tree(forest, itree, ielement) - - level = t8_element_level(eclass_scheme, element) - - num_faces = t8_element_num_faces(eclass_scheme, element) - - for iface in 0:(num_faces - 1) - - # Compute the `orientation` of the touching faces. - if t8_element_is_root_boundary(eclass_scheme, element, iface) == 1 - cmesh = t8_forest_get_cmesh(forest) - itree_in_cmesh = t8_forest_ltreeid_to_cmesh_ltreeid(forest, itree) - iface_in_tree = t8_element_tree_face(eclass_scheme, element, iface) - orientation_ref = Ref{Cint}() - - t8_cmesh_get_face_neighbor(cmesh, itree_in_cmesh, iface_in_tree, C_NULL, - orientation_ref) - orientation = orientation_ref[] - else - orientation = zero(Cint) - end - - pelement_indices_ref = Ref{Ptr{t8_locidx_t}}() - pneighbor_leafs_ref = Ref{Ptr{Ptr{t8_element}}}() - pneigh_scheme_ref = Ref{Ptr{t8_eclass_scheme}}() - - dual_faces_ref = Ref{Ptr{Cint}}() - num_neighbors_ref = Ref{Cint}() - - forest_is_balanced = Cint(1) - - t8_forest_leaf_face_neighbors(forest, itree, element, - pneighbor_leafs_ref, iface, dual_faces_ref, - num_neighbors_ref, - pelement_indices_ref, pneigh_scheme_ref, - forest_is_balanced) - - num_neighbors = num_neighbors_ref[] - dual_faces = unsafe_wrap(Array, dual_faces_ref[], num_neighbors) - neighbor_ielements = unsafe_wrap(Array, pelement_indices_ref[], - num_neighbors) - neighbor_leafs = unsafe_wrap(Array, pneighbor_leafs_ref[], num_neighbors) - neighbor_scheme = pneigh_scheme_ref[] - - if num_neighbors > 0 - neighbor_level = t8_element_level(neighbor_scheme, neighbor_leafs[1]) - - # Conforming interface: The second condition ensures we only visit the interface once. - if level == neighbor_level && current_index <= neighbor_ielements[1] - local_num_conform += 1 - - faces = (iface, dual_faces[1]) - interface_id = local_num_conform - - # Write data to interfaces container. - interfaces.neighbor_ids[1, interface_id] = current_index + 1 - interfaces.neighbor_ids[2, interface_id] = neighbor_ielements[1] + 1 - - # Iterate over primary and secondary element. - for side in 1:2 - # Align interface in positive coordinate direction of primary element. - # For orientation == 1, the secondary element needs to be indexed backwards - # relative to the interface. - if side == 1 || orientation == 0 - # Forward indexing - indexing = :i_forward - else - # Backward indexing - indexing = :i_backward - end - - if faces[side] == 0 - # Index face in negative x-direction - interfaces.node_indices[side, interface_id] = (:begin, - indexing) - elseif faces[side] == 1 - # Index face in positive x-direction - interfaces.node_indices[side, interface_id] = (:end, - indexing) - elseif faces[side] == 2 - # Index face in negative y-direction - interfaces.node_indices[side, interface_id] = (indexing, - :begin) - else # faces[side] == 3 - # Index face in positive y-direction - interfaces.node_indices[side, interface_id] = (indexing, - :end) - end - end - - # Non-conforming interface. 
- elseif level < neighbor_level - local_num_mortars += 1 - - faces = (dual_faces[1], iface) - - mortar_id = local_num_mortars - - # Last entry is the large element. - mortars.neighbor_ids[end, mortar_id] = current_index + 1 - - # First `1:end-1` entries are the smaller elements. - mortars.neighbor_ids[1:(end - 1), mortar_id] .= neighbor_ielements .+ - 1 - - for side in 1:2 - # Align mortar in positive coordinate direction of small side. - # For orientation == 1, the large side needs to be indexed backwards - # relative to the mortar. - if side == 1 || orientation == 0 - # Forward indexing for small side or orientation == 0. - indexing = :i_forward - else - # Backward indexing for large side with reversed orientation. - indexing = :i_backward - # Since the orientation is reversed we have to account for this - # when filling the `neighbor_ids` array. - mortars.neighbor_ids[1, mortar_id] = neighbor_ielements[2] + - 1 - mortars.neighbor_ids[2, mortar_id] = neighbor_ielements[1] + - 1 - end - - if faces[side] == 0 - # Index face in negative x-direction - mortars.node_indices[side, mortar_id] = (:begin, indexing) - elseif faces[side] == 1 - # Index face in positive x-direction - mortars.node_indices[side, mortar_id] = (:end, indexing) - elseif faces[side] == 2 - # Index face in negative y-direction - mortars.node_indices[side, mortar_id] = (indexing, :begin) - else # faces[side] == 3 - # Index face in positive y-direction - mortars.node_indices[side, mortar_id] = (indexing, :end) - end - end - - # else: "level > neighbor_level" is skipped since we visit the mortar interface only once. - end - - # Domain boundary. - else - local_num_boundary += 1 - boundary_id = local_num_boundary - - boundaries.neighbor_ids[boundary_id] = current_index + 1 - - if iface == 0 - # Index face in negative x-direction. - boundaries.node_indices[boundary_id] = (:begin, :i_forward) - elseif iface == 1 - # Index face in positive x-direction. - boundaries.node_indices[boundary_id] = (:end, :i_forward) - elseif iface == 2 - # Index face in negative y-direction. - boundaries.node_indices[boundary_id] = (:i_forward, :begin) - else # iface == 3 - # Index face in positive y-direction. - boundaries.node_indices[boundary_id] = (:i_forward, :end) - end - - # One-based indexing. - boundaries.name[boundary_id] = boundary_names[iface + 1, itree + 1] - end - - t8_free(dual_faces_ref[]) - t8_free(pneighbor_leafs_ref[]) - t8_free(pelement_indices_ref[]) - end # for iface = ... - - current_index += 1 - end # for - end # for - - return (interfaces = local_num_conform, - mortars = local_num_mortars, - boundaries = local_num_boundary) -end - function trixi_t8_get_local_element_levels(forest) # Check that forest is a committed, that is valid and usable, forest. @assert t8_forest_is_committed(forest) != 0 @@ -412,21 +117,16 @@ function adapt_callback(forest, end function trixi_t8_adapt_new(old_forest, indicators) - # Check that forest is a committed, that is valid and usable, forest. - @assert t8_forest_is_committed(old_forest) != 0 - - # Init new forest. 
new_forest_ref = Ref{t8_forest_t}() t8_forest_init(new_forest_ref) new_forest = new_forest_ref[] - let set_from = C_NULL, recursive = 0, set_for_coarsening = 0, no_repartition = 0 + let set_from = C_NULL, recursive = 0, no_repartition = 1, do_ghost = 1 t8_forest_set_user_data(new_forest, pointer(indicators)) t8_forest_set_adapt(new_forest, old_forest, @t8_adapt_callback(adapt_callback), recursive) t8_forest_set_balance(new_forest, set_from, no_repartition) - t8_forest_set_partition(new_forest, set_from, set_for_coarsening) - t8_forest_set_ghost(new_forest, 1, T8_GHOST_FACES) # Note: MPI support not available yet so it is a dummy call. + t8_forest_set_ghost(new_forest, do_ghost, T8_GHOST_FACES) t8_forest_commit(new_forest) end diff --git a/src/callbacks_stage/subcell_bounds_check.jl b/src/callbacks_stage/subcell_bounds_check.jl index d7e30ab1621..4dbf44d29c4 100644 --- a/src/callbacks_stage/subcell_bounds_check.jl +++ b/src/callbacks_stage/subcell_bounds_check.jl @@ -97,6 +97,9 @@ function init_callback(callback::BoundsCheckCallback, semi, limiter::SubcellLimi end print(f, ", " * string(variables[v]) * "_min") end + for variable in limiter.positivity_variables_nonlinear + print(f, ", " * string(variable) * "_min") + end end println(f) end @@ -118,7 +121,7 @@ end @inline function finalize_callback(callback::BoundsCheckCallback, semi, limiter::SubcellLimiterIDP) (; local_minmax, positivity) = limiter - (; idp_bounds_delta) = limiter.cache + (; idp_bounds_delta_global) = limiter.cache variables = varnames(cons2cons, semi.equations) println("─"^100) @@ -128,8 +131,10 @@ end for v in limiter.local_minmax_variables_cons v_string = string(v) println("$(variables[v]):") - println("-lower bound: ", idp_bounds_delta[Symbol(v_string, "_min")][2]) - println("-upper bound: ", idp_bounds_delta[Symbol(v_string, "_max")][2]) + println("- lower bound: ", + idp_bounds_delta_global[Symbol(v_string, "_min")]) + println("- upper bound: ", + idp_bounds_delta_global[Symbol(v_string, "_max")]) end end if positivity @@ -138,7 +143,12 @@ end continue end println(string(variables[v]) * ":\n- positivity: ", - idp_bounds_delta[Symbol(string(v), "_min")][2]) + idp_bounds_delta_global[Symbol(string(v), "_min")]) + end + for variable in limiter.positivity_variables_nonlinear + variable_string = string(variable) + println(variable_string * ":\n- positivity: ", + idp_bounds_delta_global[Symbol(variable_string, "_min")]) end end println("─"^100 * "\n") diff --git a/src/callbacks_stage/subcell_bounds_check_2d.jl b/src/callbacks_stage/subcell_bounds_check_2d.jl index d52eb6edb9e..19d73968c9a 100644 --- a/src/callbacks_stage/subcell_bounds_check_2d.jl +++ b/src/callbacks_stage/subcell_bounds_check_2d.jl @@ -10,26 +10,37 @@ time, iter, output_directory, save_errors) (; local_minmax, positivity) = solver.volume_integral.limiter (; variable_bounds) = limiter.cache.subcell_limiter_coefficients - (; idp_bounds_delta) = limiter.cache + (; idp_bounds_delta_local, idp_bounds_delta_global) = limiter.cache + + # Note: Accessing the threaded memory vector `idp_bounds_delta_local` with + # `deviation = idp_bounds_delta_local[key][Threads.threadid()]` causes critical performance + # issues due to False Sharing. 
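As a stand-alone illustration of the false-sharing workaround spelled out in the remainder of this comment (the variable names below are made up, not the limiter's actual cache fields), each thread only writes to every `stride_size`-th slot, so no two threads touch the same cache line:

```julia
# Assume 128 B as an upper bound for the cache line size of current CPUs.
stride_size = div(128, sizeof(Float64))
deviation_threaded = zeros(stride_size * Threads.nthreads())

Threads.@threads for i in 1:10_000
    slot = stride_size * Threads.threadid()
    deviation_threaded[slot] = max(deviation_threaded[slot], rand())
end

# Reduce the per-thread maxima to a single global maximum.
maximum(deviation_threaded[stride_size * i] for i in 1:Threads.nthreads())
```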
+ # Initializing a vector with n times the length and using every n-th entry fixes this + # problem and allows proper scaling: + # `deviation = idp_bounds_delta_local[key][n * Threads.threadid()]` + # Since there are no processors with caches over 128B, we use `n = 128B / size(uEltype)` + stride_size = div(128, sizeof(eltype(u))) # = n if local_minmax for v in limiter.local_minmax_variables_cons v_string = string(v) key_min = Symbol(v_string, "_min") key_max = Symbol(v_string, "_max") - deviation_min = idp_bounds_delta[key_min] - deviation_max = idp_bounds_delta[key_max] - for element in eachelement(solver, cache), j in eachnode(solver), - i in eachnode(solver) - - var = u[v, i, j, element] - deviation_min[1] = max(deviation_min[1], - variable_bounds[key_min][i, j, element] - var) - deviation_max[1] = max(deviation_max[1], - var - variable_bounds[key_max][i, j, element]) + deviation_min_threaded = idp_bounds_delta_local[key_min] + deviation_max_threaded = idp_bounds_delta_local[key_max] + @threaded for element in eachelement(solver, cache) + deviation_min = deviation_min_threaded[stride_size * Threads.threadid()] + deviation_max = deviation_max_threaded[stride_size * Threads.threadid()] + for j in eachnode(solver), i in eachnode(solver) + var = u[v, i, j, element] + deviation_min = max(deviation_min, + variable_bounds[key_min][i, j, element] - var) + deviation_max = max(deviation_max, + var - variable_bounds[key_max][i, j, element]) + end + deviation_min_threaded[stride_size * Threads.threadid()] = deviation_min + deviation_max_threaded[stride_size * Threads.threadid()] = deviation_max end - deviation_min[2] = max(deviation_min[2], deviation_min[1]) - deviation_max[2] = max(deviation_max[2], deviation_max[1]) end end if positivity @@ -38,17 +49,42 @@ continue end key = Symbol(string(v), "_min") - deviation = idp_bounds_delta[key] - for element in eachelement(solver, cache), j in eachnode(solver), - i in eachnode(solver) - - var = u[v, i, j, element] - deviation[1] = max(deviation[1], - variable_bounds[key][i, j, element] - var) + deviation_threaded = idp_bounds_delta_local[key] + @threaded for element in eachelement(solver, cache) + deviation = deviation_threaded[stride_size * Threads.threadid()] + for j in eachnode(solver), i in eachnode(solver) + var = u[v, i, j, element] + deviation = max(deviation, + variable_bounds[key][i, j, element] - var) + end + deviation_threaded[stride_size * Threads.threadid()] = deviation end - deviation[2] = max(deviation[2], deviation[1]) end + for variable in limiter.positivity_variables_nonlinear + key = Symbol(string(variable), "_min") + deviation_threaded = idp_bounds_delta_local[key] + @threaded for element in eachelement(solver, cache) + deviation = deviation_threaded[stride_size * Threads.threadid()] + for j in eachnode(solver), i in eachnode(solver) + var = variable(get_node_vars(u, equations, solver, i, j, element), + equations) + deviation = max(deviation, + variable_bounds[key][i, j, element] - var) + end + deviation_threaded[stride_size * Threads.threadid()] = deviation + end + end + end + + for (key, _) in idp_bounds_delta_local + # Calculate maximum deviations of all threads + idp_bounds_delta_local[key][stride_size] = maximum(idp_bounds_delta_local[key][stride_size * i] + for i in 1:Threads.nthreads()) + # Update global maximum deviations + idp_bounds_delta_global[key] = max(idp_bounds_delta_global[key], + idp_bounds_delta_local[key][stride_size]) end + if save_errors # Print to output file open("$output_directory/deviations.txt", "a") do f @@ 
-56,8 +92,10 @@ if local_minmax for v in limiter.local_minmax_variables_cons v_string = string(v) - print(f, ", ", idp_bounds_delta[Symbol(v_string, "_min")][1], ", ", - idp_bounds_delta[Symbol(v_string, "_max")][1]) + print(f, ", ", + idp_bounds_delta_local[Symbol(v_string, "_min")][stride_size], + ", ", + idp_bounds_delta_local[Symbol(v_string, "_max")][stride_size]) end end if positivity @@ -65,14 +103,21 @@ if v in limiter.local_minmax_variables_cons continue end - print(f, ", ", idp_bounds_delta[Symbol(string(v), "_min")][1]) + print(f, ", ", + idp_bounds_delta_local[Symbol(string(v), "_min")][stride_size]) + end + for variable in limiter.positivity_variables_nonlinear + print(f, ", ", + idp_bounds_delta_local[Symbol(string(variable), "_min")][stride_size]) end end println(f) end - # Reset first entries of idp_bounds_delta - for (key, _) in idp_bounds_delta - idp_bounds_delta[key][1] = zero(eltype(idp_bounds_delta[key][1])) + # Reset local maximum deviations + for (key, _) in idp_bounds_delta_local + for i in 1:Threads.nthreads() + idp_bounds_delta_local[key][stride_size * i] = zero(eltype(idp_bounds_delta_local[key][stride_size])) + end end end diff --git a/src/callbacks_step/amr.jl b/src/callbacks_step/amr.jl index 5854c8617c3..6f57d6647fc 100644 --- a/src/callbacks_step/amr.jl +++ b/src/callbacks_step/amr.jl @@ -726,7 +726,7 @@ function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::P4estMesh, return has_changed end -function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::SerialT8codeMesh, +function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::T8codeMesh, equations, dg::DG, cache, semi, t, iter; only_refine = false, only_coarsen = false, @@ -754,29 +754,29 @@ function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::SerialT8codeMe @trixi_timeit timer() "adapt" begin difference = @trixi_timeit timer() "mesh" trixi_t8_adapt!(mesh, indicators) - @trixi_timeit timer() "solver" adapt!(u_ode, adaptor, mesh, equations, dg, - cache, difference) - end + # Store whether there were any cells coarsened or refined and perform load balancing. + has_changed = any(difference .!= 0) - # Store whether there were any cells coarsened or refined and perform load balancing. - has_changed = any(difference .!= 0) + # Check if mesh changed on other processes + if mpi_isparallel() + has_changed = MPI.Allreduce!(Ref(has_changed), |, mpi_comm())[] + end - # TODO: T8codeMesh for MPI not implemented yet. - # Check if mesh changed on other processes - # if mpi_isparallel() - # has_changed = MPI.Allreduce!(Ref(has_changed), |, mpi_comm())[] - # end + if has_changed + @trixi_timeit timer() "solver" adapt!(u_ode, adaptor, mesh, equations, dg, + cache, difference) + end + end if has_changed - # TODO: T8codeMesh for MPI not implemented yet. 
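The `has_changed` synchronization added above follows the usual MPI reduction pattern; a minimal stand-alone sketch with plain MPI.jl, outside of Trixi.jl's `mpi_comm()` infrastructure:

```julia
using MPI

MPI.Init()
# Each rank decides locally whether its part of the mesh changed ...
has_changed_local = MPI.Comm_rank(MPI.COMM_WORLD) == 0
# ... and a logical-or Allreduce gives every rank the global answer, analogous
# to `MPI.Allreduce!(Ref(has_changed), |, mpi_comm())[]` in the callback above.
has_changed = MPI.Allreduce(has_changed_local, |, MPI.COMM_WORLD)
```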
- # if mpi_isparallel() && amr_callback.dynamic_load_balancing - # @trixi_timeit timer() "dynamic load balancing" begin - # global_first_quadrant = unsafe_wrap(Array, mesh.p4est.global_first_quadrant, mpi_nranks() + 1) - # old_global_first_quadrant = copy(global_first_quadrant) - # partition!(mesh) - # rebalance_solver!(u_ode, mesh, equations, dg, cache, old_global_first_quadrant) - # end - # end + if mpi_isparallel() && amr_callback.dynamic_load_balancing + @trixi_timeit timer() "dynamic load balancing" begin + old_global_first_element_ids = get_global_first_element_ids(mesh) + partition!(mesh) + rebalance_solver!(u_ode, mesh, equations, dg, cache, + old_global_first_element_ids) + end + end reinitialize_boundaries!(semi.boundary_conditions, cache) end diff --git a/src/callbacks_step/amr_dg.jl b/src/callbacks_step/amr_dg.jl index 1dcfdccdea8..0a7055af409 100644 --- a/src/callbacks_step/amr_dg.jl +++ b/src/callbacks_step/amr_dg.jl @@ -6,11 +6,14 @@ #! format: noindent # Redistribute data for load balancing after partitioning the mesh -function rebalance_solver!(u_ode::AbstractVector, mesh::ParallelP4estMesh, equations, +function rebalance_solver!(u_ode::AbstractVector, + mesh::Union{ParallelP4estMesh, ParallelT8codeMesh}, + equations, dg::DGSEM, cache, old_global_first_quadrant) - # mpi ranks are 0-based, this array uses 1-based indices - global_first_quadrant = unsafe_wrap(Array, mesh.p4est.global_first_quadrant, - mpi_nranks() + 1) + + # MPI ranks are 0-based. This array uses 1-based indices. + global_first_quadrant = get_global_first_element_ids(mesh) + if global_first_quadrant[mpi_rank() + 1] == old_global_first_quadrant[mpi_rank() + 1] && global_first_quadrant[mpi_rank() + 2] == diff --git a/src/callbacks_step/amr_dg2d.jl b/src/callbacks_step/amr_dg2d.jl index 98e531295b7..94524b23a3a 100644 --- a/src/callbacks_step/amr_dg2d.jl +++ b/src/callbacks_step/amr_dg2d.jl @@ -385,7 +385,12 @@ function adapt!(u_ode::AbstractVector, adaptor, mesh::T8codeMesh{2}, equations, # Return early if there is nothing to do. if !any(difference .!= 0) - return nothing + if mpi_isparallel() + # MPICache init uses all-to-all communication -> reinitialize even if there is nothing to do + # locally (there still might be other MPI ranks that have refined elements) + reinitialize_containers!(mesh, equations, dg, cache) + end + return end # Number of (local) cells/elements. @@ -396,7 +401,7 @@ function adapt!(u_ode::AbstractVector, adaptor, mesh::T8codeMesh{2}, equations, old_index = 1 new_index = 1 - # Note: This is true for `quads` only. + # Note: This is true for `quads`. T8_CHILDREN = 4 # Retain current solution data. diff --git a/src/callbacks_step/amr_dg3d.jl b/src/callbacks_step/amr_dg3d.jl index c8abe6fdb05..3f67951bafe 100644 --- a/src/callbacks_step/amr_dg3d.jl +++ b/src/callbacks_step/amr_dg3d.jl @@ -304,9 +304,89 @@ end # this method is called when an `ControllerThreeLevel` is constructed function create_cache(::Type{ControllerThreeLevel}, - mesh::Union{TreeMesh{3}, P4estMesh{3}}, + mesh::Union{TreeMesh{3}, P4estMesh{3}, T8codeMesh{3}}, equations, dg::DG, cache) controller_value = Vector{Int}(undef, nelements(dg, cache)) return (; controller_value) end + +# Coarsen and refine elements in the DG solver based on a difference list. +function adapt!(u_ode::AbstractVector, adaptor, mesh::T8codeMesh{3}, equations, + dg::DGSEM, cache, difference) + + # Return early if there is nothing to do. 
+ if !any(difference .!= 0) + if mpi_isparallel() + # MPICache init uses all-to-all communication -> reinitialize even if there is nothing to do + # locally (there still might be other MPI ranks that have refined elements) + reinitialize_containers!(mesh, equations, dg, cache) + end + return + end + + # Number of (local) cells/elements. + old_nelems = nelements(dg, cache) + new_nelems = ncells(mesh) + + # Local element indices. + old_index = 1 + new_index = 1 + + # Note: This is only true for `hexs`. + T8_CHILDREN = 8 + + # Retain current solution data. + old_u_ode = copy(u_ode) + + GC.@preserve old_u_ode begin + old_u = wrap_array(old_u_ode, mesh, equations, dg, cache) + + reinitialize_containers!(mesh, equations, dg, cache) + + resize!(u_ode, + nvariables(equations) * ndofs(mesh, dg, cache)) + u = wrap_array(u_ode, mesh, equations, dg, cache) + + u_tmp1 = Array{eltype(u), 4}(undef, nvariables(equations), nnodes(dg), + nnodes(dg), nnodes(dg)) + u_tmp2 = Array{eltype(u), 4}(undef, nvariables(equations), nnodes(dg), + nnodes(dg), nnodes(dg)) + + while old_index <= old_nelems && new_index <= new_nelems + if difference[old_index] > 0 # Refine. + + # Refine element and store solution directly in new data structure. + refine_element!(u, new_index, old_u, old_index, adaptor, equations, dg, + u_tmp1, u_tmp2) + + old_index += 1 + new_index += T8_CHILDREN + + elseif difference[old_index] < 0 # Coarsen. + + # If an element is to be removed, sanity check if the following elements + # are also marked - otherwise there would be an error in the way the + # cells/elements are sorted. + @assert all(difference[old_index:(old_index + T8_CHILDREN - 1)] .< 0) "bad cell/element order" + + # Coarsen elements and store solution directly in new data structure. + coarsen_elements!(u, new_index, old_u, old_index, adaptor, equations, + dg, u_tmp1, u_tmp2) + + old_index += T8_CHILDREN + new_index += 1 + + else # No changes. + + # Copy old element data to new element container. 
+ @views u[:, .., new_index] .= old_u[:, .., old_index] + + old_index += 1 + new_index += 1 + end + end # while + end # GC.@preserve old_u_ode + + return nothing +end end # @muladd diff --git a/src/callbacks_step/analysis_dg2d_parallel.jl b/src/callbacks_step/analysis_dg2d_parallel.jl index a04bf732604..000daa015dc 100644 --- a/src/callbacks_step/analysis_dg2d_parallel.jl +++ b/src/callbacks_step/analysis_dg2d_parallel.jl @@ -91,7 +91,8 @@ function calc_error_norms_per_element(func, u, t, analyzer, end function calc_error_norms(func, u, t, analyzer, - mesh::ParallelP4estMesh{2}, equations, + mesh::Union{ParallelP4estMesh{2}, ParallelT8codeMesh{2}}, + equations, initial_condition, dg::DGSEM, cache, cache_analysis) @unpack vandermonde, weights = analyzer @unpack node_coordinates, inverse_jacobian = cache.elements @@ -171,7 +172,8 @@ function integrate_via_indices(func::Func, u, end function integrate_via_indices(func::Func, u, - mesh::ParallelP4estMesh{2}, equations, + mesh::Union{ParallelP4estMesh{2}, ParallelT8codeMesh{2}}, + equations, dg::DGSEM, cache, args...; normalize = true) where {Func} @unpack weights = dg.basis diff --git a/src/callbacks_step/analysis_dg3d.jl b/src/callbacks_step/analysis_dg3d.jl index 81d0795a159..27e8a2b722f 100644 --- a/src/callbacks_step/analysis_dg3d.jl +++ b/src/callbacks_step/analysis_dg3d.jl @@ -35,7 +35,9 @@ function create_cache_analysis(analyzer, mesh::TreeMesh{3}, return (; u_local, u_tmp1, u_tmp2, x_local, x_tmp1, x_tmp2) end -function create_cache_analysis(analyzer, mesh::Union{StructuredMesh{3}, P4estMesh{3}}, +function create_cache_analysis(analyzer, + mesh::Union{StructuredMesh{3}, P4estMesh{3}, + T8codeMesh{3}}, equations, dg::DG, cache, RealT, uEltype) @@ -118,7 +120,7 @@ function calc_error_norms(func, u, t, analyzer, end function calc_error_norms(func, u, t, analyzer, - mesh::Union{StructuredMesh{3}, P4estMesh{3}}, + mesh::Union{StructuredMesh{3}, P4estMesh{3}, T8codeMesh{3}}, equations, initial_condition, dg::DGSEM, cache, cache_analysis) @unpack vandermonde, weights = analyzer @@ -190,7 +192,8 @@ function integrate_via_indices(func::Func, u, end function integrate_via_indices(func::Func, u, - mesh::Union{StructuredMesh{3}, P4estMesh{3}}, + mesh::Union{StructuredMesh{3}, P4estMesh{3}, + T8codeMesh{3}}, equations, dg::DGSEM, cache, args...; normalize = true) where {Func} @unpack weights = dg.basis @@ -218,7 +221,8 @@ function integrate_via_indices(func::Func, u, end function integrate(func::Func, u, - mesh::Union{TreeMesh{3}, StructuredMesh{3}, P4estMesh{3}}, + mesh::Union{TreeMesh{3}, StructuredMesh{3}, P4estMesh{3}, + T8codeMesh{3}}, equations, dg::DG, cache; normalize = true) where {Func} integrate_via_indices(u, mesh, equations, dg, cache; normalize = normalize) do u, i, j, k, element, equations, dg @@ -248,7 +252,8 @@ function integrate(func::Func, u, end function analyze(::typeof(entropy_timederivative), du, u, t, - mesh::Union{TreeMesh{3}, StructuredMesh{3}, P4estMesh{3}}, + mesh::Union{TreeMesh{3}, StructuredMesh{3}, P4estMesh{3}, + T8codeMesh{3}}, equations, dg::DG, cache) # Calculate ∫(∂S/∂u ⋅ ∂u/∂t)dΩ integrate_via_indices(u, mesh, equations, dg, cache, @@ -277,7 +282,7 @@ function analyze(::Val{:l2_divb}, du, u, t, end function analyze(::Val{:l2_divb}, du, u, t, - mesh::Union{StructuredMesh{3}, P4estMesh{3}}, + mesh::Union{StructuredMesh{3}, P4estMesh{3}, T8codeMesh{3}}, equations::IdealGlmMhdEquations3D, dg::DGSEM, cache) @unpack contravariant_vectors = cache.elements @@ -333,7 +338,7 @@ function analyze(::Val{:linf_divb}, du, u, 
t, end function analyze(::Val{:linf_divb}, du, u, t, - mesh::Union{StructuredMesh{3}, P4estMesh{3}}, + mesh::Union{StructuredMesh{3}, P4estMesh{3}, T8codeMesh{3}}, equations::IdealGlmMhdEquations3D, dg::DGSEM, cache) @unpack derivative_matrix, weights = dg.basis diff --git a/src/callbacks_step/analysis_dg3d_parallel.jl b/src/callbacks_step/analysis_dg3d_parallel.jl index d8756d91c9d..de777be406d 100644 --- a/src/callbacks_step/analysis_dg3d_parallel.jl +++ b/src/callbacks_step/analysis_dg3d_parallel.jl @@ -6,7 +6,8 @@ #! format: noindent function calc_error_norms(func, u, t, analyzer, - mesh::ParallelP4estMesh{3}, equations, + mesh::Union{ParallelP4estMesh{3}, ParallelT8codeMesh{3}}, + equations, initial_condition, dg::DGSEM, cache, cache_analysis) @unpack vandermonde, weights = analyzer @unpack node_coordinates, inverse_jacobian = cache.elements @@ -64,7 +65,8 @@ function calc_error_norms(func, u, t, analyzer, end function integrate_via_indices(func::Func, u, - mesh::ParallelP4estMesh{3}, equations, + mesh::Union{ParallelP4estMesh{3}, ParallelT8codeMesh{3}}, + equations, dg::DGSEM, cache, args...; normalize = true) where {Func} @unpack weights = dg.basis diff --git a/src/callbacks_step/stepsize_dg2d.jl b/src/callbacks_step/stepsize_dg2d.jl index 673c3ba6aa6..c6d32c0f6dc 100644 --- a/src/callbacks_step/stepsize_dg2d.jl +++ b/src/callbacks_step/stepsize_dg2d.jl @@ -174,4 +174,36 @@ function max_dt(u, t, mesh::ParallelP4estMesh{2}, return dt end + +function max_dt(u, t, mesh::ParallelT8codeMesh{2}, + constant_speed::False, equations, dg::DG, cache) + # call the method accepting a general `mesh::T8codeMesh{2}` + # TODO: MPI, we should improve this; maybe we should dispatch on `u` + # and create some MPI array type, overloading broadcasting and mapreduce etc. + # Then, this specific array type should also work well with DiffEq etc. + dt = invoke(max_dt, + Tuple{typeof(u), typeof(t), T8codeMesh{2}, + typeof(constant_speed), typeof(equations), typeof(dg), + typeof(cache)}, + u, t, mesh, constant_speed, equations, dg, cache) + dt = MPI.Allreduce!(Ref(dt), min, mpi_comm())[] + + return dt +end + +function max_dt(u, t, mesh::ParallelT8codeMesh{2}, + constant_speed::True, equations, dg::DG, cache) + # call the method accepting a general `mesh::T8codeMesh{2}` + # TODO: MPI, we should improve this; maybe we should dispatch on `u` + # and create some MPI array type, overloading broadcasting and mapreduce etc. + # Then, this specific array type should also work well with DiffEq etc. + dt = invoke(max_dt, + Tuple{typeof(u), typeof(t), T8codeMesh{2}, + typeof(constant_speed), typeof(equations), typeof(dg), + typeof(cache)}, + u, t, mesh, constant_speed, equations, dg, cache) + dt = MPI.Allreduce!(Ref(dt), min, mpi_comm())[] + + return dt +end end # @muladd diff --git a/src/callbacks_step/stepsize_dg3d.jl b/src/callbacks_step/stepsize_dg3d.jl index c9ab7c478a8..664596f989e 100644 --- a/src/callbacks_step/stepsize_dg3d.jl +++ b/src/callbacks_step/stepsize_dg3d.jl @@ -44,7 +44,7 @@ function max_dt(u, t, mesh::TreeMesh{3}, return 2 / (nnodes(dg) * max_scaled_speed) end -function max_dt(u, t, mesh::Union{StructuredMesh{3}, P4estMesh{3}}, +function max_dt(u, t, mesh::Union{StructuredMesh{3}, P4estMesh{3}, T8codeMesh{3}}, constant_speed::False, equations, dg::DG, cache) # to avoid a division by zero if the speed vanishes everywhere, # e.g. 
for steady-state linear advection @@ -82,7 +82,7 @@ function max_dt(u, t, mesh::Union{StructuredMesh{3}, P4estMesh{3}}, return 2 / (nnodes(dg) * max_scaled_speed) end -function max_dt(u, t, mesh::Union{StructuredMesh{3}, P4estMesh{3}}, +function max_dt(u, t, mesh::Union{StructuredMesh{3}, P4estMesh{3}, T8codeMesh{3}}, constant_speed::True, equations, dg::DG, cache) # to avoid a division by zero if the speed vanishes everywhere, # e.g. for steady-state linear advection @@ -150,4 +150,36 @@ function max_dt(u, t, mesh::ParallelP4estMesh{3}, return dt end + +function max_dt(u, t, mesh::ParallelT8codeMesh{3}, + constant_speed::False, equations, dg::DG, cache) + # call the method accepting a general `mesh::T8codeMesh{3}` + # TODO: MPI, we should improve this; maybe we should dispatch on `u` + # and create some MPI array type, overloading broadcasting and mapreduce etc. + # Then, this specific array type should also work well with DiffEq etc. + dt = invoke(max_dt, + Tuple{typeof(u), typeof(t), T8codeMesh{3}, + typeof(constant_speed), typeof(equations), typeof(dg), + typeof(cache)}, + u, t, mesh, constant_speed, equations, dg, cache) + dt = MPI.Allreduce!(Ref(dt), min, mpi_comm())[] + + return dt +end + +function max_dt(u, t, mesh::ParallelT8codeMesh{3}, + constant_speed::True, equations, dg::DG, cache) + # call the method accepting a general `mesh::T8codeMesh{3}` + # TODO: MPI, we should improve this; maybe we should dispatch on `u` + # and create some MPI array type, overloading broadcasting and mapreduce etc. + # Then, this specific array type should also work well with DiffEq etc. + dt = invoke(max_dt, + Tuple{typeof(u), typeof(t), T8codeMesh{3}, + typeof(constant_speed), typeof(equations), typeof(dg), + typeof(cache)}, + u, t, mesh, constant_speed, equations, dg, cache) + dt = MPI.Allreduce!(Ref(dt), min, mpi_comm())[] + + return dt +end end # @muladd diff --git a/src/callbacks_step/trivial.jl b/src/callbacks_step/trivial.jl index a55b7d85b13..fb93cf96c0c 100644 --- a/src/callbacks_step/trivial.jl +++ b/src/callbacks_step/trivial.jl @@ -8,8 +8,8 @@ """ TrivialCallback() -A callback that does nothing. This can be useful to disable some callbacks -easily via [`trixi_include`](@ref). +A callback that does nothing. This can be useful to disable some callbacks easily via +[`trixi_include`](@ref). """ function TrivialCallback() DiscreteCallback(trivial_callback, trivial_callback, diff --git a/src/equations/compressible_euler_2d.jl b/src/equations/compressible_euler_2d.jl index b0fd5c53f45..f5a632723cf 100644 --- a/src/equations/compressible_euler_2d.jl +++ b/src/equations/compressible_euler_2d.jl @@ -809,6 +809,98 @@ end return SVector(f1m, f2m, f3m, f4m) end +""" + FluxLMARS(c)(u_ll, u_rr, orientation_or_normal_direction, + equations::CompressibleEulerEquations2D) + +Low Mach number approximate Riemann solver (LMARS) for atmospheric flows using +an estimate `c` of the speed of sound. + +References: +- Xi Chen et al. 
(2013) + A Control-Volume Model of the Compressible Euler Equations with a Vertical + Lagrangian Coordinate + [DOI: 10.1175/MWR-D-12-00129.1](https://doi.org/10.1175/mwr-d-12-00129.1) +""" +struct FluxLMARS{SpeedOfSound} + # Estimate for the speed of sound + speed_of_sound::SpeedOfSound +end + +@inline function (flux_lmars::FluxLMARS)(u_ll, u_rr, orientation::Integer, + equations::CompressibleEulerEquations2D) + c = flux_lmars.speed_of_sound + + # Unpack left and right state + rho_ll, v1_ll, v2_ll, p_ll = cons2prim(u_ll, equations) + rho_rr, v1_rr, v2_rr, p_rr = cons2prim(u_rr, equations) + + if orientation == 1 + v_ll = v1_ll + v_rr = v1_rr + else # orientation == 2 + v_ll = v2_ll + v_rr = v2_rr + end + + rho = 0.5 * (rho_ll + rho_rr) + p = 0.5 * (p_ll + p_rr) - 0.5 * c * rho * (v_rr - v_ll) + v = 0.5 * (v_ll + v_rr) - 1 / (2 * c * rho) * (p_rr - p_ll) + + # We treat the energy term analogous to the potential temperature term in the paper by + # Chen et al., i.e. we use p_ll and p_rr, and not p + if v >= 0 + f1, f2, f3, f4 = v * u_ll + f4 = f4 + p_ll * v + else + f1, f2, f3, f4 = v * u_rr + f4 = f4 + p_rr * v + end + + if orientation == 1 + f2 = f2 + p + else # orientation == 2 + f3 = f3 + p + end + + return SVector(f1, f2, f3, f4) +end + +@inline function (flux_lmars::FluxLMARS)(u_ll, u_rr, normal_direction::AbstractVector, + equations::CompressibleEulerEquations2D) + c = flux_lmars.speed_of_sound + + # Unpack left and right state + rho_ll, v1_ll, v2_ll, p_ll = cons2prim(u_ll, equations) + rho_rr, v1_rr, v2_rr, p_rr = cons2prim(u_rr, equations) + + v_ll = v1_ll * normal_direction[1] + v2_ll * normal_direction[2] + v_rr = v1_rr * normal_direction[1] + v2_rr * normal_direction[2] + + # Note that this is the same as computing v_ll and v_rr with a normalized normal vector + # and then multiplying v by `norm_` again, but this version is slightly faster. + norm_ = norm(normal_direction) + + rho = 0.5 * (rho_ll + rho_rr) + p = 0.5 * (p_ll + p_rr) - 0.5 * c * rho * (v_rr - v_ll) / norm_ + v = 0.5 * (v_ll + v_rr) - 1 / (2 * c * rho) * (p_rr - p_ll) * norm_ + + # We treat the energy term analogous to the potential temperature term in the paper by + # Chen et al., i.e. 
we use p_ll and p_rr, and not p + if v >= 0 + f1, f2, f3, f4 = u_ll * v + f4 = f4 + p_ll * v + else + f1, f2, f3, f4 = u_rr * v + f4 = f4 + p_rr * v + end + + return SVector(f1, + f2 + p * normal_direction[1], + f3 + p * normal_direction[2], + f4) +end + """ splitting_vanleer_haenel(u, orientation::Integer, equations::CompressibleEulerEquations2D) @@ -1540,6 +1632,18 @@ end return p end +# Transformation from conservative variables u to d(p)/d(u) +@inline function gradient_conservative(::typeof(pressure), + u, equations::CompressibleEulerEquations2D) + rho, rho_v1, rho_v2, rho_e = u + + v1 = rho_v1 / rho + v2 = rho_v2 / rho + v_square = v1^2 + v2^2 + + return (equations.gamma - 1.0) * SVector(0.5 * v_square, -v1, -v2, 1.0) +end + @inline function density_pressure(u, equations::CompressibleEulerEquations2D) rho, rho_v1, rho_v2, rho_e = u rho_times_p = (equations.gamma - 1) * (rho * rho_e - 0.5 * (rho_v1^2 + rho_v2^2)) @@ -1607,4 +1711,13 @@ end @inline function energy_internal(cons, equations::CompressibleEulerEquations2D) return energy_total(cons, equations) - energy_kinetic(cons, equations) end + +# State validation for Newton-bisection method of subcell IDP limiting +@inline function Base.isvalid(u, equations::CompressibleEulerEquations2D) + p = pressure(u, equations) + if u[1] <= 0.0 || p <= 0.0 + return false + end + return true +end end # @muladd diff --git a/src/equations/compressible_euler_3d.jl b/src/equations/compressible_euler_3d.jl index 82c4a7efa32..292b912f009 100644 --- a/src/equations/compressible_euler_3d.jl +++ b/src/equations/compressible_euler_3d.jl @@ -944,11 +944,6 @@ References: Lagrangian Coordinate [DOI: 10.1175/MWR-D-12-00129.1](https://doi.org/10.1175/mwr-d-12-00129.1) """ -struct FluxLMARS{SpeedOfSound} - # Estimate for the speed of sound - speed_of_sound::SpeedOfSound -end - @inline function (flux_lmars::FluxLMARS)(u_ll, u_rr, orientation::Integer, equations::CompressibleEulerEquations3D) c = flux_lmars.speed_of_sound @@ -972,10 +967,14 @@ end p = 0.5 * (p_ll + p_rr) - 0.5 * c * rho * (v_rr - v_ll) v = 0.5 * (v_ll + v_rr) - 1 / (2 * c * rho) * (p_rr - p_ll) + # We treat the energy term analogous to the potential temperature term in the paper by + # Chen et al., i.e. we use p_ll and p_rr, and not p if v >= 0 f1, f2, f3, f4, f5 = v * u_ll + f5 = f5 + p_ll * v else f1, f2, f3, f4, f5 = v * u_rr + f5 = f5 + p_rr * v end if orientation == 1 @@ -985,7 +984,6 @@ end else # orientation == 3 f4 += p end - f5 += p * v return SVector(f1, f2, f3, f4, f5) end @@ -1011,18 +1009,21 @@ end p = 0.5 * (p_ll + p_rr) - 0.5 * c * rho * (v_rr - v_ll) / norm_ v = 0.5 * (v_ll + v_rr) - 1 / (2 * c * rho) * (p_rr - p_ll) * norm_ + # We treat the energy term analogous to the potential temperature term in the paper by + # Chen et al., i.e. we use p_ll and p_rr, and not p if v >= 0 f1, f2, f3, f4, f5 = v * u_ll + f5 = f5 + p_ll * v else f1, f2, f3, f4, f5 = v * u_rr + f5 = f5 + p_rr * v end - f2 += p * normal_direction[1] - f3 += p * normal_direction[2] - f4 += p * normal_direction[3] - f5 += p * v - - return SVector(f1, f2, f3, f4, f5) + return SVector(f1, + f2 + p * normal_direction[1], + f3 + p * normal_direction[2], + f4 + p * normal_direction[3], + f5) end # Calculate maximum wave speed for local Lax-Friedrichs-type dissipation as the diff --git a/src/equations/equations.jl b/src/equations/equations.jl index 7a3c326984d..65875a2a7e5 100644 --- a/src/equations/equations.jl +++ b/src/equations/equations.jl @@ -376,6 +376,12 @@ of the correct length `nvariables(equations)`. 
""" function energy_internal end +# Default implementation of gradient for `variable`. Used for subcell limiting. +# Implementing a gradient function for a specific variable improves the performance. +@inline function gradient_conservative(variable, u, equations) + return ForwardDiff.gradient(x -> variable(x, equations), u) +end + #################################################################################################### # Include files with actual implementations for different systems of equations. @@ -501,4 +507,9 @@ include("linearized_euler_2d.jl") abstract type AbstractEquationsParabolic{NDIMS, NVARS, GradientVariables} <: AbstractEquations{NDIMS, NVARS} end + +# Lighthill-Witham-Richards (LWR) traffic flow model +abstract type AbstractTrafficFlowLWREquations{NDIMS, NVARS} <: + AbstractEquations{NDIMS, NVARS} end +include("traffic_flow_lwr_1d.jl") end # @muladd diff --git a/src/equations/ideal_glm_mhd_2d.jl b/src/equations/ideal_glm_mhd_2d.jl index 43d1991e34b..4366cd32f08 100644 --- a/src/equations/ideal_glm_mhd_2d.jl +++ b/src/equations/ideal_glm_mhd_2d.jl @@ -1118,6 +1118,20 @@ end return p end +# Transformation from conservative variables u to d(p)/d(u) +@inline function gradient_conservative(::typeof(pressure), + u, equations::IdealGlmMhdEquations2D) + rho, rho_v1, rho_v2, rho_v3, rho_e, B1, B2, B3, psi = u + + v1 = rho_v1 / rho + v2 = rho_v2 / rho + v3 = rho_v3 / rho + v_square = v1^2 + v2^2 + v3^2 + + return (equations.gamma - 1.0) * + SVector(0.5 * v_square, -v1, -v2, -v3, 1.0, -B1, -B2, -B3, -psi) +end + @inline function density_pressure(u, equations::IdealGlmMhdEquations2D) rho, rho_v1, rho_v2, rho_v3, rho_e, B1, B2, B3, psi = u p = (equations.gamma - 1) * (rho_e - 0.5 * (rho_v1^2 + rho_v2^2 + rho_v3^2) / rho @@ -1384,6 +1398,15 @@ end cons[9]^2 / 2) end +# State validation for Newton-bisection method of subcell IDP limiting +@inline function Base.isvalid(u, equations::IdealGlmMhdEquations2D) + p = pressure(u, equations) + if u[1] <= 0.0 || p <= 0.0 + return false + end + return true +end + # Calculate the cross helicity (\vec{v}⋅\vec{B}) for a conservative state `cons' @inline function cross_helicity(cons, ::IdealGlmMhdEquations2D) return (cons[2] * cons[6] + cons[3] * cons[7] + cons[4] * cons[8]) / cons[1] diff --git a/src/equations/polytropic_euler_2d.jl b/src/equations/polytropic_euler_2d.jl index f5d2f7b0bad..e900fd64073 100644 --- a/src/equations/polytropic_euler_2d.jl +++ b/src/equations/polytropic_euler_2d.jl @@ -301,6 +301,46 @@ end return abs(v1) + c, abs(v2) + c end +# Calculate maximum wave speed for local Lax-Friedrichs-type dissipation as the +# maximum velocity magnitude plus the maximum speed of sound +@inline function max_abs_speed_naive(u_ll, u_rr, orientation::Integer, + equations::PolytropicEulerEquations2D) + rho_ll, v1_ll, v2_ll = cons2prim(u_ll, equations) + rho_rr, v1_rr, v2_rr = cons2prim(u_rr, equations) + + # Get the velocity value in the appropriate direction + if orientation == 1 + v_ll = v1_ll + v_rr = v1_rr + else # orientation == 2 + v_ll = v2_ll + v_rr = v2_rr + end + # Calculate sound speeds (we have p = kappa * rho^gamma) + c_ll = sqrt(equations.gamma * equations.kappa * rho_ll^(equations.gamma - 1)) + c_rr = sqrt(equations.gamma * equations.kappa * rho_rr^(equations.gamma - 1)) + + λ_max = max(abs(v_ll), abs(v_rr)) + max(c_ll, c_rr) +end + +@inline function max_abs_speed_naive(u_ll, u_rr, normal_direction::AbstractVector, + equations::PolytropicEulerEquations2D) + rho_ll, v1_ll, v2_ll = cons2prim(u_ll, equations) + rho_rr, v1_rr, 
v2_rr = cons2prim(u_rr, equations) + + # Calculate normal velocities and sound speed (we have p = kappa * rho^gamma) + # left + v_ll = (v1_ll * normal_direction[1] + + v2_ll * normal_direction[2]) + c_ll = sqrt(equations.gamma * equations.kappa * rho_ll^(equations.gamma - 1)) + # right + v_rr = (v1_rr * normal_direction[1] + + v2_rr * normal_direction[2]) + c_rr = sqrt(equations.gamma * equations.kappa * rho_rr^(equations.gamma - 1)) + + return max(abs(v_ll), abs(v_rr)) + max(c_ll, c_rr) * norm(normal_direction) +end + # Convert conservative variables to primitive @inline function cons2prim(u, equations::PolytropicEulerEquations2D) rho, rho_v1, rho_v2 = u diff --git a/src/equations/traffic_flow_lwr_1d.jl b/src/equations/traffic_flow_lwr_1d.jl new file mode 100644 index 00000000000..a4d2613a5c8 --- /dev/null +++ b/src/equations/traffic_flow_lwr_1d.jl @@ -0,0 +1,116 @@ +# By default, Julia/LLVM does not use fused multiply-add operations (FMAs). +# Since these FMAs can increase the performance of many numerical algorithms, +# we need to opt-in explicitly. +# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details. +@muladd begin +#! format: noindent + +@doc raw""" + TrafficFlowLWREquations1D + +The classic Lighthill-Whitham-Richards (LWR) model for 1D traffic flow. +The car density is denoted by $u \in [0, 1]$ and +the maximum possible speed (e.g. due to speed limits) is $v_{\text{max}}$. +```math +\partial_t u + v_{\text{max}} \partial_1 [u (1 - u)] = 0 +``` +For more details see e.g. Section 11.1 of +- Randall LeVeque (2002) +Finite Volume Methods for Hyperbolic Problems +[DOI: 10.1017/CBO9780511791253](https://doi.org/10.1017/CBO9780511791253) +""" +struct TrafficFlowLWREquations1D{RealT <: Real} <: AbstractTrafficFlowLWREquations{1, 1} + v_max::RealT + + function TrafficFlowLWREquations1D(v_max = 1.0) + new{typeof(v_max)}(v_max) + end +end + +varnames(::typeof(cons2cons), ::TrafficFlowLWREquations1D) = ("car-density",) +varnames(::typeof(cons2prim), ::TrafficFlowLWREquations1D) = ("car-density",) + +""" + initial_condition_convergence_test(x, t, equations::TrafficFlowLWREquations1D) + +A smooth initial condition used for convergence tests. +""" +function initial_condition_convergence_test(x, t, equations::TrafficFlowLWREquations1D) + c = 2.0 + A = 1.0 + L = 1 + f = 1 / L + omega = 2 * pi * f + scalar = c + A * sin(omega * (x[1] - t)) + + return SVector(scalar) +end + +""" + source_terms_convergence_test(u, x, t, equations::TrafficFlowLWREquations1D) + +Source terms used for convergence tests in combination with +[`initial_condition_convergence_test`](@ref). 
+""" +@inline function source_terms_convergence_test(u, x, t, + equations::TrafficFlowLWREquations1D) + # Same settings as in `initial_condition` + c = 2.0 + A = 1.0 + L = 1 + f = 1 / L + omega = 2 * pi * f + du = omega * cos(omega * (x[1] - t)) * + (-1 - equations.v_max * (2 * sin(omega * (x[1] - t)) + 3)) + + return SVector(du) +end + +# Calculate 1D flux in for a single point +@inline function flux(u, orientation::Integer, equations::TrafficFlowLWREquations1D) + return SVector(equations.v_max * u[1] * (1.0 - u[1])) +end + +# Calculate maximum wave speed for local Lax-Friedrichs-type dissipation +@inline function max_abs_speed_naive(u_ll, u_rr, orientation::Integer, + equations::TrafficFlowLWREquations1D) + λ_max = max(abs(equations.v_max * (1.0 - 2 * u_ll[1])), + abs(equations.v_max * (1.0 - 2 * u_rr[1]))) +end + +# Calculate minimum and maximum wave speeds for HLL-type fluxes +@inline function min_max_speed_naive(u_ll, u_rr, orientation::Integer, + equations::TrafficFlowLWREquations1D) + jac_L = equations.v_max * (1.0 - 2 * u_ll[1]) + jac_R = equations.v_max * (1.0 - 2 * u_rr[1]) + + λ_min = min(jac_L, jac_R) + λ_max = max(jac_L, jac_R) + + return λ_min, λ_max +end + +@inline function min_max_speed_davis(u_ll, u_rr, orientation::Integer, + equations::TrafficFlowLWREquations1D) + min_max_speed_naive(u_ll, u_rr, orientation, equations) +end + +@inline function max_abs_speeds(u, equations::TrafficFlowLWREquations1D) + return (abs(equations.v_max * (1.0 - 2 * u[1])),) +end + +# Convert conservative variables to primitive +@inline cons2prim(u, equations::TrafficFlowLWREquations1D) = u + +# Convert conservative variables to entropy variables +@inline cons2entropy(u, equations::TrafficFlowLWREquations1D) = u + +# Calculate entropy for a conservative state `cons` +@inline entropy(u::Real, ::TrafficFlowLWREquations1D) = 0.5 * u^2 +@inline entropy(u, equations::TrafficFlowLWREquations1D) = entropy(u[1], equations) + +# Calculate total energy for a conservative state `cons` +@inline energy_total(u::Real, ::TrafficFlowLWREquations1D) = 0.5 * u^2 +@inline energy_total(u, equations::TrafficFlowLWREquations1D) = energy_total(u[1], + equations) +end # @muladd diff --git a/src/meshes/p4est_mesh.jl b/src/meshes/p4est_mesh.jl index 60db285e04f..abe9d9345b5 100644 --- a/src/meshes/p4est_mesh.jl +++ b/src/meshes/p4est_mesh.jl @@ -289,7 +289,8 @@ end P4estMesh{NDIMS}(meshfile::String; mapping=nothing, polydeg=1, RealT=Float64, initial_refinement_level=0, unsaved_changes=true, - p4est_partition_allow_for_coarsening=true) + p4est_partition_allow_for_coarsening=true, + boundary_symbols = nothing) Main mesh constructor for the `P4estMesh` that imports an unstructured, conforming mesh from an Abaqus mesh file (`.inp`). Each element of the conforming mesh parsed @@ -310,8 +311,9 @@ To create a curved unstructured mesh `P4estMesh` two strategies are available: straight-sided from the information parsed from the `meshfile`. If a mapping function is specified then it computes the mapped tree coordinates via polynomial interpolants with degree `polydeg`. The mesh created by this function will only - have one boundary `:all`, as distinguishing different physical boundaries is - non-trivial. + have one boundary `:all` if `boundary_symbols` is not specified. + If `boundary_symbols` is specified the mesh file will be parsed for nodesets defining + the boundary nodes from which boundary edges (2D) and faces (3D) will be assigned. 
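A rough sketch of how the new keyword is meant to be used; the file name and nodeset symbols below are hypothetical, only the `boundary_symbols` keyword itself comes from this constructor:

```julia
using Trixi

# Edges (2D) whose corner nodes all lie in one of the listed nodesets get that
# nodeset's name as boundary name; all other faces keep the internal
# placeholder Symbol("---").
mesh = P4estMesh{2}("square_with_named_boundaries.inp";
                    polydeg = 3,
                    boundary_symbols = [:PhysicalLine1, :PhysicalLine2])
```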
Note that the `mapping` and `polydeg` keyword arguments are only used by the `p4est_mesh_from_standard_abaqus` function. The `p4est_mesh_from_hohqmesh_abaqus` function obtains the mesh `polydeg` directly from the `meshfile` @@ -345,11 +347,14 @@ For example, if a two-dimensional base mesh contains 25 elements then setting - `p4est_partition_allow_for_coarsening::Bool`: Must be `true` when using AMR to make mesh adaptivity independent of domain partitioning. Should be `false` for static meshes to permit more fine-grained partitioning. +- `boundary_symbols::Vector{Symbol}`: A vector of symbols that correspond to the boundary names in the `meshfile`. + If `nothing` is passed then all boundaries are named `:all`. """ function P4estMesh{NDIMS}(meshfile::String; mapping = nothing, polydeg = 1, RealT = Float64, initial_refinement_level = 0, unsaved_changes = true, - p4est_partition_allow_for_coarsening = true) where {NDIMS} + p4est_partition_allow_for_coarsening = true, + boundary_symbols = nothing) where {NDIMS} # Prevent `p4est` from crashing Julia if the file doesn't exist @assert isfile(meshfile) @@ -373,7 +378,8 @@ function P4estMesh{NDIMS}(meshfile::String; polydeg, initial_refinement_level, NDIMS, - RealT) + RealT, + boundary_symbols) end return P4estMesh{NDIMS}(p4est, tree_node_coordinates, nodes, @@ -444,7 +450,8 @@ end # the `mapping` passed to this function using polynomial interpolants of degree `polydeg`. All boundary # names are given the name `:all`. function p4est_mesh_from_standard_abaqus(meshfile, mapping, polydeg, - initial_refinement_level, n_dimensions, RealT) + initial_refinement_level, n_dimensions, RealT, + boundary_symbols) # Create the mesh connectivity using `p4est` connectivity = read_inp_p4est(meshfile, Val(n_dimensions)) connectivity_pw = PointerWrapper(connectivity) @@ -469,12 +476,215 @@ function p4est_mesh_from_standard_abaqus(meshfile, mapping, polydeg, p4est = new_p4est(connectivity, initial_refinement_level) - # There's no simple and generic way to distinguish boundaries. Name all of them :all. - boundary_names = fill(:all, 2 * n_dimensions, n_trees) + if boundary_symbols === nothing + # There's no simple and generic way to distinguish boundaries without any information given. + # Name all of them :all. + boundary_names = fill(:all, 2 * n_dimensions, n_trees) + else # Boundary information given + # Read in nodes belonging to boundaries + node_set_dict = parse_node_sets(meshfile, boundary_symbols) + # Read in all elements with associated nodes to specify the boundaries + element_node_matrix = parse_elements(meshfile, n_trees, n_dimensions) + + # Initialize boundary information matrix with symbol for no boundary / internal connection + boundary_names = fill(Symbol("---"), 2 * n_dimensions, n_trees) + + # Fill `boundary_names` such that it can be processed by p4est + assign_boundaries_standard_abaqus!(boundary_names, n_trees, + element_node_matrix, node_set_dict, + Val(n_dimensions)) + end return p4est, tree_node_coordinates, nodes, boundary_names end +function parse_elements(meshfile, n_trees, n_dims) + @assert n_dims in (2, 3) "Only 2D and 3D meshes are supported" + # Valid element types (that can be processed by p4est) based on dimension + element_types = n_dims == 2 ? + ["*ELEMENT, type=CPS4", "*ELEMENT, type=C2D4", + "*ELEMENT, type=S4"] : ["*ELEMENT, type=C3D8"] + # 2D quads: 4 nodes + element index, 3D hexes: 8 nodes + element index + expected_content_length = n_dims == 2 ? 
5 : 9 + + element_node_matrix = Matrix{Int64}(undef, n_trees, expected_content_length - 1) + el_list_follows = false + tree_id = 1 + + open(meshfile, "r") do file + for line in eachline(file) + if any(startswith(line, el_type) for el_type in element_types) + el_list_follows = true + elseif el_list_follows + content = split(line, ",") + if length(content) == expected_content_length # Check that we still read in connectivity data + content_int = parse.(Int64, content) + # Add constituent nodes to the element_node_matrix. + # Important: Do not use index from the Abaqus file, but the one from p4est. + element_node_matrix[tree_id, :] = content_int[2:end] # First entry is element id + tree_id += 1 + else # Processed all elements for this ELSET + el_list_follows = false + end + end + end + end + + return element_node_matrix +end + +function parse_node_sets(meshfile, boundary_symbols) + nodes_dict = Dict{Symbol, Vector{Int64}}() + current_symbol = nothing + current_nodes = Int64[] + + open(meshfile, "r") do file + for line in eachline(file) + # Check if the line contains nodes assembled in a special set, i.e., a physical boundary + if startswith(line, "*NSET,NSET=") + # Safe the previous nodeset + if current_symbol !== nothing + nodes_dict[current_symbol] = current_nodes + end + + current_symbol = Symbol(split(line, "=")[2]) + if current_symbol in boundary_symbols + # New nodeset + current_nodes = Int64[] + else # Read only boundary node sets + current_symbol = nothing + end + elseif current_symbol !== nothing # Read only if there was already a nodeset specified + try # Check if line contains nodes + # There is always a trailing comma, remove the corresponding empty string + append!(current_nodes, parse.(Int64, split(line, ",")[1:(end - 1)])) + catch # Something different, stop reading in nodes + # If parsing fails, set current_symbol to nothing + nodes_dict[current_symbol] = current_nodes + current_symbol = nothing + end + end + end + # Safe the previous nodeset + if current_symbol !== nothing + nodes_dict[current_symbol] = current_nodes + end + end + + for symbol in boundary_symbols + if !haskey(nodes_dict, symbol) + @warn "No nodes found for nodeset :" * "$symbol" * " !" + end + end + + return nodes_dict +end + +# This function assigns the edges of elements to boundaries by +# checking if the nodes that define the edges are part of nodesets which correspond to boundaries. 
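To make this membership test concrete before the implementation that follows, here is a minimal standalone sketch; the node ids and the boundary name are invented for illustration and do not come from any mesh file:

```julia
# Nodes belonging to one boundary nodeset, in the shape returned by `parse_node_sets`
# (ids invented for this sketch)
node_set_dict = Dict(:x_neg => [1, 4, 5, 8])

# Corner nodes of a single quad element, i.e., one row of `element_node_matrix`
tree_nodes = [1, 2, 3, 4]

# An edge is assigned to a boundary only if *both* of its corner nodes are
# contained in the corresponding nodeset.
bottom_on_boundary = tree_nodes[1] in node_set_dict[:x_neg] &&
                     tree_nodes[2] in node_set_dict[:x_neg]  # false: node 2 is not listed
left_on_boundary = tree_nodes[4] in node_set_dict[:x_neg] &&
                   tree_nodes[1] in node_set_dict[:x_neg]    # true: nodes 4 and 1 are both listed
```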
+function assign_boundaries_standard_abaqus!(boundary_names, n_trees, + element_node_matrix, node_set_dict, + ::Val{2}) # 2D version + for tree in 1:n_trees + tree_nodes = element_node_matrix[tree, :] + # For node labeling, see + # https://docs.software.vt.edu/abaqusv2022/English/SIMACAEELMRefMap/simaelm-r-2delem.htm#simaelm-r-2delem-t-nodedef1 + # and search for "Node ordering and face numbering on elements" + for boundary in keys(node_set_dict) # Loop over specified boundaries + # Check bottom edge + if tree_nodes[1] in node_set_dict[boundary] && + tree_nodes[2] in node_set_dict[boundary] + # Bottom boundary is position 3 in p4est indexing + boundary_names[3, tree] = boundary + end + # Check right edge + if tree_nodes[2] in node_set_dict[boundary] && + tree_nodes[3] in node_set_dict[boundary] + # Right boundary is position 2 in p4est indexing + boundary_names[2, tree] = boundary + end + # Check top edge + if tree_nodes[3] in node_set_dict[boundary] && + tree_nodes[4] in node_set_dict[boundary] + # Top boundary is position 4 in p4est indexing + boundary_names[4, tree] = boundary + end + # Check left edge + if tree_nodes[4] in node_set_dict[boundary] && + tree_nodes[1] in node_set_dict[boundary] + # Left boundary is position 1 in p4est indexing + boundary_names[1, tree] = boundary + end + end + end + + return boundary_names +end + +# This function assigns the edges of elements to boundaries by +# checking if the nodes that define the faces are part of nodesets which correspond to boundaries. +function assign_boundaries_standard_abaqus!(boundary_names, n_trees, + element_node_matrix, node_set_dict, + ::Val{3}) # 3D version + for tree in 1:n_trees + tree_nodes = element_node_matrix[tree, :] + # For node labeling, see + # https://web.mit.edu/calculix_v2.7/CalculiX/ccx_2.7/doc/ccx/node26.html + for boundary in keys(node_set_dict) # Loop over specified boundaries + # Check "front face" (y_min) + if tree_nodes[1] in node_set_dict[boundary] && + tree_nodes[2] in node_set_dict[boundary] && + tree_nodes[5] in node_set_dict[boundary] && + tree_nodes[6] in node_set_dict[boundary] + # Front face is position 3 in p4est indexing + boundary_names[3, tree] = boundary + end + # Check "back face" (y_max) + if tree_nodes[3] in node_set_dict[boundary] && + tree_nodes[4] in node_set_dict[boundary] && + tree_nodes[7] in node_set_dict[boundary] && + tree_nodes[8] in node_set_dict[boundary] + # Front face is position 4 in p4est indexing + boundary_names[4, tree] = boundary + end + # Check "left face" (x_min) + if tree_nodes[1] in node_set_dict[boundary] && + tree_nodes[4] in node_set_dict[boundary] && + tree_nodes[5] in node_set_dict[boundary] && + tree_nodes[8] in node_set_dict[boundary] + # Left face is position 1 in p4est indexing + boundary_names[1, tree] = boundary + end + # Check "right face" (x_max) + if tree_nodes[2] in node_set_dict[boundary] && + tree_nodes[3] in node_set_dict[boundary] && + tree_nodes[6] in node_set_dict[boundary] && + tree_nodes[7] in node_set_dict[boundary] + # Right face is position 2 in p4est indexing + boundary_names[2, tree] = boundary + end + # Check "bottom face" (z_min) + if tree_nodes[1] in node_set_dict[boundary] && + tree_nodes[2] in node_set_dict[boundary] && + tree_nodes[3] in node_set_dict[boundary] && + tree_nodes[4] in node_set_dict[boundary] + # Bottom face is position 5 in p4est indexing + boundary_names[5, tree] = boundary + end + # Check "top face" (z_max) + if tree_nodes[5] in node_set_dict[boundary] && + tree_nodes[6] in node_set_dict[boundary] && + tree_nodes[7] 
in node_set_dict[boundary] && + tree_nodes[8] in node_set_dict[boundary] + # Top face is position 6 in p4est indexing + boundary_names[6, tree] = boundary + end + end + end + + return boundary_names +end + """ P4estMeshCubedSphere(trees_per_face_dimension, layers, inner_radius, thickness; polydeg, RealT=Float64, @@ -1490,6 +1700,10 @@ function bilinear_interpolation!(coordinate, face_vertices, u, v) end end +function get_global_first_element_ids(mesh::P4estMesh) + return unsafe_wrap(Array, mesh.p4est.global_first_quadrant, mpi_nranks() + 1) +end + function balance!(mesh::P4estMesh{2}, init_fn = C_NULL) p4est_balance(mesh.p4est, P4EST_CONNECT_FACE, init_fn) # Due to a bug in `p4est`, the forest needs to be rebalanced twice sometimes diff --git a/src/meshes/t8code_mesh.jl b/src/meshes/t8code_mesh.jl index 13edcc29711..cb2ac787e14 100644 --- a/src/meshes/t8code_mesh.jl +++ b/src/meshes/t8code_mesh.jl @@ -1,14 +1,12 @@ """ T8codeMesh{NDIMS} <: AbstractMesh{NDIMS} -An unstructured curved mesh based on trees that uses the C library +An unstructured curved mesh based on trees that uses the C library ['t8code'](https://github.com/DLR-AMR/t8code) to manage trees and mesh refinement. """ mutable struct T8codeMesh{NDIMS, RealT <: Real, IsParallel, NDIMSP2, NNODES} <: AbstractMesh{NDIMS} - cmesh :: Ptr{t8_cmesh} # cpointer to coarse mesh - scheme :: Ptr{t8_eclass_scheme} # cpointer to element scheme forest :: Ptr{t8_forest} # cpointer to forest is_parallel :: IsParallel @@ -25,14 +23,15 @@ mutable struct T8codeMesh{NDIMS, RealT <: Real, IsParallel, NDIMSP2, NNODES} <: nmortars :: Int nboundaries :: Int - function T8codeMesh{NDIMS}(cmesh, scheme, forest, tree_node_coordinates, nodes, + nmpiinterfaces :: Int + nmpimortars :: Int + + function T8codeMesh{NDIMS}(forest, tree_node_coordinates, nodes, boundary_names, current_filename) where {NDIMS} - is_parallel = False() + is_parallel = mpi_isparallel() ? True() : False() - mesh = new{NDIMS, Float64, typeof(is_parallel), NDIMS + 2, length(nodes)}(cmesh, - scheme, - forest, + mesh = new{NDIMS, Float64, typeof(is_parallel), NDIMS + 2, length(nodes)}(forest, is_parallel) mesh.nodes = nodes @@ -52,7 +51,7 @@ mutable struct T8codeMesh{NDIMS, RealT <: Real, IsParallel, NDIMSP2, NNODES} <: # further down. However, this might cause a pile-up of `mesh` # objects during long-running sessions. if !MPI.Finalized() - trixi_t8_unref_forest(mesh.forest) + t8_forest_unref(Ref(mesh.forest)) end end @@ -63,7 +62,7 @@ mutable struct T8codeMesh{NDIMS, RealT <: Real, IsParallel, NDIMSP2, NNODES} <: # more information. 
if haskey(ENV, "TRIXI_T8CODE_SC_FINALIZE") MPI.add_finalize_hook!() do - trixi_t8_unref_forest(mesh.forest) + t8_forest_unref(Ref(mesh.forest)) end end @@ -72,16 +71,15 @@ mutable struct T8codeMesh{NDIMS, RealT <: Real, IsParallel, NDIMSP2, NNODES} <: end const SerialT8codeMesh{NDIMS} = T8codeMesh{NDIMS, <:Real, <:False} +const ParallelT8codeMesh{NDIMS} = T8codeMesh{NDIMS, <:Real, <:True} @inline mpi_parallel(mesh::SerialT8codeMesh) = False() +@inline mpi_parallel(mesh::ParallelT8codeMesh) = True() @inline Base.ndims(::T8codeMesh{NDIMS}) where {NDIMS} = NDIMS @inline Base.real(::T8codeMesh{NDIMS, RealT}) where {NDIMS, RealT} = RealT -@inline ntrees(mesh::T8codeMesh) = Int(t8_forest_get_num_local_trees(mesh.forest)) +@inline ntrees(mesh::T8codeMesh) = size(mesh.tree_node_coordinates)[end] @inline ncells(mesh::T8codeMesh) = Int(t8_forest_get_local_num_elements(mesh.forest)) -@inline ninterfaces(mesh::T8codeMesh) = mesh.ninterfaces -@inline nmortars(mesh::T8codeMesh) = mesh.nmortars -@inline nboundaries(mesh::T8codeMesh) = mesh.nboundaries function Base.show(io::IO, mesh::T8codeMesh) print(io, "T8codeMesh{", ndims(mesh), ", ", real(mesh), "}") @@ -115,19 +113,49 @@ Non-periodic boundaries will be called ':x_neg', ':x_pos', ':y_neg', ':y_pos', ' - 'polydeg::Integer': polynomial degree used to store the geometry of the mesh. The mapping will be approximated by an interpolation polynomial of the specified degree for each tree. -- 'mapping': a function of 'NDIMS' variables to describe the mapping that transforms - the reference mesh ('[-1, 1]^n') to the physical domain. +- `mapping`: a function of `NDIMS` variables to describe the mapping that transforms + the reference mesh (`[-1, 1]^n`) to the physical domain. + Use only one of `mapping`, `faces` and `coordinates_min`/`coordinates_max`. +- `faces::NTuple{2*NDIMS}`: a tuple of `2 * NDIMS` functions that describe the faces of the domain. + Each function must take `NDIMS-1` arguments. + `faces[1]` describes the face onto which the face in negative x-direction + of the unit hypercube is mapped. The face in positive x-direction of + the unit hypercube will be mapped onto the face described by `faces[2]`. + `faces[3:4]` describe the faces in positive and negative y-direction respectively + (in 2D and 3D). + `faces[5:6]` describe the faces in positive and negative z-direction respectively (in 3D). + Use only one of `mapping`, `faces` and `coordinates_min`/`coordinates_max`. +- `coordinates_min`: vector or tuple of the coordinates of the corner in the negative direction of each dimension + to create a rectangular mesh. + Use only one of `mapping`, `faces` and `coordinates_min`/`coordinates_max`. +- `coordinates_max`: vector or tuple of the coordinates of the corner in the positive direction of each dimension + to create a rectangular mesh. + Use only one of `mapping`, `faces` and `coordinates_min`/`coordinates_max`. - 'RealT::Type': the type that should be used for coordinates. - 'initial_refinement_level::Integer': refine the mesh uniformly to this level before the simulation starts. - 'periodicity': either a 'Bool' deciding if all of the boundaries are periodic or an 'NTuple{NDIMS, Bool}' deciding for each dimension if the boundaries in this dimension are periodic. 
""" -function T8codeMesh(trees_per_dimension; polydeg, - mapping = coordinates2mapping((-1.0, -1.0), (1.0, 1.0)), - RealT = Float64, initial_refinement_level = 0, periodicity = true) - NDIMS = length(trees_per_dimension) +function T8codeMesh(trees_per_dimension; polydeg = 1, + mapping = nothing, faces = nothing, coordinates_min = nothing, + coordinates_max = nothing, + RealT = Float64, initial_refinement_level = 0, + periodicity = true) + @assert ((coordinates_min === nothing)===(coordinates_max === nothing)) "Either both or none of coordinates_min and coordinates_max must be specified" + + @assert count(i -> i !== nothing, + (mapping, faces, coordinates_min))==1 "Exactly one of mapping, faces and coordinates_min/max must be specified" + + # Extract mapping + if faces !== nothing + validate_faces(faces) + mapping = transfinite_mapping(faces) + elseif coordinates_min !== nothing + mapping = coordinates2mapping(coordinates_min, coordinates_max) + end - @assert NDIMS == 2 # Only support for NDIMS = 2 yet. + NDIMS = length(trees_per_dimension) + @assert (NDIMS == 2||NDIMS == 3) "NDIMS should be 2 or 3." # Convert periodicity to a Tuple of a Bool for every dimension if all(periodicity) @@ -141,43 +169,73 @@ function T8codeMesh(trees_per_dimension; polydeg, periodicity = Tuple(periodicity) end - conn = T8code.Libt8.p4est_connectivity_new_brick(trees_per_dimension..., periodicity...) do_partition = 0 - cmesh = t8_cmesh_new_from_p4est(conn, mpi_comm(), do_partition) - T8code.Libt8.p4est_connectivity_destroy(conn) + if NDIMS == 2 + conn = T8code.Libt8.p4est_connectivity_new_brick(trees_per_dimension..., + periodicity...) + cmesh = t8_cmesh_new_from_p4est(conn, mpi_comm(), do_partition) + T8code.Libt8.p4est_connectivity_destroy(conn) + elseif NDIMS == 3 + conn = T8code.Libt8.p8est_connectivity_new_brick(trees_per_dimension..., + periodicity...) + cmesh = t8_cmesh_new_from_p8est(conn, mpi_comm(), do_partition) + T8code.Libt8.p8est_connectivity_destroy(conn) + end + do_face_ghost = mpi_isparallel() scheme = t8_scheme_new_default_cxx() - forest = t8_forest_new_uniform(cmesh, scheme, initial_refinement_level, 0, mpi_comm()) + forest = t8_forest_new_uniform(cmesh, scheme, initial_refinement_level, do_face_ghost, + mpi_comm()) basis = LobattoLegendreBasis(RealT, polydeg) nodes = basis.nodes + num_trees = t8_cmesh_get_num_trees(cmesh) + tree_node_coordinates = Array{RealT, NDIMS + 2}(undef, NDIMS, ntuple(_ -> length(nodes), NDIMS)..., - prod(trees_per_dimension)) + num_trees) - # Get cell length in reference mesh: Omega_ref = [-1,1]^2. - dx = 2 / trees_per_dimension[1] - dy = 2 / trees_per_dimension[2] - - num_local_trees = t8_cmesh_get_num_local_trees(cmesh) + # Get cell length in reference mesh: Omega_ref = [-1,1]^NDIMS. + dx = [2 / n for n in trees_per_dimension] # Non-periodic boundaries. boundary_names = fill(Symbol("---"), 2 * NDIMS, prod(trees_per_dimension)) - for itree in 1:num_local_trees + if mapping === nothing + mapping_ = coordinates2mapping(ntuple(_ -> -1.0, NDIMS), ntuple(_ -> 1.0, NDIMS)) + else + mapping_ = mapping + end + + for itree in 1:num_trees veptr = t8_cmesh_get_tree_vertices(cmesh, itree - 1) verts = unsafe_wrap(Array, veptr, (3, 1 << NDIMS)) # Calculate node coordinates of reference mesh. 
- cell_x_offset = (verts[1, 1] - 1 / 2 * (trees_per_dimension[1] - 1)) * dx - cell_y_offset = (verts[2, 1] - 1 / 2 * (trees_per_dimension[2] - 1)) * dy - - for j in eachindex(nodes), i in eachindex(nodes) - tree_node_coordinates[:, i, j, itree] .= mapping(cell_x_offset + - dx * nodes[i] / 2, - cell_y_offset + - dy * nodes[j] / 2) + if NDIMS == 2 + cell_x_offset = (verts[1, 1] - 0.5 * (trees_per_dimension[1] - 1)) * dx[1] + cell_y_offset = (verts[2, 1] - 0.5 * (trees_per_dimension[2] - 1)) * dx[2] + + for j in eachindex(nodes), i in eachindex(nodes) + tree_node_coordinates[:, i, j, itree] .= mapping_(cell_x_offset + + dx[1] * nodes[i] / 2, + cell_y_offset + + dx[2] * nodes[j] / 2) + end + elseif NDIMS == 3 + cell_x_offset = (verts[1, 1] - 0.5 * (trees_per_dimension[1] - 1)) * dx[1] + cell_y_offset = (verts[2, 1] - 0.5 * (trees_per_dimension[2] - 1)) * dx[2] + cell_z_offset = (verts[3, 1] - 0.5 * (trees_per_dimension[3] - 1)) * dx[3] + + for k in eachindex(nodes), j in eachindex(nodes), i in eachindex(nodes) + tree_node_coordinates[:, i, j, k, itree] .= mapping_(cell_x_offset + + dx[1] * nodes[i] / 2, + cell_y_offset + + dx[2] * nodes[j] / 2, + cell_z_offset + + dx[3] * nodes[k] / 2) + end end if !periodicity[1] @@ -189,16 +247,23 @@ function T8codeMesh(trees_per_dimension; polydeg, boundary_names[3, itree] = :y_neg boundary_names[4, itree] = :y_pos end + + if NDIMS > 2 + if !periodicity[3] + boundary_names[5, itree] = :z_neg + boundary_names[6, itree] = :z_pos + end + end end - return T8codeMesh{NDIMS}(cmesh, scheme, forest, tree_node_coordinates, nodes, + return T8codeMesh{NDIMS}(forest, tree_node_coordinates, nodes, boundary_names, "") end """ - T8codeMesh{NDIMS}(cmesh::Ptr{t8_cmesh}, - mapping=nothing, polydeg=1, RealT=Float64, - initial_refinement_level=0) + T8codeMesh(cmesh::Ptr{t8_cmesh}, + mapping=nothing, polydeg=1, RealT=Float64, + initial_refinement_level=0) Main mesh constructor for the `T8codeMesh` that imports an unstructured, conforming mesh from a `t8_cmesh` data structure. @@ -215,68 +280,125 @@ conforming mesh from a `t8_cmesh` data structure. - `RealT::Type`: the type that should be used for coordinates. - `initial_refinement_level::Integer`: refine the mesh uniformly to this level before the simulation starts. """ -function T8codeMesh{NDIMS}(cmesh::Ptr{t8_cmesh}; - mapping = nothing, polydeg = 1, RealT = Float64, - initial_refinement_level = 0) where {NDIMS} - @assert NDIMS == 2 # Only support for NDIMS = 2 yet. +function T8codeMesh(cmesh::Ptr{t8_cmesh}; + mapping = nothing, polydeg = 1, RealT = Float64, + initial_refinement_level = 0) + @assert (t8_cmesh_get_num_trees(cmesh)>0) "Given `cmesh` does not contain any trees." + + # Infer NDIMS from the geometry of the first tree. + NDIMS = Int(t8_geom_get_dimension(t8_cmesh_get_tree_geometry(cmesh, 0))) + @assert (NDIMS == 2||NDIMS == 3) "NDIMS should be 2 or 3." 
+ + do_face_ghost = mpi_isparallel() scheme = t8_scheme_new_default_cxx() - forest = t8_forest_new_uniform(cmesh, scheme, initial_refinement_level, 0, mpi_comm()) + forest = t8_forest_new_uniform(cmesh, scheme, initial_refinement_level, do_face_ghost, + mpi_comm()) basis = LobattoLegendreBasis(RealT, polydeg) nodes = basis.nodes - num_local_trees = t8_cmesh_get_num_local_trees(cmesh) + num_trees = t8_cmesh_get_num_trees(cmesh) tree_node_coordinates = Array{RealT, NDIMS + 2}(undef, NDIMS, ntuple(_ -> length(nodes), NDIMS)..., - num_local_trees) + num_trees) nodes_in = [-1.0, 1.0] matrix = polynomial_interpolation_matrix(nodes_in, nodes) - data_in = Array{RealT, 3}(undef, 2, 2, 2) - tmp1 = zeros(RealT, 2, length(nodes), length(nodes_in)) - for itree in 0:(num_local_trees - 1) - veptr = t8_cmesh_get_tree_vertices(cmesh, itree) - verts = unsafe_wrap(Array, veptr, (3, 1 << NDIMS)) + num_local_trees = t8_cmesh_get_num_local_trees(cmesh) + + if NDIMS == 2 + data_in = Array{RealT, 3}(undef, 2, 2, 2) + tmp1 = zeros(RealT, 2, length(nodes), length(nodes_in)) + verts = zeros(3, 4) + + for itree in 0:(num_local_trees - 1) + veptr = t8_cmesh_get_tree_vertices(cmesh, itree) + + # Note, `verts = unsafe_wrap(Array, veptr, (3, 1 << NDIMS))` + # sometimes does not work since `veptr` is not necessarily properly + # aligned to 8 bytes. + for icorner in 1:4 + verts[1, icorner] = unsafe_load(veptr, (icorner - 1) * 3 + 1) + verts[2, icorner] = unsafe_load(veptr, (icorner - 1) * 3 + 2) + end + + # Check if tree's node ordering is right-handed or print a warning. + let z = zero(eltype(verts)), o = one(eltype(verts)) + u = verts[:, 2] - verts[:, 1] + v = verts[:, 3] - verts[:, 1] + w = [z, z, o] - u = verts[:, 2] - verts[:, 1] - v = verts[:, 3] - verts[:, 1] - w = [0.0, 0.0, 1.0] + # Triple product gives signed volume of spanned parallelepiped. + vol = dot(cross(u, v), w) - vol = dot(cross(u, v), w) + if vol < z + @warn "Discovered negative volumes in `cmesh`: vol = $vol" + end + end - if vol < 0.0 - @warn "Discovered negative volumes in `cmesh`: vol = $vol" + # Tree vertices are stored in z-order. + @views data_in[:, 1, 1] .= verts[1:2, 1] + @views data_in[:, 2, 1] .= verts[1:2, 2] + @views data_in[:, 1, 2] .= verts[1:2, 3] + @views data_in[:, 2, 2] .= verts[1:2, 4] + + # Interpolate corner coordinates to specified nodes. + multiply_dimensionwise!(view(tree_node_coordinates, :, :, :, itree + 1), + matrix, matrix, + data_in, + tmp1) end - # Tree vertices are stored in z-order. - @views data_in[:, 1, 1] .= verts[1:2, 1] - @views data_in[:, 2, 1] .= verts[1:2, 2] - @views data_in[:, 1, 2] .= verts[1:2, 3] - @views data_in[:, 2, 2] .= verts[1:2, 4] - - # Interpolate corner coordinates to specified nodes. - multiply_dimensionwise!(view(tree_node_coordinates, :, :, :, itree + 1), - matrix, matrix, - data_in, - tmp1) + elseif NDIMS == 3 + data_in = Array{RealT, 4}(undef, 3, 2, 2, 2) + tmp1 = zeros(RealT, 3, length(nodes), length(nodes_in), length(nodes_in)) + verts = zeros(3, 8) + + for itree in 0:(num_trees - 1) + veptr = t8_cmesh_get_tree_vertices(cmesh, itree) + + # Note, `verts = unsafe_wrap(Array, veptr, (3, 1 << NDIMS))` + # sometimes does not work since `veptr` is not necessarily properly + # aligned to 8 bytes. + for icorner in 1:8 + verts[1, icorner] = unsafe_load(veptr, (icorner - 1) * 3 + 1) + verts[2, icorner] = unsafe_load(veptr, (icorner - 1) * 3 + 2) + verts[3, icorner] = unsafe_load(veptr, (icorner - 1) * 3 + 3) + end + + # Tree vertices are stored in z-order. 
+ @views data_in[:, 1, 1, 1] .= verts[1:3, 1] + @views data_in[:, 2, 1, 1] .= verts[1:3, 2] + @views data_in[:, 1, 2, 1] .= verts[1:3, 3] + @views data_in[:, 2, 2, 1] .= verts[1:3, 4] + + @views data_in[:, 1, 1, 2] .= verts[1:3, 5] + @views data_in[:, 2, 1, 2] .= verts[1:3, 6] + @views data_in[:, 1, 2, 2] .= verts[1:3, 7] + @views data_in[:, 2, 2, 2] .= verts[1:3, 8] + + # Interpolate corner coordinates to specified nodes. + multiply_dimensionwise!(view(tree_node_coordinates, :, :, :, :, itree + 1), + matrix, matrix, matrix, + data_in, + tmp1) + end end map_node_coordinates!(tree_node_coordinates, mapping) # There's no simple and generic way to distinguish boundaries. Name all of them :all. - boundary_names = fill(:all, 2 * NDIMS, num_local_trees) + boundary_names = fill(:all, 2 * NDIMS, num_trees) - return T8codeMesh{NDIMS}(cmesh, scheme, forest, tree_node_coordinates, nodes, + return T8codeMesh{NDIMS}(forest, tree_node_coordinates, nodes, boundary_names, "") end """ - T8codeMesh{NDIMS}(conn::Ptr{p4est_connectivity}, - mapping=nothing, polydeg=1, RealT=Float64, - initial_refinement_level=0) + T8codeMesh(conn::Ptr{p4est_connectivity}; kwargs...) Main mesh constructor for the `T8codeMesh` that imports an unstructured, conforming mesh from a `p4est_connectivity` data structure. @@ -293,24 +415,45 @@ conforming mesh from a `p4est_connectivity` data structure. - `RealT::Type`: the type that should be used for coordinates. - `initial_refinement_level::Integer`: refine the mesh uniformly to this level before the simulation starts. """ -function T8codeMesh{NDIMS}(conn::Ptr{p4est_connectivity}; kwargs...) where {NDIMS} - @assert NDIMS == 2 # Only support for NDIMS = 2 yet. - +function T8codeMesh(conn::Ptr{p4est_connectivity}; kwargs...) cmesh = t8_cmesh_new_from_p4est(conn, mpi_comm(), 0) - return T8codeMesh{NDIMS}(cmesh; kwargs...) + return T8codeMesh(cmesh; kwargs...) +end + +""" + T8codeMesh(conn::Ptr{p8est_connectivity}; kwargs...) + +Main mesh constructor for the `T8codeMesh` that imports an unstructured, +conforming mesh from a `p4est_connectivity` data structure. + +# Arguments +- `conn::Ptr{p4est_connectivity}`: Pointer to a P4est connectivity object. +- `mapping`: a function of `NDIMS` variables to describe the mapping that transforms + the imported mesh to the physical domain. Use `nothing` for the identity map. +- `polydeg::Integer`: polynomial degree used to store the geometry of the mesh. + The mapping will be approximated by an interpolation polynomial + of the specified degree for each tree. + The default of `1` creates an uncurved geometry. Use a higher value if the mapping + will curve the imported uncurved mesh. +- `RealT::Type`: the type that should be used for coordinates. +- `initial_refinement_level::Integer`: refine the mesh uniformly to this level before the simulation starts. +""" +function T8codeMesh(conn::Ptr{p8est_connectivity}; kwargs...) + cmesh = t8_cmesh_new_from_p8est(conn, mpi_comm(), 0) + + return T8codeMesh(cmesh; kwargs...) end """ - T8codeMesh{NDIMS}(meshfile::String; - mapping=nothing, polydeg=1, RealT=Float64, - initial_refinement_level=0) + T8codeMesh(meshfile::String, ndims; kwargs...) Main mesh constructor for the `T8codeMesh` that imports an unstructured, conforming mesh from a Gmsh mesh file (`.msh`). # Arguments - `meshfile::String`: path to a Gmsh mesh file. +- `ndims`: Mesh file dimension: `2` or `3`. - `mapping`: a function of `NDIMS` variables to describe the mapping that transforms the imported mesh to the physical domain. 
Use `nothing` for the identity map. - `polydeg::Integer`: polynomial degree used to store the geometry of the mesh. @@ -321,25 +464,656 @@ mesh from a Gmsh mesh file (`.msh`). - `RealT::Type`: the type that should be used for coordinates. - `initial_refinement_level::Integer`: refine the mesh uniformly to this level before the simulation starts. """ -function T8codeMesh{NDIMS}(meshfile::String; kwargs...) where {NDIMS} - @assert NDIMS == 2 # Only support for NDIMS = 2 yet. - +function T8codeMesh(meshfile::String, ndims; kwargs...) # Prevent `t8code` from crashing Julia if the file doesn't exist. @assert isfile(meshfile) meshfile_prefix, meshfile_suffix = splitext(meshfile) - cmesh = t8_cmesh_from_msh_file(meshfile_prefix, 0, mpi_comm(), NDIMS, 0, 0) + cmesh = t8_cmesh_from_msh_file(meshfile_prefix, 0, mpi_comm(), ndims, 0, 0) + + return T8codeMesh(cmesh; kwargs...) +end - return T8codeMesh{NDIMS}(cmesh; kwargs...) +struct adapt_callback_passthrough + adapt_callback::Function + user_data::Any end -# TODO: Just a placeholder. Will be implemented later when MPI is supported. -function balance!(mesh::T8codeMesh, init_fn = C_NULL) +# Callback function prototype to decide for refining and coarsening. +# If `is_family` equals 1, the first `num_elements` in elements +# form a family and we decide whether this family should be coarsened +# or only the first element should be refined. +# Otherwise `is_family` must equal zero and we consider the first entry +# of the element array for refinement. +# Entries of the element array beyond the first `num_elements` are undefined. +# \param [in] forest the forest to which the new elements belong +# \param [in] forest_from the forest that is adapted. +# \param [in] which_tree the local tree containing `elements` +# \param [in] lelement_id the local element id in `forest_old` in the tree of the current element +# \param [in] ts the eclass scheme of the tree +# \param [in] is_family if 1, the first `num_elements` entries in `elements` form a family. If 0, they do not. +# \param [in] num_elements the number of entries in `elements` that are defined +# \param [in] elements Pointers to a family or, if `is_family` is zero, +# pointer to one element. +# \return greater zero if the first entry in `elements` should be refined, +# smaller zero if the family `elements` shall be coarsened, +# zero else. +function adapt_callback_wrapper(forest, + forest_from, + which_tree, + lelement_id, + ts, + is_family, + num_elements, + elements_ptr)::Cint + passthrough = unsafe_pointer_to_objref(t8_forest_get_user_data(forest))[] + + elements = unsafe_wrap(Array, elements_ptr, num_elements) + + return passthrough.adapt_callback(forest_from, which_tree, ts, lelement_id, elements, + Bool(is_family), passthrough.user_data) +end + +""" + Trixi.adapt!(mesh::T8codeMesh, adapt_callback; kwargs...) + +Adapt a `T8codeMesh` according to a user-defined `adapt_callback`. + +# Arguments +- `mesh::T8codeMesh`: Initialized mesh object. +- `adapt_callback`: A user-defined callback which tells the adaption routines + if an element should be refined, coarsened or stay unchanged. + + The expected callback signature is as follows: + + `adapt_callback(forest, ltreeid, eclass_scheme, lelemntid, elements, is_family, user_data)` + # Arguments + - `forest`: Pointer to the analyzed forest. + - `ltreeid`: Local index of the current tree where the analyzed elements are part of. + - `eclass_scheme`: Element class of `elements`. + - `lelemntid`: Local index of the first element in `elements`. 
+ - `elements`: Array of elements. If consecutive elements form a family + they are passed together, otherwise `elements` consists of just one element. + - `is_family`: Boolean signifying if `elements` represents a family or not. + - `user_data`: Void pointer to some arbitrary user data. Default value is `C_NULL`. + # Returns + -1 : Coarsen family of elements. + 0 : Stay unchanged. + 1 : Refine element. + +- `kwargs`: + - `recursive = true`: Adapt the forest recursively. If true the caller must ensure that the callback + returns 0 for every analyzed element at some point to stop the recursion. + - `balance = true`: Make sure the adapted forest is 2^(NDIMS-1):1 balanced. + - `partition = true`: Partition the forest to redistribute elements evenly among MPI ranks. + - `ghost = true`: Create a ghost layer for MPI data exchange. + - `user_data = C_NULL`: Pointer to some arbitrary user-defined data. +""" +function adapt!(mesh::T8codeMesh, adapt_callback; recursive = true, balance = true, + partition = true, ghost = true, user_data = C_NULL) + # Check that forest is a committed, that is valid and usable, forest. + @assert t8_forest_is_committed(mesh.forest) != 0 + + # Init new forest. + new_forest_ref = Ref{t8_forest_t}() + t8_forest_init(new_forest_ref) + new_forest = new_forest_ref[] + + # Check out `examples/t8_step4_partition_balance_ghost.jl` in + # https://github.com/DLR-AMR/T8code.jl for detailed explanations. + let set_from = C_NULL, set_for_coarsening = 0, no_repartition = !partition + t8_forest_set_user_data(new_forest, + pointer_from_objref(Ref(adapt_callback_passthrough(adapt_callback, + user_data)))) + t8_forest_set_adapt(new_forest, mesh.forest, + @t8_adapt_callback(adapt_callback_wrapper), + recursive) + if balance + t8_forest_set_balance(new_forest, set_from, no_repartition) + end + + if partition + t8_forest_set_partition(new_forest, set_from, set_for_coarsening) + end + + t8_forest_set_ghost(new_forest, ghost, T8_GHOST_FACES) # Note: MPI support not available yet so it is a dummy call. + + # The old forest is destroyed here. + # Call `t8_forest_ref(Ref(mesh.forest))` to keep it. + t8_forest_commit(new_forest) + end + + mesh.forest = new_forest + return nothing end -# TODO: Just a placeholder. Will be implemented later when MPI is supported. -function partition!(mesh::T8codeMesh; allow_coarsening = true, weight_fn = C_NULL) +""" + Trixi.balance!(mesh::T8codeMesh) + +Balance a `T8codeMesh` to ensure 2^(NDIMS-1):1 face neighbors. +""" +function balance!(mesh::T8codeMesh) + new_forest_ref = Ref{t8_forest_t}() + t8_forest_init(new_forest_ref) + new_forest = new_forest_ref[] + + let set_from = mesh.forest, no_repartition = 1, do_ghost = 1 + t8_forest_set_balance(new_forest, set_from, no_repartition) + t8_forest_set_ghost(new_forest, do_ghost, T8_GHOST_FACES) + t8_forest_commit(new_forest) + end + + mesh.forest = new_forest + return nothing end + +""" + Trixi.partition!(mesh::T8codeMesh) + +Partition a `T8codeMesh` in order to redistribute elements evenly among MPI ranks. + +# Arguments +- `mesh::T8codeMesh`: Initialized mesh object. 
+""" +function partition!(mesh::T8codeMesh) + new_forest_ref = Ref{t8_forest_t}() + t8_forest_init(new_forest_ref) + new_forest = new_forest_ref[] + + let set_from = mesh.forest, do_ghost = 1, allow_for_coarsening = 1 + t8_forest_set_partition(new_forest, set_from, allow_for_coarsening) + t8_forest_set_ghost(new_forest, do_ghost, T8_GHOST_FACES) + t8_forest_commit(new_forest) + end + + mesh.forest = new_forest + + return nothing +end + +# Compute the global ids (zero-indexed) of first element in each MPI rank. +function get_global_first_element_ids(mesh::T8codeMesh) + n_elements_local = Int(t8_forest_get_local_num_elements(mesh.forest)) + n_elements_by_rank = Vector{Int}(undef, mpi_nranks()) + n_elements_by_rank[mpi_rank() + 1] = n_elements_local + MPI.Allgather!(MPI.UBuffer(n_elements_by_rank, 1), mpi_comm()) + return [sum(n_elements_by_rank[1:(rank - 1)]) for rank in 1:(mpi_nranks() + 1)] +end + +function count_interfaces(mesh::T8codeMesh) + @assert t8_forest_is_committed(mesh.forest) != 0 + + num_local_elements = t8_forest_get_local_num_elements(mesh.forest) + num_local_trees = t8_forest_get_num_local_trees(mesh.forest) + + current_index = t8_locidx_t(0) + + local_num_conform = 0 + local_num_mortars = 0 + local_num_boundary = 0 + + local_num_mpi_conform = 0 + local_num_mpi_mortars = 0 + + visited_global_mortar_ids = Set{UInt64}([]) + + max_level = t8_forest_get_maxlevel(mesh.forest) #UInt64 + max_tree_num_elements = UInt64(2^ndims(mesh))^max_level + + if mpi_isparallel() + ghost_num_trees = t8_forest_ghost_num_trees(mesh.forest) + + ghost_tree_element_offsets = [num_local_elements + + t8_forest_ghost_get_tree_element_offset(mesh.forest, + itree) + for itree in 0:(ghost_num_trees - 1)] + ghost_global_treeids = [t8_forest_ghost_get_global_treeid(mesh.forest, itree) + for itree in 0:(ghost_num_trees - 1)] + end + + for itree in 0:(num_local_trees - 1) + tree_class = t8_forest_get_tree_class(mesh.forest, itree) + eclass_scheme = t8_forest_get_eclass_scheme(mesh.forest, tree_class) + + num_elements_in_tree = t8_forest_get_tree_num_elements(mesh.forest, itree) + + global_itree = t8_forest_global_tree_id(mesh.forest, itree) + + for ielement in 0:(num_elements_in_tree - 1) + element = t8_forest_get_element_in_tree(mesh.forest, itree, ielement) + + level = t8_element_level(eclass_scheme, element) + + num_faces = t8_element_num_faces(eclass_scheme, element) + + # Note: This works only for forests of one element class. 
+ current_linear_id = global_itree * max_tree_num_elements + + t8_element_get_linear_id(eclass_scheme, element, max_level) + + for iface in 0:(num_faces - 1) + pelement_indices_ref = Ref{Ptr{t8_locidx_t}}() + pneighbor_leaves_ref = Ref{Ptr{Ptr{t8_element}}}() + pneigh_scheme_ref = Ref{Ptr{t8_eclass_scheme}}() + + dual_faces_ref = Ref{Ptr{Cint}}() + num_neighbors_ref = Ref{Cint}() + + forest_is_balanced = Cint(1) + + t8_forest_leaf_face_neighbors(mesh.forest, itree, element, + pneighbor_leaves_ref, iface, dual_faces_ref, + num_neighbors_ref, + pelement_indices_ref, pneigh_scheme_ref, + forest_is_balanced) + + num_neighbors = num_neighbors_ref[] + dual_faces = unsafe_wrap(Array, dual_faces_ref[], num_neighbors) + neighbor_ielements = unsafe_wrap(Array, pelement_indices_ref[], + num_neighbors) + neighbor_leaves = unsafe_wrap(Array, pneighbor_leaves_ref[], num_neighbors) + neighbor_scheme = pneigh_scheme_ref[] + + if num_neighbors == 0 + local_num_boundary += 1 + else + neighbor_level = t8_element_level(neighbor_scheme, neighbor_leaves[1]) + + if all(neighbor_ielements .< num_local_elements) + # Conforming interface: The second condition ensures we + # only visit the interface once. + if level == neighbor_level && current_index <= neighbor_ielements[1] + local_num_conform += 1 + elseif level < neighbor_level + local_num_mortars += 1 + # `else level > neighbor_level` is ignored since we + # only want to count the mortar interface once. + end + else + if level == neighbor_level + local_num_mpi_conform += 1 + elseif level < neighbor_level + local_num_mpi_mortars += 1 + + global_mortar_id = 2 * ndims(mesh) * current_linear_id + iface + + else # level > neighbor_level + neighbor_global_ghost_itree = ghost_global_treeids[findlast(ghost_tree_element_offsets .<= + neighbor_ielements[1])] + neighbor_linear_id = neighbor_global_ghost_itree * + max_tree_num_elements + + t8_element_get_linear_id(neighbor_scheme, + neighbor_leaves[1], + max_level) + global_mortar_id = 2 * ndims(mesh) * neighbor_linear_id + + dual_faces[1] + + if !(global_mortar_id in visited_global_mortar_ids) + push!(visited_global_mortar_ids, global_mortar_id) + local_num_mpi_mortars += 1 + end + end + end + end + + t8_free(dual_faces_ref[]) + t8_free(pneighbor_leaves_ref[]) + t8_free(pelement_indices_ref[]) + end # for + + current_index += 1 + end # for + end # for + + return (interfaces = local_num_conform, + mortars = local_num_mortars, + boundaries = local_num_boundary, + mpi_interfaces = local_num_mpi_conform, + mpi_mortars = local_num_mpi_mortars) +end + +# I know this routine is an unmaintainable behemoth. However, I see no real +# and elegant way to refactor this into, for example, smaller parts. The +# `t8_forest_leaf_face_neighbors` routine is as of now rather costly and it +# makes sense to query it only once per face per element and extract all the +# information needed at once in order to fill the connectivity information. +# Instead, I opted for good documentation. +function fill_mesh_info!(mesh::T8codeMesh, interfaces, mortars, boundaries, + boundary_names; mpi_mesh_info = nothing) + @assert t8_forest_is_committed(mesh.forest) != 0 + + num_local_elements = t8_forest_get_local_num_elements(mesh.forest) + num_local_trees = t8_forest_get_num_local_trees(mesh.forest) + + if !isnothing(mpi_mesh_info) + #! 
format: off + remotes = t8_forest_ghost_get_remotes(mesh.forest) + ghost_num_trees = t8_forest_ghost_num_trees(mesh.forest) + + ghost_remote_first_elem = [num_local_elements + + t8_forest_ghost_remote_first_elem(mesh.forest, remote) + for remote in remotes] + + ghost_tree_element_offsets = [num_local_elements + + t8_forest_ghost_get_tree_element_offset(mesh.forest, itree) + for itree in 0:(ghost_num_trees - 1)] + + ghost_global_treeids = [t8_forest_ghost_get_global_treeid(mesh.forest, itree) + for itree in 0:(ghost_num_trees - 1)] + #! format: on + end + + # Process-local index of the current element in the space-filling curve. + current_index = t8_locidx_t(0) + + # Increment counters for the different interface/mortar/boundary types. + local_num_conform = 0 + local_num_mortars = 0 + local_num_boundary = 0 + + local_num_mpi_conform = 0 + local_num_mpi_mortars = 0 + + # Works for quads and hexs only. This mapping is needed in the MPI mortar + # sections below. + map_iface_to_ichild_to_position = [ + # 0 1 2 3 4 5 6 7 ichild/iface + [1, 0, 2, 0, 3, 0, 4, 0], # 0 + [0, 1, 0, 2, 0, 3, 0, 4], # 1 + [1, 2, 0, 0, 3, 4, 0, 0], # 2 + [0, 0, 1, 2, 0, 0, 3, 4], # 3 + [1, 2, 3, 4, 0, 0, 0, 0], # 4 + [0, 0, 0, 0, 1, 2, 3, 4], # 5 + ] + + # Helper variables to compute unique global MPI interface/mortar ids. + max_level = t8_forest_get_maxlevel(mesh.forest) #UInt64 + max_tree_num_elements = UInt64(2^ndims(mesh))^max_level + + # These two variables help to ensure that we count MPI mortars from smaller + # elements point of view only once. + visited_global_mortar_ids = Set{UInt64}([]) + global_mortar_id_to_local = Dict{UInt64, Int}([]) + + # Loop over all local trees. + for itree in 0:(num_local_trees - 1) + tree_class = t8_forest_get_tree_class(mesh.forest, itree) + eclass_scheme = t8_forest_get_eclass_scheme(mesh.forest, tree_class) + + num_elements_in_tree = t8_forest_get_tree_num_elements(mesh.forest, itree) + + global_itree = t8_forest_global_tree_id(mesh.forest, itree) + + # Loop over all local elements of the current local tree. + for ielement in 0:(num_elements_in_tree - 1) + element = t8_forest_get_element_in_tree(mesh.forest, itree, ielement) + + level = t8_element_level(eclass_scheme, element) + + num_faces = t8_element_num_faces(eclass_scheme, element) + + # Note: This works only for forests of one element class. + current_linear_id = global_itree * max_tree_num_elements + + t8_element_get_linear_id(eclass_scheme, element, max_level) + + # Loop over all faces of the current local element. + for iface in 0:(num_faces - 1) + # Compute the `orientation` of the touching faces. + if t8_element_is_root_boundary(eclass_scheme, element, iface) == 1 + cmesh = t8_forest_get_cmesh(mesh.forest) + itree_in_cmesh = t8_forest_ltreeid_to_cmesh_ltreeid(mesh.forest, itree) + iface_in_tree = t8_element_tree_face(eclass_scheme, element, iface) + orientation_ref = Ref{Cint}() + + t8_cmesh_get_face_neighbor(cmesh, itree_in_cmesh, iface_in_tree, C_NULL, + orientation_ref) + orientation = orientation_ref[] + else + orientation = zero(Cint) + end + + pelement_indices_ref = Ref{Ptr{t8_locidx_t}}() + pneighbor_leaves_ref = Ref{Ptr{Ptr{t8_element}}}() + pneigh_scheme_ref = Ref{Ptr{t8_eclass_scheme}}() + + dual_faces_ref = Ref{Ptr{Cint}}() + num_neighbors_ref = Ref{Cint}() + + forest_is_balanced = Cint(1) + + # Query neighbor information from t8code. 
+ t8_forest_leaf_face_neighbors(mesh.forest, itree, element, + pneighbor_leaves_ref, iface, dual_faces_ref, + num_neighbors_ref, + pelement_indices_ref, pneigh_scheme_ref, + forest_is_balanced) + + num_neighbors = num_neighbors_ref[] + dual_faces = unsafe_wrap(Array, dual_faces_ref[], num_neighbors) + neighbor_ielements = unsafe_wrap(Array, pelement_indices_ref[], + num_neighbors) + neighbor_leaves = unsafe_wrap(Array, pneighbor_leaves_ref[], num_neighbors) + neighbor_scheme = pneigh_scheme_ref[] + + # Now we check for the different cases. The nested if-structure is as follows: + # + # if `boundary`: + # + # + # else: // It must be an interface or mortar. + # + # if `all neighbors are local elements`: + # + # if `local interface`: + # + # elseif `local mortar from larger element point of view`: + # + # else: // `local mortar from smaller elements point of view` + # // We only count local mortars once. + # + # else: // It must be either a MPI interface or a MPI mortar. + # + # if `MPI interface`: + # + # elseif `MPI mortar from larger element point of view`: + # + # else: // `MPI mortar from smaller elements point of view` + # + # + # // end + + # Domain boundary. + if num_neighbors == 0 + local_num_boundary += 1 + boundary_id = local_num_boundary + + boundaries.neighbor_ids[boundary_id] = current_index + 1 + + init_boundary_node_indices!(boundaries, iface, boundary_id) + + # One-based indexing. + boundaries.name[boundary_id] = boundary_names[iface + 1, itree + 1] + + # Interface or mortar. + else + neighbor_level = t8_element_level(neighbor_scheme, neighbor_leaves[1]) + + # Local interface or mortar. + if all(neighbor_ielements .< num_local_elements) + + # Local interface: The second condition ensures we only visit the interface once. + if level == neighbor_level && current_index <= neighbor_ielements[1] + local_num_conform += 1 + + interfaces.neighbor_ids[1, local_num_conform] = current_index + + 1 + interfaces.neighbor_ids[2, local_num_conform] = neighbor_ielements[1] + + 1 + + init_interface_node_indices!(interfaces, (iface, dual_faces[1]), + orientation, + local_num_conform) + # Local mortar. + elseif level < neighbor_level + local_num_mortars += 1 + + # Last entry is the large element. + mortars.neighbor_ids[end, local_num_mortars] = current_index + 1 + + init_mortar_neighbor_ids!(mortars, iface, dual_faces[1], + orientation, neighbor_ielements, + local_num_mortars) + + init_mortar_node_indices!(mortars, (dual_faces[1], iface), + orientation, local_num_mortars) + + # else: `level > neighbor_level` is skipped since we visit the mortar interface only once. + end + + # MPI interface or MPI mortar. + else + + # MPI interface. 
+ if level == neighbor_level + local_num_mpi_conform += 1 + + neighbor_global_ghost_itree = ghost_global_treeids[findlast(ghost_tree_element_offsets .<= + neighbor_ielements[1])] + + neighbor_linear_id = neighbor_global_ghost_itree * + max_tree_num_elements + + t8_element_get_linear_id(neighbor_scheme, + neighbor_leaves[1], + max_level) + + if current_linear_id < neighbor_linear_id + local_side = 1 + smaller_iface = iface + smaller_linear_id = current_linear_id + faces = (iface, dual_faces[1]) + else + local_side = 2 + smaller_iface = dual_faces[1] + smaller_linear_id = neighbor_linear_id + faces = (dual_faces[1], iface) + end + + global_interface_id = 2 * ndims(mesh) * smaller_linear_id + + smaller_iface + + mpi_mesh_info.mpi_interfaces.local_neighbor_ids[local_num_mpi_conform] = current_index + + 1 + mpi_mesh_info.mpi_interfaces.local_sides[local_num_mpi_conform] = local_side + + init_mpi_interface_node_indices!(mpi_mesh_info.mpi_interfaces, + faces, local_side, orientation, + local_num_mpi_conform) + + neighbor_rank = remotes[findlast(ghost_remote_first_elem .<= + neighbor_ielements[1])] + mpi_mesh_info.neighbor_ranks_interface[local_num_mpi_conform] = neighbor_rank + + mpi_mesh_info.global_interface_ids[local_num_mpi_conform] = global_interface_id + + # MPI Mortar: from larger element point of view + elseif level < neighbor_level + local_num_mpi_mortars += 1 + + global_mortar_id = 2 * ndims(mesh) * current_linear_id + iface + + neighbor_ids = neighbor_ielements .+ 1 + + local_neighbor_positions = findall(neighbor_ids .<= + num_local_elements) + local_neighbor_ids = [neighbor_ids[i] + for i in local_neighbor_positions] + local_neighbor_positions = [map_iface_to_ichild_to_position[dual_faces[1] + 1][t8_element_child_id(neighbor_scheme, neighbor_leaves[i]) + 1] + for i in local_neighbor_positions] + + # Last entry is the large element. 
+ push!(local_neighbor_ids, current_index + 1) + push!(local_neighbor_positions, 2^(ndims(mesh) - 1) + 1) + + mpi_mesh_info.mpi_mortars.local_neighbor_ids[local_num_mpi_mortars] = local_neighbor_ids + mpi_mesh_info.mpi_mortars.local_neighbor_positions[local_num_mpi_mortars] = local_neighbor_positions + + init_mortar_node_indices!(mpi_mesh_info.mpi_mortars, + (dual_faces[1], iface), orientation, + local_num_mpi_mortars) + + neighbor_ranks = [remotes[findlast(ghost_remote_first_elem .<= + ineighbor_ghost)] + for ineighbor_ghost in filter(x -> x >= + num_local_elements, + neighbor_ielements)] + mpi_mesh_info.neighbor_ranks_mortar[local_num_mpi_mortars] = neighbor_ranks + + mpi_mesh_info.global_mortar_ids[local_num_mpi_mortars] = global_mortar_id + + # MPI Mortar: from smaller elements point of view + else + neighbor_global_ghost_itree = ghost_global_treeids[findlast(ghost_tree_element_offsets .<= + neighbor_ielements[1])] + neighbor_linear_id = neighbor_global_ghost_itree * + max_tree_num_elements + + t8_element_get_linear_id(neighbor_scheme, + neighbor_leaves[1], + max_level) + global_mortar_id = 2 * ndims(mesh) * neighbor_linear_id + + dual_faces[1] + + if global_mortar_id in visited_global_mortar_ids + local_mpi_mortar_id = global_mortar_id_to_local[global_mortar_id] + + push!(mpi_mesh_info.mpi_mortars.local_neighbor_ids[local_mpi_mortar_id], + current_index + 1) + push!(mpi_mesh_info.mpi_mortars.local_neighbor_positions[local_mpi_mortar_id], + map_iface_to_ichild_to_position[iface + 1][t8_element_child_id(eclass_scheme, element) + 1]) + else + local_num_mpi_mortars += 1 + local_mpi_mortar_id = local_num_mpi_mortars + push!(visited_global_mortar_ids, global_mortar_id) + global_mortar_id_to_local[global_mortar_id] = local_mpi_mortar_id + + mpi_mesh_info.mpi_mortars.local_neighbor_ids[local_mpi_mortar_id] = [ + current_index + 1, + ] + mpi_mesh_info.mpi_mortars.local_neighbor_positions[local_mpi_mortar_id] = [ + map_iface_to_ichild_to_position[iface + 1][t8_element_child_id(eclass_scheme, element) + 1], + ] + init_mortar_node_indices!(mpi_mesh_info.mpi_mortars, + (iface, dual_faces[1]), + orientation, local_mpi_mortar_id) + + neighbor_ranks = [ + remotes[findlast(ghost_remote_first_elem .<= + neighbor_ielements[1])], + ] + mpi_mesh_info.neighbor_ranks_mortar[local_mpi_mortar_id] = neighbor_ranks + + mpi_mesh_info.global_mortar_ids[local_mpi_mortar_id] = global_mortar_id + end + end + end + end + + t8_free(dual_faces_ref[]) + t8_free(pneighbor_leaves_ref[]) + t8_free(pelement_indices_ref[]) + end # for iface + + current_index += 1 + end # for ielement + end # for itree + + return nothing +end + +#! format: off +@deprecate T8codeMesh{2}(conn::Ptr{p4est_connectivity}; kwargs...) T8codeMesh(conn::Ptr{p4est_connectivity}; kwargs...) +@deprecate T8codeMesh{3}(conn::Ptr{p8est_connectivity}; kwargs...) T8codeMesh(conn::Ptr{p8est_connectivity}; kwargs...) +@deprecate T8codeMesh{2}(meshfile::String; kwargs...) T8codeMesh(meshfile::String, 2; kwargs...) +@deprecate T8codeMesh{3}(meshfile::String; kwargs...) T8codeMesh(meshfile::String, 3; kwargs...) +#! 
format: on diff --git a/src/semidiscretization/semidiscretization.jl b/src/semidiscretization/semidiscretization.jl index fe7858e31ee..8518cf27fd3 100644 --- a/src/semidiscretization/semidiscretization.jl +++ b/src/semidiscretization/semidiscretization.jl @@ -253,6 +253,9 @@ end function _jacobian_ad_forward(semi, t0, u0_ode, du_ode, config) new_semi = remake(semi, uEltype = eltype(config)) + # Create anonymous function passed as first argument to `ForwardDiff.jacobian` to match + # `ForwardDiff.jacobian(f!, y::AbstractArray, x::AbstractArray, + # cfg::JacobianConfig = JacobianConfig(f!, y, x), check=Val{true}())` J = ForwardDiff.jacobian(du_ode, u0_ode, config) do du_ode, u_ode Trixi.rhs!(du_ode, u_ode, new_semi, t0) end @@ -279,6 +282,9 @@ end function _jacobian_ad_forward_structarrays(semi, t0, u0_ode_plain, du_ode_plain, config) new_semi = remake(semi, uEltype = eltype(config)) + # Create anonymous function passed as first argument to `ForwardDiff.jacobian` to match + # `ForwardDiff.jacobian(f!, y::AbstractArray, x::AbstractArray, + # cfg::JacobianConfig = JacobianConfig(f!, y, x), check=Val{true}())` J = ForwardDiff.jacobian(du_ode_plain, u0_ode_plain, config) do du_ode_plain, u_ode_plain u_ode = StructArray{SVector{nvariables(semi), eltype(config)}}(ntuple(v -> view(u_ode_plain, diff --git a/src/semidiscretization/semidiscretization_coupled.jl b/src/semidiscretization/semidiscretization_coupled.jl index 0941ae6a8ca..dc21dbe9a1e 100644 --- a/src/semidiscretization/semidiscretization_coupled.jl +++ b/src/semidiscretization/semidiscretization_coupled.jl @@ -1,3 +1,10 @@ +# By default, Julia/LLVM does not use fused multiply-add operations (FMAs). +# Since these FMAs can increase the performance of many numerical algorithms, +# we need to opt-in explicitly. +# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details. +@muladd begin +#! format: noindent + """ SemidiscretizationCoupled @@ -65,11 +72,13 @@ function Base.show(io::IO, ::MIME"text/plain", semi::SemidiscretizationCoupled) summary_line(io, "system", i) mesh, equations, solver, _ = mesh_equations_solver_cache(semi.semis[i]) summary_line(increment_indent(io), "mesh", mesh |> typeof |> nameof) - summary_line(increment_indent(io), "equations", equations |> typeof |> nameof) + summary_line(increment_indent(io), "equations", + equations |> typeof |> nameof) summary_line(increment_indent(io), "initial condition", semi.semis[i].initial_condition) # no boundary conditions since that could be too much - summary_line(increment_indent(io), "source terms", semi.semis[i].source_terms) + summary_line(increment_indent(io), "source terms", + semi.semis[i].source_terms) summary_line(increment_indent(io), "solver", solver |> typeof |> nameof) end summary_line(io, "total #DOFs per field", ndofs(semi)) @@ -106,20 +115,14 @@ end @inline Base.real(semi::SemidiscretizationCoupled) = promote_type(real.(semi.semis)...) -@inline Base.eltype(semi::SemidiscretizationCoupled) = promote_type(eltype.(semi.semis)...) +@inline function Base.eltype(semi::SemidiscretizationCoupled) + promote_type(eltype.(semi.semis)...) 
+end @inline function ndofs(semi::SemidiscretizationCoupled) sum(ndofs, semi.semis) end -@inline function nelements(semi::SemidiscretizationCoupled) - return sum(semi.semis) do semi_ - mesh, equations, solver, cache = mesh_equations_solver_cache(semi_) - - nelements(mesh, solver, cache) - end -end - function compute_coefficients(t, semi::SemidiscretizationCoupled) @unpack u_indices = semi @@ -137,23 +140,40 @@ end @view u_ode[semi.u_indices[index]] end +# Same as `foreach(enumerate(something))`, but without allocations. +# +# Note that compile times may increase if this is used with big tuples. +@inline foreach_enumerate(func, collection) = foreach_enumerate(func, collection, 1) +@inline foreach_enumerate(func, collection::Tuple{}, index) = nothing + +@inline function foreach_enumerate(func, collection, index) + element = first(collection) + remaining_collection = Base.tail(collection) + + func((index, element)) + + # Process remaining collection + foreach_enumerate(func, remaining_collection, index + 1) +end + function rhs!(du_ode, u_ode, semi::SemidiscretizationCoupled, t) @unpack u_indices = semi time_start = time_ns() @trixi_timeit timer() "copy to coupled boundaries" begin - for semi_ in semi.semis - copy_to_coupled_boundary!(semi_.boundary_conditions, u_ode, semi) + foreach(semi.semis) do semi_ + copy_to_coupled_boundary!(semi_.boundary_conditions, u_ode, semi, semi_) end end # Call rhs! for each semidiscretization - for i in eachsystem(semi) - u_loc = get_system_u_ode(u_ode, i, semi) - du_loc = get_system_u_ode(du_ode, i, semi) - - @trixi_timeit timer() "system #$i" rhs!(du_loc, u_loc, semi.semis[i], t) + @trixi_timeit timer() "copy to coupled boundaries" begin + foreach_enumerate(semi.semis) do (i, semi_) + u_loc = get_system_u_ode(u_ode, i, semi) + du_loc = get_system_u_ode(du_ode, i, semi) + rhs!(du_loc, u_loc, semi_, t) + end end runtime = time_ns() - time_start @@ -309,7 +329,8 @@ end for i in eachsystem(semi) u_ode_slice = get_system_u_ode(u_ode, i, semi) - save_solution_file(semis[i], u_ode_slice, solution_callback, integrator, system = i) + save_solution_file(semis[i], u_ode_slice, solution_callback, integrator, + system = i) end end @@ -332,7 +353,7 @@ end ################################################################################ """ - BoundaryConditionCoupled(other_semi_index, indices, uEltype) + BoundaryConditionCoupled(other_semi_index, indices, uEltype, coupling_converter) Boundary condition to glue two meshes together. Solution values at the boundary of another mesh will be used as boundary values. This requires the use @@ -348,32 +369,37 @@ This is currently only implemented for [`StructuredMesh`](@ref). - `indices::Tuple`: node/cell indices at the boundary of the mesh in the other semidiscretization. See examples below. 
- `uEltype::Type`: element type of solution +- `coupling_converter::CouplingConverter`: function to call for converting the solution + state of one system to the other system # Examples ```julia # Connect the left boundary of mesh 2 to our boundary such that our positive # boundary direction will match the positive y direction of the other boundary -BoundaryConditionCoupled(2, (:begin, :i), Float64) +BoundaryConditionCoupled(2, (:begin, :i), Float64, fun) # Connect the same two boundaries oppositely oriented -BoundaryConditionCoupled(2, (:begin, :i_backwards), Float64) +BoundaryConditionCoupled(2, (:begin, :i_backwards), Float64, fun) # Using this as y_neg boundary will connect `our_cells[i, 1, j]` to `other_cells[j, end-i, end]` -BoundaryConditionCoupled(2, (:j, :i_backwards, :end), Float64) +BoundaryConditionCoupled(2, (:j, :i_backwards, :end), Float64, fun) ``` !!! warning "Experimental code" This is an experimental feature and can change any time. """ -mutable struct BoundaryConditionCoupled{NDIMS, NDIMST2M1, uEltype <: Real, Indices} +mutable struct BoundaryConditionCoupled{NDIMS, NDIMST2M1, uEltype <: Real, Indices, + CouplingConverter} # NDIMST2M1 == NDIMS * 2 - 1 # Buffer for boundary values: [variable, nodes_i, nodes_j, cell_i, cell_j] - u_boundary :: Array{uEltype, NDIMST2M1} # NDIMS * 2 - 1 - other_semi_index :: Int - other_orientation :: Int - indices :: Indices - - function BoundaryConditionCoupled(other_semi_index, indices, uEltype) + u_boundary :: Array{uEltype, NDIMST2M1} # NDIMS * 2 - 1 + other_semi_index :: Int + other_orientation :: Int + indices :: Indices + coupling_converter :: CouplingConverter + + function BoundaryConditionCoupled(other_semi_index, indices, uEltype, + coupling_converter) NDIMS = length(indices) u_boundary = Array{uEltype, NDIMS * 2 - 1}(undef, ntuple(_ -> 0, NDIMS * 2 - 1)) @@ -385,8 +411,10 @@ mutable struct BoundaryConditionCoupled{NDIMS, NDIMST2M1, uEltype <: Real, Indic other_orientation = 3 end - new{NDIMS, NDIMS * 2 - 1, uEltype, typeof(indices)}(u_boundary, other_semi_index, - other_orientation, indices) + new{NDIMS, NDIMS * 2 - 1, uEltype, typeof(indices), + typeof(coupling_converter)}(u_boundary, + other_semi_index, other_orientation, + indices, coupling_converter) end end @@ -395,8 +423,10 @@ function Base.eltype(boundary_condition::BoundaryConditionCoupled) end function (boundary_condition::BoundaryConditionCoupled)(u_inner, orientation, direction, - cell_indices, surface_node_indices, - surface_flux_function, equations) + cell_indices, + surface_node_indices, + surface_flux_function, + equations) # get_node_vars(boundary_condition.u_boundary, equations, solver, surface_node_indices..., cell_indices...), # but we don't have a solver here u_boundary = SVector(ntuple(v -> boundary_condition.u_boundary[v, @@ -421,13 +451,15 @@ function allocate_coupled_boundary_conditions(semi::AbstractSemidiscretization) for direction in 1:n_boundaries boundary_condition = semi.boundary_conditions[direction] - allocate_coupled_boundary_condition(boundary_condition, direction, mesh, equations, + allocate_coupled_boundary_condition(boundary_condition, direction, mesh, + equations, solver) end end # Don't do anything for other BCs than BoundaryConditionCoupled -function allocate_coupled_boundary_condition(boundary_condition, direction, mesh, equations, +function allocate_coupled_boundary_condition(boundary_condition, direction, mesh, + equations, solver) return nothing end @@ -448,43 +480,69 @@ function 
allocate_coupled_boundary_condition(boundary_condition::BoundaryConditi end # Don't do anything for other BCs than BoundaryConditionCoupled -function copy_to_coupled_boundary!(boundary_condition, u_ode, semi) +function copy_to_coupled_boundary!(boundary_condition, u_ode, semi_coupled, semi) return nothing end +function copy_to_coupled_boundary!(u_ode, semi_coupled, semi, i, n_boundaries, + boundary_condition, boundary_conditions...) + copy_to_coupled_boundary!(boundary_condition, u_ode, semi_coupled, semi) + if i < n_boundaries + copy_to_coupled_boundary!(u_ode, semi_coupled, semi, i + 1, n_boundaries, + boundary_conditions...) + end +end + function copy_to_coupled_boundary!(boundary_conditions::Union{Tuple, NamedTuple}, u_ode, - semi) - for boundary_condition in boundary_conditions - copy_to_coupled_boundary!(boundary_condition, u_ode, semi) + semi_coupled, semi) + copy_to_coupled_boundary!(u_ode, semi_coupled, semi, 1, length(boundary_conditions), + boundary_conditions...) +end + +function mesh_equations_solver_cache(other_semi_index, i, semi_, semi_tuple...) + if i == other_semi_index + return mesh_equations_solver_cache(semi_) + else + # Walk through semidiscretizations until we find `i` + mesh_equations_solver_cache(other_semi_index, i + 1, semi_tuple...) end end # In 2D -function copy_to_coupled_boundary!(boundary_condition::BoundaryConditionCoupled{2}, u_ode, - semi) - @unpack u_indices = semi +function copy_to_coupled_boundary!(boundary_condition::BoundaryConditionCoupled{2}, + u_ode, + semi_coupled, semi) + @unpack u_indices = semi_coupled @unpack other_semi_index, other_orientation, indices = boundary_condition + @unpack coupling_converter, u_boundary = boundary_condition + + mesh_own, equations_own, solver_own, cache_own = mesh_equations_solver_cache(semi) + + mesh_other, equations_other, solver_other, cache_other = mesh_equations_solver_cache(other_semi_index, + 1, + semi_coupled.semis...) - mesh, equations, solver, cache = mesh_equations_solver_cache(semi.semis[other_semi_index]) - u = wrap_array(get_system_u_ode(u_ode, other_semi_index, semi), mesh, equations, solver, - cache) + node_coordinates_other = cache_other.elements.node_coordinates + u_ode_other = get_system_u_ode(u_ode, other_semi_index, semi_coupled) + u_other = wrap_array(u_ode_other, mesh_other, equations_other, solver_other, + cache_other) - linear_indices = LinearIndices(size(mesh)) + linear_indices = LinearIndices(size(mesh_other)) if other_orientation == 1 - cells = axes(mesh, 2) + cells = axes(mesh_other, 2) else # other_orientation == 2 - cells = axes(mesh, 1) + cells = axes(mesh_other, 1) end # Copy solution data to the coupled boundary using "delayed indexing" with # a start value and a step size to get the correct face and orientation. 
- node_index_range = eachnode(solver) + node_index_range = eachnode(solver_other) i_node_start, i_node_step = index_to_start_step_2d(indices[1], node_index_range) j_node_start, j_node_step = index_to_start_step_2d(indices[2], node_index_range) - i_cell_start, i_cell_step = index_to_start_step_2d(indices[1], axes(mesh, 1)) - j_cell_start, j_cell_step = index_to_start_step_2d(indices[2], axes(mesh, 2)) + i_cell_start, i_cell_step = index_to_start_step_2d(indices[1], axes(mesh_other, 1)) + j_cell_start, j_cell_step = index_to_start_step_2d(indices[2], axes(mesh_other, 2)) i_cell = i_cell_start j_cell = j_cell_start @@ -492,16 +550,26 @@ function copy_to_coupled_boundary!(boundary_condition::BoundaryConditionCoupled{ for cell in cells i_node = i_node_start j_node = j_node_start - - for i in eachnode(solver) - for v in 1:size(u, 1) - boundary_condition.u_boundary[v, i, cell] = u[v, i_node, j_node, - linear_indices[i_cell, - j_cell]] + element_id = linear_indices[i_cell, j_cell] + + for element_id in eachnode(solver_other) + x_other = get_node_coords(node_coordinates_other, equations_other, + solver_other, + i_node, j_node, linear_indices[i_cell, j_cell]) + u_node_other = get_node_vars(u_other, equations_other, solver_other, i_node, + j_node, linear_indices[i_cell, j_cell]) + u_node_converted = coupling_converter(x_other, u_node_other, + equations_other, + equations_own) + + for i in eachindex(u_node_converted) + u_boundary[i, element_id, cell] = u_node_converted[i] end + i_node += i_node_step j_node += j_node_step end + i_cell += i_cell_step j_cell += j_cell_step end @@ -511,7 +579,8 @@ end ### DGSEM/structured ################################################################################ -@inline function calc_boundary_flux_by_direction!(surface_flux_values, u, t, orientation, +@inline function calc_boundary_flux_by_direction!(surface_flux_values, u, t, + orientation, boundary_condition::BoundaryConditionCoupled, mesh::StructuredMesh, equations, surface_integral, dg::DG, cache, @@ -531,7 +600,8 @@ end sign_jacobian = sign(inverse_jacobian[node_indices..., element]) # Contravariant vector Ja^i is the normal vector - normal = sign_jacobian * get_contravariant_vector(orientation, contravariant_vectors, + normal = sign_jacobian * + get_contravariant_vector(orientation, contravariant_vectors, node_indices..., element) # If the mapping is orientation-reversing, the normal vector will be reversed (see above). @@ -608,3 +678,4 @@ function analyze_convergence(errors_coupled, iterations, return eoc_mean_values end +end # @muladd diff --git a/src/solvers/dgsem_p4est/containers_3d.jl b/src/solvers/dgsem_p4est/containers_3d.jl index e9994fe4569..7e383924ba7 100644 --- a/src/solvers/dgsem_p4est/containers_3d.jl +++ b/src/solvers/dgsem_p4est/containers_3d.jl @@ -6,7 +6,8 @@ #! format: noindent # Initialize data structures in element container -function init_elements!(elements, mesh::P4estMesh{3}, basis::LobattoLegendreBasis) +function init_elements!(elements, mesh::Union{P4estMesh{3}, T8codeMesh{3}}, + basis::LobattoLegendreBasis) @unpack node_coordinates, jacobian_matrix, contravariant_vectors, inverse_jacobian = elements @@ -26,7 +27,7 @@ end # Interpolate tree_node_coordinates to each quadrant at the nodes of the specified basis function calc_node_coordinates!(node_coordinates, - mesh::P4estMesh{3}, + mesh::Union{P4estMesh{3}, T8codeMesh{3}}, basis::LobattoLegendreBasis) # Hanging nodes will cause holes in the mesh if its polydeg is higher # than the polydeg of the solver. 
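The coupling hunks above extend `BoundaryConditionCoupled` with a fourth argument, a `coupling_converter`, which `copy_to_coupled_boundary!` now calls once per boundary node with the node coordinates, the neighbor's state, and both equation sets before the converted result is written into `u_boundary`. As a minimal sketch of such a converter (the name `coupling_converter_identity` is illustrative and not part of this patch; the index tuple mirrors the docstring example above):

```julia
using Trixi

# Hypothetical identity converter: both coupled systems use the same variables,
# so the neighbor's state is passed through unchanged. The signature matches
# the call site in `copy_to_coupled_boundary!`:
# (x, u, equations_other, equations_own) -> converted state
coupling_converter_identity = (x, u, equations_other, equations_own) -> u

# Glue our boundary to the left boundary of mesh 2, forwarding the converted
# state; `Float64` is the element type of the solution.
boundary_condition = BoundaryConditionCoupled(2, (:begin, :i), Float64,
                                              coupling_converter_identity)
```

Since only the converted state `u_node_converted` is stored in `u_boundary`, a converter can also translate between different variable sets of the two coupled systems, which is the point of threading it through the boundary condition.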
diff --git a/src/solvers/dgsem_p4est/containers_parallel.jl b/src/solvers/dgsem_p4est/containers_parallel.jl index 7c7bd868457..fd2749155bb 100644 --- a/src/solvers/dgsem_p4est/containers_parallel.jl +++ b/src/solvers/dgsem_p4est/containers_parallel.jl @@ -43,7 +43,8 @@ function Base.resize!(mpi_interfaces::P4estMPIInterfaceContainer, capacity) end # Create MPI interface container and initialize interface data -function init_mpi_interfaces(mesh::ParallelP4estMesh, equations, basis, elements) +function init_mpi_interfaces(mesh::Union{ParallelP4estMesh, ParallelT8codeMesh}, + equations, basis, elements) NDIMS = ndims(elements) uEltype = eltype(elements) @@ -133,7 +134,8 @@ function Base.resize!(mpi_mortars::P4estMPIMortarContainer, capacity) end # Create MPI mortar container and initialize MPI mortar data -function init_mpi_mortars(mesh::ParallelP4estMesh, equations, basis, elements) +function init_mpi_mortars(mesh::Union{ParallelP4estMesh, ParallelT8codeMesh}, equations, + basis, elements) NDIMS = ndims(mesh) RealT = real(mesh) uEltype = eltype(elements) diff --git a/src/solvers/dgsem_p4est/dg_2d_parabolic.jl b/src/solvers/dgsem_p4est/dg_2d_parabolic.jl index a7f3345168f..ed21f371449 100644 --- a/src/solvers/dgsem_p4est/dg_2d_parabolic.jl +++ b/src/solvers/dgsem_p4est/dg_2d_parabolic.jl @@ -1,7 +1,15 @@ +# By default, Julia/LLVM does not use fused multiply-add operations (FMAs). +# Since these FMAs can increase the performance of many numerical algorithms, +# we need to opt-in explicitly. +# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details. +@muladd begin +#! format: noindent + # This method is called when a SemidiscretizationHyperbolicParabolic is constructed. # It constructs the basic `cache` used throughout the simulation to compute # the RHS etc. 
-function create_cache_parabolic(mesh::P4estMesh{2}, equations_hyperbolic::AbstractEquations, +function create_cache_parabolic(mesh::P4estMesh{2}, + equations_hyperbolic::AbstractEquations, equations_parabolic::AbstractEquationsParabolic, dg::DG, parabolic_scheme, RealT, uEltype) balance!(mesh) @@ -19,9 +27,29 @@ function create_cache_parabolic(mesh::P4estMesh{2}, equations_hyperbolic::Abstra return cache end -# TODO: Remove in favor of the implementation for the TreeMesh -# once the P4estMesh can handle mortars as well -function rhs_parabolic!(du, u, t, mesh::P4estMesh{2}, +#= +Reusing `rhs_parabolic!` for `TreeMesh`es is not easily possible as +for `P4estMesh`es we call + + ``` + prolong2mortars_divergence!(cache, flux_viscous, mesh, equations_parabolic, + dg.mortar, dg.surface_integral, dg) + + calc_mortar_flux_divergence!(cache_parabolic.elements.surface_flux_values, + mesh, equations_parabolic, dg.mortar, + dg.surface_integral, dg, cache) + ``` +instead of + ``` + prolong2mortars!(cache, flux_viscous, mesh, equations_parabolic, + dg.mortar, dg.surface_integral, dg) + + calc_mortar_flux!(cache_parabolic.elements.surface_flux_values, mesh, + equations_parabolic, + dg.mortar, dg.surface_integral, dg, cache) + ``` +=# +function rhs_parabolic!(du, u, t, mesh::Union{P4estMesh{2}, P4estMesh{3}}, equations_parabolic::AbstractEquationsParabolic, initial_condition, boundary_conditions_parabolic, source_terms, dg::DG, parabolic_scheme, cache, cache_parabolic) @@ -147,12 +175,14 @@ function calc_gradient!(gradients, u_transformed, t, element) for ii in eachnode(dg) - multiply_add_to_node_vars!(gradients_x, derivative_dhat[ii, i], u_node, + multiply_add_to_node_vars!(gradients_x, derivative_dhat[ii, i], + u_node, equations_parabolic, dg, ii, j, element) end for jj in eachnode(dg) - multiply_add_to_node_vars!(gradients_y, derivative_dhat[jj, j], u_node, + multiply_add_to_node_vars!(gradients_y, derivative_dhat[jj, j], + u_node, equations_parabolic, dg, i, jj, element) end end @@ -165,9 +195,11 @@ function calc_gradient!(gradients, u_transformed, t, Ja21, Ja22 = get_contravariant_vector(2, contravariant_vectors, i, j, element) - gradients_reference_1 = get_node_vars(gradients_x, equations_parabolic, dg, + gradients_reference_1 = get_node_vars(gradients_x, equations_parabolic, + dg, i, j, element) - gradients_reference_2 = get_node_vars(gradients_y, equations_parabolic, dg, + gradients_reference_2 = get_node_vars(gradients_y, equations_parabolic, + dg, i, j, element) # note that the contravariant vectors are transposed compared with computations of flux @@ -179,9 +211,11 @@ function calc_gradient!(gradients, u_transformed, t, gradient_y_node = Ja12 * gradients_reference_1 + Ja22 * gradients_reference_2 - set_node_vars!(gradients_x, gradient_x_node, equations_parabolic, dg, i, j, + set_node_vars!(gradients_x, gradient_x_node, equations_parabolic, dg, i, + j, element) - set_node_vars!(gradients_y, gradient_y_node, equations_parabolic, dg, i, j, + set_node_vars!(gradients_y, gradient_y_node, equations_parabolic, dg, i, + j, element) end end @@ -199,7 +233,8 @@ function calc_gradient!(gradients, u_transformed, t, @trixi_timeit timer() "interface flux" begin calc_interface_flux!(cache_parabolic.elements.surface_flux_values, mesh, False(), # False() = no nonconservative terms - equations_parabolic, dg.surface_integral, dg, cache_parabolic) + equations_parabolic, dg.surface_integral, dg, + cache_parabolic) end # Prolong solution to boundaries @@ -211,7 +246,8 @@ function calc_gradient!(gradients, 
u_transformed, t, # Calculate boundary fluxes @trixi_timeit timer() "boundary flux" begin calc_boundary_flux_gradients!(cache_parabolic, t, boundary_conditions_parabolic, - mesh, equations_parabolic, dg.surface_integral, dg) + mesh, equations_parabolic, dg.surface_integral, + dg) end # Prolong solution to mortars. This resues the hyperbolic version of `prolong2mortars` @@ -248,70 +284,94 @@ function calc_gradient!(gradients, u_transformed, t, # Compute x-component of gradients # surface at -x - normal_direction_x, _ = get_normal_direction(1, contravariant_vectors, + normal_direction_x, _ = get_normal_direction(1, + contravariant_vectors, 1, l, element) gradients_x[v, 1, l, element] = (gradients_x[v, 1, l, element] + - surface_flux_values[v, l, 1, element] * + surface_flux_values[v, l, 1, + element] * factor_1 * normal_direction_x) # surface at +x - normal_direction_x, _ = get_normal_direction(2, contravariant_vectors, + normal_direction_x, _ = get_normal_direction(2, + contravariant_vectors, nnodes(dg), l, element) - gradients_x[v, nnodes(dg), l, element] = (gradients_x[v, nnodes(dg), l, + gradients_x[v, nnodes(dg), l, element] = (gradients_x[v, nnodes(dg), + l, element] + - surface_flux_values[v, l, 2, + surface_flux_values[v, l, + 2, element] * - factor_2 * normal_direction_x) + factor_2 * + normal_direction_x) # surface at -y - normal_direction_x, _ = get_normal_direction(3, contravariant_vectors, + normal_direction_x, _ = get_normal_direction(3, + contravariant_vectors, l, 1, element) gradients_x[v, l, 1, element] = (gradients_x[v, l, 1, element] + - surface_flux_values[v, l, 3, element] * + surface_flux_values[v, l, 3, + element] * factor_1 * normal_direction_x) # surface at +y - normal_direction_x, _ = get_normal_direction(4, contravariant_vectors, + normal_direction_x, _ = get_normal_direction(4, + contravariant_vectors, l, nnodes(dg), element) - gradients_x[v, l, nnodes(dg), element] = (gradients_x[v, l, nnodes(dg), + gradients_x[v, l, nnodes(dg), element] = (gradients_x[v, l, + nnodes(dg), element] + - surface_flux_values[v, l, 4, + surface_flux_values[v, l, + 4, element] * - factor_2 * normal_direction_x) + factor_2 * + normal_direction_x) # Compute y-component of gradients # surface at -x - _, normal_direction_y = get_normal_direction(1, contravariant_vectors, + _, normal_direction_y = get_normal_direction(1, + contravariant_vectors, 1, l, element) gradients_y[v, 1, l, element] = (gradients_y[v, 1, l, element] + - surface_flux_values[v, l, 1, element] * + surface_flux_values[v, l, 1, + element] * factor_1 * normal_direction_y) # surface at +x - _, normal_direction_y = get_normal_direction(2, contravariant_vectors, + _, normal_direction_y = get_normal_direction(2, + contravariant_vectors, nnodes(dg), l, element) - gradients_y[v, nnodes(dg), l, element] = (gradients_y[v, nnodes(dg), l, + gradients_y[v, nnodes(dg), l, element] = (gradients_y[v, nnodes(dg), + l, element] + - surface_flux_values[v, l, 2, + surface_flux_values[v, l, + 2, element] * - factor_2 * normal_direction_y) + factor_2 * + normal_direction_y) # surface at -y - _, normal_direction_y = get_normal_direction(3, contravariant_vectors, + _, normal_direction_y = get_normal_direction(3, + contravariant_vectors, l, 1, element) gradients_y[v, l, 1, element] = (gradients_y[v, l, 1, element] + - surface_flux_values[v, l, 3, element] * + surface_flux_values[v, l, 3, + element] * factor_1 * normal_direction_y) # surface at +y - _, normal_direction_y = get_normal_direction(4, contravariant_vectors, + _, normal_direction_y = 
get_normal_direction(4, + contravariant_vectors, l, nnodes(dg), element) - gradients_y[v, l, nnodes(dg), element] = (gradients_y[v, l, nnodes(dg), + gradients_y[v, l, nnodes(dg), element] = (gradients_y[v, l, + nnodes(dg), element] + - surface_flux_values[v, l, 4, + surface_flux_values[v, l, + 4, element] * - factor_2 * normal_direction_y) + factor_2 * + normal_direction_y) end end end @@ -424,24 +484,30 @@ function calc_volume_integral!(du, flux_viscous, @threaded for element in eachelement(dg, cache) # Calculate volume terms in one element for j in eachnode(dg), i in eachnode(dg) - flux1 = get_node_vars(flux_viscous_x, equations_parabolic, dg, i, j, element) - flux2 = get_node_vars(flux_viscous_y, equations_parabolic, dg, i, j, element) + flux1 = get_node_vars(flux_viscous_x, equations_parabolic, dg, i, j, + element) + flux2 = get_node_vars(flux_viscous_y, equations_parabolic, dg, i, j, + element) # Compute the contravariant flux by taking the scalar product of the # first contravariant vector Ja^1 and the flux vector - Ja11, Ja12 = get_contravariant_vector(1, contravariant_vectors, i, j, element) + Ja11, Ja12 = get_contravariant_vector(1, contravariant_vectors, i, j, + element) contravariant_flux1 = Ja11 * flux1 + Ja12 * flux2 for ii in eachnode(dg) - multiply_add_to_node_vars!(du, derivative_dhat[ii, i], contravariant_flux1, + multiply_add_to_node_vars!(du, derivative_dhat[ii, i], + contravariant_flux1, equations_parabolic, dg, ii, j, element) end # Compute the contravariant flux by taking the scalar product of the # second contravariant vector Ja^2 and the flux vector - Ja21, Ja22 = get_contravariant_vector(2, contravariant_vectors, i, j, element) + Ja21, Ja22 = get_contravariant_vector(2, contravariant_vectors, i, j, + element) contravariant_flux2 = Ja21 * flux1 + Ja22 * flux2 for jj in eachnode(dg) - multiply_add_to_node_vars!(du, derivative_dhat[jj, j], contravariant_flux2, + multiply_add_to_node_vars!(du, derivative_dhat[jj, j], + contravariant_flux2, equations_parabolic, dg, i, jj, element) end end @@ -483,7 +549,8 @@ function prolong2interfaces!(cache_parabolic, flux_viscous, # this is the outward normal direction on the primary element normal_direction = get_normal_direction(primary_direction, contravariant_vectors, - i_primary, j_primary, primary_element) + i_primary, j_primary, + primary_element) for v in eachvariable(equations_parabolic) # OBS! `interfaces.u` stores the interpolated *fluxes* and *not the solution*! @@ -582,7 +649,8 @@ function calc_interface_flux!(surface_flux_values, # primary element. We assume a BR-1 type of flux. 
viscous_flux_normal_ll, viscous_flux_normal_rr = get_surface_node_vars(cache_parabolic.interfaces.u, equations_parabolic, - dg, node, + dg, + node, interface) flux = 0.5 * (viscous_flux_normal_ll + viscous_flux_normal_rr) @@ -604,9 +672,11 @@ function calc_interface_flux!(surface_flux_values, end function prolong2mortars_divergence!(cache, flux_viscous::Vector{Array{uEltype, 4}}, - mesh::Union{P4estMesh{2}, T8codeMesh{2}}, equations, + mesh::Union{P4estMesh{2}, T8codeMesh{2}}, + equations, mortar_l2::LobattoLegendreMortarL2, - surface_integral, dg::DGSEM) where {uEltype <: Real} + surface_integral, + dg::DGSEM) where {uEltype <: Real} @unpack neighbor_ids, node_indices = cache.mortars @unpack contravariant_vectors = cache.elements index_range = eachnode(dg) @@ -663,7 +733,8 @@ function prolong2mortars_divergence!(cache, flux_viscous::Vector{Array{uEltype, j_large = j_large_start element = neighbor_ids[3, mortar] for i in eachnode(dg) - normal_direction = get_normal_direction(direction_index, contravariant_vectors, + normal_direction = get_normal_direction(direction_index, + contravariant_vectors, i_large, j_large, element) for v in eachvariable(equations) @@ -712,8 +783,10 @@ function calc_mortar_flux_divergence!(surface_flux_values, for position in 1:2 for node in eachnode(dg) for v in eachvariable(equations) - viscous_flux_normal_ll = cache.mortars.u[1, v, position, node, mortar] - viscous_flux_normal_rr = cache.mortars.u[2, v, position, node, mortar] + viscous_flux_normal_ll = cache.mortars.u[1, v, position, node, + mortar] + viscous_flux_normal_rr = cache.mortars.u[2, v, position, node, + mortar] # TODO: parabolic; only BR1 at the moment fstar[position][v, node] = 0.5 * (viscous_flux_normal_ll + @@ -804,7 +877,8 @@ end function calc_boundary_flux_gradients!(cache, t, boundary_condition::Union{BoundaryConditionPeriodic, BoundaryConditionDoNothing}, - mesh::P4estMesh, equations, surface_integral, dg::DG) + mesh::P4estMesh, equations, surface_integral, + dg::DG) @assert isempty(eachboundary(dg, cache)) end @@ -893,7 +967,8 @@ function calc_boundary_flux!(cache, t, boundary_index) # Outward-pointing normal direction (not normalized) - normal_direction = get_normal_direction(direction_index, contravariant_vectors, + normal_direction = get_normal_direction(direction_index, + contravariant_vectors, i_node, j_node, element) # TODO: revisit if we want more general boundary treatments. 
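Both the interface flux and the mortar divergence flux in the hunks above couple the viscous terms BR1-style: the numerical flux is simply the arithmetic mean of the two normal viscous fluxes stored in `interfaces.u` / `mortars.u`. A minimal, standalone sketch of that coupling (the helper name `br1_viscous_flux` is illustrative, not Trixi API):

```julia
using StaticArrays

# BR1-type coupling at an interface or mortar: average the viscous fluxes that
# have already been projected onto the outward normal direction on both sides.
br1_viscous_flux(flux_normal_ll, flux_normal_rr) = 0.5 * (flux_normal_ll + flux_normal_rr)

# Example with two SVector-valued normal fluxes:
flux_normal_ll = SVector(1.0, 0.5)
flux_normal_rr = SVector(0.8, 0.7)
br1_viscous_flux(flux_normal_ll, flux_normal_rr)  # -> SVector(0.9, 0.6)
```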
@@ -902,11 +977,13 @@ function calc_boundary_flux!(cache, t, flux_inner = u_inner # Coordinates at boundary node - x = get_node_coords(node_coordinates, equations_parabolic, dg, i_node, j_node, + x = get_node_coords(node_coordinates, equations_parabolic, dg, i_node, + j_node, element) flux_ = boundary_condition_parabolic(flux_inner, u_inner, normal_direction, - x, t, operator_type, equations_parabolic) + x, t, operator_type, + equations_parabolic) # Copy flux to element storage in the correct orientation for v in eachvariable(equations_parabolic) @@ -918,3 +995,22 @@ function calc_boundary_flux!(cache, t, end end end + +function apply_jacobian_parabolic!(du, mesh::P4estMesh{2}, + equations::AbstractEquationsParabolic, + dg::DG, cache) + @unpack inverse_jacobian = cache.elements + + @threaded for element in eachelement(dg, cache) + for j in eachnode(dg), i in eachnode(dg) + factor = inverse_jacobian[i, j, element] + + for v in eachvariable(equations) + du[v, i, j, element] *= factor + end + end + end + + return nothing +end +end # @muladd diff --git a/src/solvers/dgsem_p4est/dg_2d_parallel.jl b/src/solvers/dgsem_p4est/dg_2d_parallel.jl index a8887351c46..3bf0cd0cab5 100644 --- a/src/solvers/dgsem_p4est/dg_2d_parallel.jl +++ b/src/solvers/dgsem_p4est/dg_2d_parallel.jl @@ -6,7 +6,8 @@ #! format: noindent function prolong2mpiinterfaces!(cache, u, - mesh::ParallelP4estMesh{2}, + mesh::Union{ParallelP4estMesh{2}, + ParallelT8codeMesh{2}}, equations, surface_integral, dg::DG) @unpack mpi_interfaces = cache index_range = eachnode(dg) @@ -43,7 +44,8 @@ function prolong2mpiinterfaces!(cache, u, end function calc_mpi_interface_flux!(surface_flux_values, - mesh::ParallelP4estMesh{2}, + mesh::Union{ParallelP4estMesh{2}, + ParallelT8codeMesh{2}}, nonconservative_terms, equations, surface_integral, dg::DG, cache) @unpack local_neighbor_ids, node_indices, local_sides = cache.mpi_interfaces @@ -106,7 +108,8 @@ end # Inlined version of the interface flux computation for conservation laws @inline function calc_mpi_interface_flux!(surface_flux_values, - mesh::P4estMesh{2}, + mesh::Union{ParallelP4estMesh{2}, + ParallelT8codeMesh{2}}, nonconservative_terms::False, equations, surface_integral, dg::DG, cache, interface_index, normal_direction, @@ -131,7 +134,8 @@ end end function prolong2mpimortars!(cache, u, - mesh::ParallelP4estMesh{2}, equations, + mesh::Union{ParallelP4estMesh{2}, ParallelT8codeMesh{2}}, + equations, mortar_l2::LobattoLegendreMortarL2, surface_integral, dg::DGSEM) @unpack node_indices = cache.mpi_mortars @@ -199,7 +203,7 @@ function prolong2mpimortars!(cache, u, end function calc_mpi_mortar_flux!(surface_flux_values, - mesh::ParallelP4estMesh{2}, + mesh::Union{ParallelP4estMesh{2}, ParallelT8codeMesh{2}}, nonconservative_terms, equations, mortar_l2::LobattoLegendreMortarL2, surface_integral, dg::DG, cache) @@ -253,7 +257,8 @@ end # Inlined version of the mortar flux computation on small elements for conservation laws @inline function calc_mpi_mortar_flux!(fstar, - mesh::ParallelP4estMesh{2}, + mesh::Union{ParallelP4estMesh{2}, + ParallelT8codeMesh{2}}, nonconservative_terms::False, equations, surface_integral, dg::DG, cache, mortar_index, position_index, normal_direction, @@ -271,7 +276,9 @@ end end @inline function mpi_mortar_fluxes_to_elements!(surface_flux_values, - mesh::ParallelP4estMesh{2}, equations, + mesh::Union{ParallelP4estMesh{2}, + ParallelT8codeMesh{2}}, + equations, mortar_l2::LobattoLegendreMortarL2, dg::DGSEM, cache, mortar, fstar, u_buffer) diff --git 
a/src/solvers/dgsem_p4est/dg_3d.jl b/src/solvers/dgsem_p4est/dg_3d.jl index 4c0845ba9af..5b3c5ae5ca8 100644 --- a/src/solvers/dgsem_p4est/dg_3d.jl +++ b/src/solvers/dgsem_p4est/dg_3d.jl @@ -7,8 +7,8 @@ # The methods below are specialized on the mortar type # and called from the basic `create_cache` method at the top. -function create_cache(mesh::P4estMesh{3}, equations, mortar_l2::LobattoLegendreMortarL2, - uEltype) +function create_cache(mesh::Union{P4estMesh{3}, T8codeMesh{3}}, equations, + mortar_l2::LobattoLegendreMortarL2, uEltype) # TODO: Taal compare performance of different types fstar_threaded = [Array{uEltype, 4}(undef, nvariables(equations), nnodes(mortar_l2), nnodes(mortar_l2), 4) @@ -88,7 +88,7 @@ end # We pass the `surface_integral` argument solely for dispatch function prolong2interfaces!(cache, u, - mesh::P4estMesh{3}, + mesh::Union{P4estMesh{3}, T8codeMesh{3}}, equations, surface_integral, dg::DG) @unpack interfaces = cache index_range = eachnode(dg) @@ -163,7 +163,7 @@ function prolong2interfaces!(cache, u, end function calc_interface_flux!(surface_flux_values, - mesh::P4estMesh{3}, + mesh::Union{P4estMesh{3}, T8codeMesh{3}}, nonconservative_terms, equations, surface_integral, dg::DG, cache) @unpack neighbor_ids, node_indices = cache.interfaces @@ -244,7 +244,7 @@ end # Inlined function for interface flux computation for conservative flux terms @inline function calc_interface_flux!(surface_flux_values, - mesh::P4estMesh{3}, + mesh::Union{P4estMesh{3}, T8codeMesh{3}}, nonconservative_terms::False, equations, surface_integral, dg::DG, cache, interface_index, normal_direction, @@ -271,7 +271,7 @@ end # Inlined function for interface flux computation for flux + nonconservative terms @inline function calc_interface_flux!(surface_flux_values, - mesh::P4estMesh{3}, + mesh::Union{P4estMesh{3}, T8codeMesh{3}}, nonconservative_terms::True, equations, surface_integral, dg::DG, cache, interface_index, normal_direction, @@ -314,7 +314,7 @@ end end function prolong2boundaries!(cache, u, - mesh::P4estMesh{3}, + mesh::Union{P4estMesh{3}, T8codeMesh{3}}, equations, surface_integral, dg::DG) @unpack boundaries = cache index_range = eachnode(dg) @@ -355,7 +355,7 @@ function prolong2boundaries!(cache, u, end function calc_boundary_flux!(cache, t, boundary_condition, boundary_indexing, - mesh::P4estMesh{3}, + mesh::Union{P4estMesh{3}, T8codeMesh{3}}, equations, surface_integral, dg::DG) @unpack boundaries = cache @unpack surface_flux_values, node_coordinates, contravariant_vectors = cache.elements @@ -417,7 +417,7 @@ function calc_boundary_flux!(cache, t, boundary_condition, boundary_indexing, end function prolong2mortars!(cache, u, - mesh::P4estMesh{3}, equations, + mesh::Union{P4estMesh{3}, T8codeMesh{3}}, equations, mortar_l2::LobattoLegendreMortarL2, surface_integral, dg::DGSEM) @unpack fstar_tmp_threaded = cache @@ -521,7 +521,7 @@ function prolong2mortars!(cache, u, end function calc_mortar_flux!(surface_flux_values, - mesh::P4estMesh{3}, + mesh::Union{P4estMesh{3}, T8codeMesh{3}}, nonconservative_terms, equations, mortar_l2::LobattoLegendreMortarL2, surface_integral, dg::DG, cache) @@ -595,7 +595,7 @@ end # Inlined version of the mortar flux computation on small elements for conservation fluxes @inline function calc_mortar_flux!(fstar, - mesh::P4estMesh{3}, + mesh::Union{P4estMesh{3}, T8codeMesh{3}}, nonconservative_terms::False, equations, surface_integral, dg::DG, cache, mortar_index, position_index, normal_direction, @@ -616,7 +616,7 @@ end # Inlined version of the mortar flux 
computation on small elements for conservation fluxes # with nonconservative terms @inline function calc_mortar_flux!(fstar, - mesh::P4estMesh{3}, + mesh::Union{P4estMesh{3}, T8codeMesh{3}}, nonconservative_terms::True, equations, surface_integral, dg::DG, cache, mortar_index, position_index, normal_direction, @@ -643,7 +643,8 @@ end end @inline function mortar_fluxes_to_elements!(surface_flux_values, - mesh::P4estMesh{3}, equations, + mesh::Union{P4estMesh{3}, T8codeMesh{3}}, + equations, mortar_l2::LobattoLegendreMortarL2, dg::DGSEM, cache, mortar, fstar, u_buffer, fstar_tmp) @@ -727,7 +728,7 @@ end end function calc_surface_integral!(du, u, - mesh::P4estMesh{3}, + mesh::Union{P4estMesh{3}, T8codeMesh{3}}, equations, surface_integral::SurfaceIntegralWeakForm, dg::DGSEM, cache) diff --git a/src/solvers/dgsem_p4est/dg_3d_parabolic.jl b/src/solvers/dgsem_p4est/dg_3d_parabolic.jl index 0bb97c7af02..63d431d35d5 100644 --- a/src/solvers/dgsem_p4est/dg_3d_parabolic.jl +++ b/src/solvers/dgsem_p4est/dg_3d_parabolic.jl @@ -1,7 +1,15 @@ +# By default, Julia/LLVM does not use fused multiply-add operations (FMAs). +# Since these FMAs can increase the performance of many numerical algorithms, +# we need to opt-in explicitly. +# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details. +@muladd begin +#! format: noindent + # This method is called when a SemidiscretizationHyperbolicParabolic is constructed. # It constructs the basic `cache` used throughout the simulation to compute # the RHS etc. -function create_cache_parabolic(mesh::P4estMesh{3}, equations_hyperbolic::AbstractEquations, +function create_cache_parabolic(mesh::P4estMesh{3}, + equations_hyperbolic::AbstractEquations, equations_parabolic::AbstractEquationsParabolic, dg::DG, parabolic_scheme, RealT, uEltype) balance!(mesh) @@ -19,118 +27,6 @@ function create_cache_parabolic(mesh::P4estMesh{3}, equations_hyperbolic::Abstra return cache end -# This file collects all methods that have been updated to work with parabolic systems of equations -# -# assumptions: parabolic terms are of the form div(f(u, grad(u))) and -# will be discretized first order form as follows: -# 1. compute grad(u) -# 2. compute f(u, grad(u)) -# 3. compute div(f(u, grad(u))) (i.e., the "regular" rhs! call) -# boundary conditions will be applied to both grad(u) and div(f(u, grad(u))). 
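The comment block removed directly above documents the general strategy that still underlies the parabolic solvers: rewrite `div(f(u, grad(u)))` in first-order form and evaluate it in three sweeps (gradient, viscous flux, divergence). A self-contained toy illustration of that split, using plain central differences on a 1D periodic grid rather than Trixi's DG machinery (all names below are placeholders):

```julia
# Three-step evaluation of a parabolic operator du/dt = div(f(u, grad(u))),
# mirroring the removed comment: 1. grad(u), 2. f(u, grad(u)), 3. divergence.
function parabolic_rhs_sketch(u, dx, nu)
    # 1. compute grad(u) (central differences, periodic boundaries)
    grad_u = (circshift(u, -1) .- circshift(u, 1)) ./ (2 * dx)
    # 2. compute f(u, grad(u)); here a linear viscous flux f = nu * grad(u)
    flux_viscous = nu .* grad_u
    # 3. compute div(f(u, grad(u))), i.e. the "regular" rhs! step
    return (circshift(flux_viscous, -1) .- circshift(flux_viscous, 1)) ./ (2 * dx)
end

dx = 2pi / 64
u = sin.((0:63) .* dx)
du = parabolic_rhs_sketch(u, dx, 1.0)  # ≈ -u for nu = 1, since (sin)'' = -sin
```

In Trixi these three steps correspond to `calc_gradient!`, `calc_viscous_fluxes!`, and the divergence part of `rhs_parabolic!`, with boundary conditions applied to both the gradient and the divergence stage, as stated in the removed comment.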
-# TODO: Remove in favor of the implementation for the TreeMesh -# once the P4estMesh can handle mortars as well -function rhs_parabolic!(du, u, t, mesh::P4estMesh{3}, - equations_parabolic::AbstractEquationsParabolic, - initial_condition, boundary_conditions_parabolic, source_terms, - dg::DG, parabolic_scheme, cache, cache_parabolic) - @unpack viscous_container = cache_parabolic - @unpack u_transformed, gradients, flux_viscous = viscous_container - - # Convert conservative variables to a form more suitable for viscous flux calculations - @trixi_timeit timer() "transform variables" begin - transform_variables!(u_transformed, u, mesh, equations_parabolic, - dg, parabolic_scheme, cache, cache_parabolic) - end - - # Compute the gradients of the transformed variables - @trixi_timeit timer() "calculate gradient" begin - calc_gradient!(gradients, u_transformed, t, mesh, equations_parabolic, - boundary_conditions_parabolic, dg, cache, cache_parabolic) - end - - # Compute and store the viscous fluxes - @trixi_timeit timer() "calculate viscous fluxes" begin - calc_viscous_fluxes!(flux_viscous, gradients, u_transformed, mesh, - equations_parabolic, dg, cache, cache_parabolic) - end - - # The remainder of this function is essentially a regular rhs! for parabolic - # equations (i.e., it computes the divergence of the viscous fluxes) - # - # OBS! In `calc_viscous_fluxes!`, the viscous flux values at the volume nodes of each element have - # been computed and stored in `fluxes_viscous`. In the following, we *reuse* (abuse) the - # `interfaces` and `boundaries` containers in `cache_parabolic` to interpolate and store the - # *fluxes* at the element surfaces, as opposed to interpolating and storing the *solution* (as it - # is done in the hyperbolic operator). That is, `interfaces.u`/`boundaries.u` store *viscous flux values* - # and *not the solution*. The advantage is that a) we do not need to allocate more storage, b) we - # do not need to recreate the existing data structure only with a different name, and c) we do not - # need to interpolate solutions *and* gradients to the surfaces. - - # TODO: parabolic; reconsider current data structure reuse strategy - - # Reset du - @trixi_timeit timer() "reset ∂u/∂t" reset_du!(du, dg, cache) - - # Calculate volume integral - @trixi_timeit timer() "volume integral" begin - calc_volume_integral!(du, flux_viscous, mesh, equations_parabolic, dg, cache) - end - - # Prolong solution to interfaces - @trixi_timeit timer() "prolong2interfaces" begin - prolong2interfaces!(cache_parabolic, flux_viscous, mesh, equations_parabolic, - dg.surface_integral, dg, cache) - end - - # Calculate interface fluxes - @trixi_timeit timer() "interface flux" begin - calc_interface_flux!(cache_parabolic.elements.surface_flux_values, mesh, - equations_parabolic, dg, cache_parabolic) - end - - # Prolong solution to boundaries - @trixi_timeit timer() "prolong2boundaries" begin - prolong2boundaries!(cache_parabolic, flux_viscous, mesh, equations_parabolic, - dg.surface_integral, dg, cache) - end - - # Calculate boundary fluxes - @trixi_timeit timer() "boundary flux" begin - calc_boundary_flux_divergence!(cache_parabolic, t, - boundary_conditions_parabolic, - mesh, equations_parabolic, - dg.surface_integral, dg) - end - - # Prolong solution to mortars (specialized for AbstractEquationsParabolic) - # !!! NOTE: we reuse the hyperbolic cache here since it contains "mortars" and "u_threaded" - # !!! Is this OK? 
- @trixi_timeit timer() "prolong2mortars" begin - prolong2mortars_divergence!(cache, flux_viscous, mesh, equations_parabolic, - dg.mortar, dg.surface_integral, dg) - end - - # Calculate mortar fluxes (specialized for AbstractEquationsParabolic) - @trixi_timeit timer() "mortar flux" begin - calc_mortar_flux_divergence!(cache_parabolic.elements.surface_flux_values, - mesh, equations_parabolic, dg.mortar, - dg.surface_integral, dg, cache) - end - - # Calculate surface integrals - @trixi_timeit timer() "surface integral" begin - calc_surface_integral!(du, u, mesh, equations_parabolic, - dg.surface_integral, dg, cache_parabolic) - end - - # Apply Jacobian from mapping to reference element - @trixi_timeit timer() "Jacobian" begin - apply_jacobian_parabolic!(du, mesh, equations_parabolic, dg, cache_parabolic) - end - - return nothing -end - function calc_gradient!(gradients, u_transformed, t, mesh::P4estMesh{3}, equations_parabolic, boundary_conditions_parabolic, dg::DG, @@ -185,11 +81,14 @@ function calc_gradient!(gradients, u_transformed, t, Ja31, Ja32, Ja33 = get_contravariant_vector(3, contravariant_vectors, i, j, k, element) - gradients_reference_1 = get_node_vars(gradients_x, equations_parabolic, dg, + gradients_reference_1 = get_node_vars(gradients_x, equations_parabolic, + dg, i, j, k, element) - gradients_reference_2 = get_node_vars(gradients_y, equations_parabolic, dg, + gradients_reference_2 = get_node_vars(gradients_y, equations_parabolic, + dg, i, j, k, element) - gradients_reference_3 = get_node_vars(gradients_z, equations_parabolic, dg, + gradients_reference_3 = get_node_vars(gradients_z, equations_parabolic, + dg, i, j, k, element) # note that the contravariant vectors are transposed compared with computations of flux @@ -227,7 +126,8 @@ function calc_gradient!(gradients, u_transformed, t, @trixi_timeit timer() "interface flux" begin calc_interface_flux!(cache_parabolic.elements.surface_flux_values, mesh, False(), # False() = no nonconservative terms - equations_parabolic, dg.surface_integral, dg, cache_parabolic) + equations_parabolic, dg.surface_integral, dg, + cache_parabolic) end # Prolong solution to boundaries @@ -239,7 +139,8 @@ function calc_gradient!(gradients, u_transformed, t, # Calculate boundary fluxes @trixi_timeit timer() "boundary flux" begin calc_boundary_flux_gradients!(cache_parabolic, t, boundary_conditions_parabolic, - mesh, equations_parabolic, dg.surface_integral, dg) + mesh, equations_parabolic, dg.surface_integral, + dg) end # Prolong solution to mortars. 
These should reuse the hyperbolic version of `prolong2mortars` @@ -277,7 +178,8 @@ function calc_gradient!(gradients, u_transformed, t, for dim in 1:3 grad = gradients[dim] # surface at -x - normal_direction = get_normal_direction(1, contravariant_vectors, + normal_direction = get_normal_direction(1, + contravariant_vectors, 1, l, m, element) grad[v, 1, l, m, element] = (grad[v, 1, l, m, element] + surface_flux_values[v, l, m, 1, @@ -285,18 +187,22 @@ function calc_gradient!(gradients, u_transformed, t, factor_1 * normal_direction[dim]) # surface at +x - normal_direction = get_normal_direction(2, contravariant_vectors, - nnodes(dg), l, m, element) + normal_direction = get_normal_direction(2, + contravariant_vectors, + nnodes(dg), l, m, + element) grad[v, nnodes(dg), l, m, element] = (grad[v, nnodes(dg), l, m, element] + - surface_flux_values[v, l, m, + surface_flux_values[v, l, + m, 2, element] * factor_2 * normal_direction[dim]) # surface at -y - normal_direction = get_normal_direction(3, contravariant_vectors, + normal_direction = get_normal_direction(3, + contravariant_vectors, l, m, 1, element) grad[v, l, 1, m, element] = (grad[v, l, 1, m, element] + surface_flux_values[v, l, m, 3, @@ -304,18 +210,22 @@ function calc_gradient!(gradients, u_transformed, t, factor_1 * normal_direction[dim]) # surface at +y - normal_direction = get_normal_direction(4, contravariant_vectors, - l, nnodes(dg), m, element) + normal_direction = get_normal_direction(4, + contravariant_vectors, + l, nnodes(dg), m, + element) grad[v, l, nnodes(dg), m, element] = (grad[v, l, nnodes(dg), m, element] + - surface_flux_values[v, l, m, + surface_flux_values[v, l, + m, 4, element] * factor_2 * normal_direction[dim]) # surface at -z - normal_direction = get_normal_direction(5, contravariant_vectors, + normal_direction = get_normal_direction(5, + contravariant_vectors, l, m, 1, element) grad[v, l, m, 1, element] = (grad[v, l, m, 1, element] + surface_flux_values[v, l, m, 5, @@ -323,11 +233,14 @@ function calc_gradient!(gradients, u_transformed, t, factor_1 * normal_direction[dim]) # surface at +z - normal_direction = get_normal_direction(6, contravariant_vectors, - l, m, nnodes(dg), element) + normal_direction = get_normal_direction(6, + contravariant_vectors, + l, m, nnodes(dg), + element) grad[v, l, m, nnodes(dg), element] = (grad[v, l, m, nnodes(dg), element] + - surface_flux_values[v, l, m, + surface_flux_values[v, l, + m, 6, element] * factor_2 * @@ -478,37 +391,46 @@ function calc_volume_integral!(du, flux_viscous, @threaded for element in eachelement(dg, cache) # Calculate volume terms in one element for k in eachnode(dg), j in eachnode(dg), i in eachnode(dg) - flux1 = get_node_vars(flux_viscous_x, equations_parabolic, dg, i, j, k, element) - flux2 = get_node_vars(flux_viscous_y, equations_parabolic, dg, i, j, k, element) - flux3 = get_node_vars(flux_viscous_z, equations_parabolic, dg, i, j, k, element) + flux1 = get_node_vars(flux_viscous_x, equations_parabolic, dg, i, j, k, + element) + flux2 = get_node_vars(flux_viscous_y, equations_parabolic, dg, i, j, k, + element) + flux3 = get_node_vars(flux_viscous_z, equations_parabolic, dg, i, j, k, + element) # Compute the contravariant flux by taking the scalar product of the # first contravariant vector Ja^1 and the flux vector - Ja11, Ja12, Ja13 = get_contravariant_vector(1, contravariant_vectors, i, j, k, + Ja11, Ja12, Ja13 = get_contravariant_vector(1, contravariant_vectors, i, j, + k, element) contravariant_flux1 = Ja11 * flux1 + Ja12 * flux2 + Ja13 * flux3 for ii 
in eachnode(dg) - multiply_add_to_node_vars!(du, derivative_dhat[ii, i], contravariant_flux1, + multiply_add_to_node_vars!(du, derivative_dhat[ii, i], + contravariant_flux1, equations_parabolic, dg, ii, j, k, element) end # Compute the contravariant flux by taking the scalar product of the # second contravariant vector Ja^2 and the flux vector - Ja21, Ja22, Ja23 = get_contravariant_vector(2, contravariant_vectors, i, j, k, + Ja21, Ja22, Ja23 = get_contravariant_vector(2, contravariant_vectors, i, j, + k, element) contravariant_flux2 = Ja21 * flux1 + Ja22 * flux2 + Ja23 * flux3 for jj in eachnode(dg) - multiply_add_to_node_vars!(du, derivative_dhat[jj, j], contravariant_flux2, + multiply_add_to_node_vars!(du, derivative_dhat[jj, j], + contravariant_flux2, equations_parabolic, dg, i, jj, k, element) end # Compute the contravariant flux by taking the scalar product of the # second contravariant vector Ja^2 and the flux vector - Ja31, Ja32, Ja33 = get_contravariant_vector(3, contravariant_vectors, i, j, k, + Ja31, Ja32, Ja33 = get_contravariant_vector(3, contravariant_vectors, i, j, + k, element) contravariant_flux3 = Ja31 * flux1 + Ja32 * flux2 + Ja33 * flux3 for kk in eachnode(dg) - multiply_add_to_node_vars!(du, derivative_dhat[kk, k], contravariant_flux3, + multiply_add_to_node_vars!(du, derivative_dhat[kk, k], + contravariant_flux3, equations_parabolic, dg, i, j, kk, element) end end @@ -686,7 +608,8 @@ function calc_interface_flux!(surface_flux_values, viscous_flux_normal_ll, viscous_flux_normal_rr = get_surface_node_vars(cache_parabolic.interfaces.u, equations_parabolic, dg, - i, j, + i, + j, interface) flux = 0.5 * (viscous_flux_normal_ll + viscous_flux_normal_rr) @@ -718,7 +641,8 @@ function calc_interface_flux!(surface_flux_values, end function prolong2mortars_divergence!(cache, flux_viscous, - mesh::Union{P4estMesh{3}, T8codeMesh{3}}, equations, + mesh::Union{P4estMesh{3}, T8codeMesh{3}}, + equations, mortar_l2::LobattoLegendreMortarL2, surface_integral, dg::DGSEM) @unpack neighbor_ids, node_indices = cache.mortars @@ -754,11 +678,14 @@ function prolong2mortars_divergence!(cache, flux_viscous, element) for v in eachvariable(equations) - flux_viscous = SVector(flux_viscous_x[v, i_small, j_small, k_small, + flux_viscous = SVector(flux_viscous_x[v, i_small, j_small, + k_small, element], - flux_viscous_y[v, i_small, j_small, k_small, + flux_viscous_y[v, i_small, j_small, + k_small, element], - flux_viscous_z[v, i_small, j_small, k_small, + flux_viscous_z[v, i_small, j_small, + k_small, element]) cache.mortars.u[1, v, position, i, j, mortar] = dot(flux_viscous, @@ -800,7 +727,8 @@ function prolong2mortars_divergence!(cache, flux_viscous, for i in eachnode(dg) normal_direction = get_normal_direction(direction_index, contravariant_vectors, - i_large, j_large, k_large, element) + i_large, j_large, k_large, + element) for v in eachvariable(equations) flux_viscous = SVector(flux_viscous_x[v, i_large, j_large, k_large, @@ -939,7 +867,8 @@ end # TODO: parabolic; only BR1 at the moment flux_ = 0.5 * (u_ll + u_rr) # Copy flux to buffer - set_node_vars!(fstar, flux_, equations, dg, i_node_index, j_node_index, position_index) + set_node_vars!(fstar, flux_, equations, dg, i_node_index, j_node_index, + position_index) end # TODO: parabolic, finish implementing `calc_boundary_flux_gradients!` and `calc_boundary_flux_divergence!` @@ -974,7 +903,8 @@ function prolong2boundaries!(cache_parabolic, flux_viscous, for j in eachnode(dg) for i in eachnode(dg) # this is the outward normal direction on the 
primary element - normal_direction = get_normal_direction(direction, contravariant_vectors, + normal_direction = get_normal_direction(direction, + contravariant_vectors, i_node, j_node, k_node, element) for v in eachvariable(equations_parabolic) @@ -985,7 +915,8 @@ function prolong2boundaries!(cache_parabolic, flux_viscous, flux_viscous_z[v, i_node, j_node, k_node, element]) - boundaries.u[v, i, j, boundary] = dot(flux_viscous, normal_direction) + boundaries.u[v, i, j, boundary] = dot(flux_viscous, + normal_direction) end i_node += i_node_step_i j_node += j_node_step_i @@ -1052,7 +983,8 @@ function calc_boundary_flux!(cache, t, j_node, k_node, element) - flux_ = boundary_condition_parabolic(flux_inner, u_inner, normal_direction, + flux_ = boundary_condition_parabolic(flux_inner, u_inner, + normal_direction, x, t, operator_type, equations_parabolic) @@ -1071,3 +1003,22 @@ function calc_boundary_flux!(cache, t, end end end + +function apply_jacobian_parabolic!(du, mesh::P4estMesh{3}, + equations::AbstractEquationsParabolic, + dg::DG, cache) + @unpack inverse_jacobian = cache.elements + + @threaded for element in eachelement(dg, cache) + for k in eachnode(dg), j in eachnode(dg), i in eachnode(dg) + factor = inverse_jacobian[i, j, k, element] + + for v in eachvariable(equations) + du[v, i, j, k, element] *= factor + end + end + end + + return nothing +end +end # @muladd diff --git a/src/solvers/dgsem_p4est/dg_3d_parallel.jl b/src/solvers/dgsem_p4est/dg_3d_parallel.jl index 13bf2a1a2eb..e504e06d2c4 100644 --- a/src/solvers/dgsem_p4est/dg_3d_parallel.jl +++ b/src/solvers/dgsem_p4est/dg_3d_parallel.jl @@ -6,7 +6,7 @@ #! format: noindent function rhs!(du, u, t, - mesh::ParallelP4estMesh{3}, equations, + mesh::Union{ParallelP4estMesh{3}, ParallelT8codeMesh{3}}, equations, initial_condition, boundary_conditions, source_terms::Source, dg::DG, cache) where {Source} # Start to receive MPI data @@ -113,7 +113,8 @@ function rhs!(du, u, t, end function prolong2mpiinterfaces!(cache, u, - mesh::ParallelP4estMesh{3}, + mesh::Union{ParallelP4estMesh{3}, + ParallelT8codeMesh{3}}, equations, surface_integral, dg::DG) @unpack mpi_interfaces = cache index_range = eachnode(dg) @@ -160,7 +161,8 @@ function prolong2mpiinterfaces!(cache, u, end function calc_mpi_interface_flux!(surface_flux_values, - mesh::ParallelP4estMesh{3}, + mesh::Union{ParallelP4estMesh{3}, + ParallelT8codeMesh{3}}, nonconservative_terms, equations, surface_integral, dg::DG, cache) @unpack local_neighbor_ids, node_indices, local_sides = cache.mpi_interfaces @@ -237,7 +239,8 @@ end # Inlined version of the interface flux computation for conservation laws @inline function calc_mpi_interface_flux!(surface_flux_values, - mesh::P4estMesh{3}, + mesh::Union{ParallelP4estMesh{3}, + ParallelT8codeMesh{3}}, nonconservative_terms::False, equations, surface_integral, dg::DG, cache, interface_index, normal_direction, @@ -265,7 +268,8 @@ end end function prolong2mpimortars!(cache, u, - mesh::ParallelP4estMesh{3}, equations, + mesh::Union{ParallelP4estMesh{3}, ParallelT8codeMesh{3}}, + equations, mortar_l2::LobattoLegendreMortarL2, surface_integral, dg::DGSEM) @unpack node_indices = cache.mpi_mortars @@ -374,7 +378,7 @@ function prolong2mpimortars!(cache, u, end function calc_mpi_mortar_flux!(surface_flux_values, - mesh::ParallelP4estMesh{3}, + mesh::Union{ParallelP4estMesh{3}, ParallelT8codeMesh{3}}, nonconservative_terms, equations, mortar_l2::LobattoLegendreMortarL2, surface_integral, dg::DG, cache) @@ -437,7 +441,8 @@ end # Inlined version of the mortar 
flux computation on small elements for conservation laws @inline function calc_mpi_mortar_flux!(fstar, - mesh::ParallelP4estMesh{3}, + mesh::Union{ParallelP4estMesh{3}, + ParallelT8codeMesh{3}}, nonconservative_terms::False, equations, surface_integral, dg::DG, cache, mortar_index, position_index, normal_direction, @@ -456,7 +461,9 @@ end end @inline function mpi_mortar_fluxes_to_elements!(surface_flux_values, - mesh::ParallelP4estMesh{3}, equations, + mesh::Union{ParallelP4estMesh{3}, + ParallelT8codeMesh{3}}, + equations, mortar_l2::LobattoLegendreMortarL2, dg::DGSEM, cache, mortar, fstar, u_buffer, fstar_tmp) diff --git a/src/solvers/dgsem_p4est/dg_parallel.jl b/src/solvers/dgsem_p4est/dg_parallel.jl index 712ede2bfce..eaa6ab5cee2 100644 --- a/src/solvers/dgsem_p4est/dg_parallel.jl +++ b/src/solvers/dgsem_p4est/dg_parallel.jl @@ -166,7 +166,8 @@ end # at `index_base`+1 in the MPI buffer. `data_size` is the data size associated with each small # position (i.e. position 1 or 2). The data corresponding to the large side (i.e. position 3) has # size `2 * data_size`. -@inline function buffer_mortar_indices(mesh::ParallelP4estMesh{2}, index_base, +@inline function buffer_mortar_indices(mesh::Union{ParallelP4estMesh{2}, + ParallelT8codeMesh{2}}, index_base, data_size) return ( # first, last for local element in position 1 (small element) @@ -185,7 +186,8 @@ end # at `index_base`+1 in the MPI buffer. `data_size` is the data size associated with each small # position (i.e. position 1 to 4). The data corresponding to the large side (i.e. position 5) has # size `4 * data_size`. -@inline function buffer_mortar_indices(mesh::ParallelP4estMesh{3}, index_base, +@inline function buffer_mortar_indices(mesh::Union{ParallelP4estMesh{3}, + ParallelT8codeMesh{3}}, index_base, data_size) return ( # first, last for local element in position 1 (small element) @@ -491,7 +493,8 @@ end # Exchange normal directions of small elements of the MPI mortars. They are needed on all involved # MPI ranks to calculate the mortar fluxes. 
-function exchange_normal_directions!(mpi_mortars, mpi_cache, mesh::ParallelP4estMesh, +function exchange_normal_directions!(mpi_mortars, mpi_cache, + mesh::Union{ParallelP4estMesh, ParallelT8codeMesh}, n_nodes) RealT = real(mesh) n_dims = ndims(mesh) diff --git a/src/solvers/dgsem_structured/dg_3d.jl b/src/solvers/dgsem_structured/dg_3d.jl index cdb085e9008..1df9f408895 100644 --- a/src/solvers/dgsem_structured/dg_3d.jl +++ b/src/solvers/dgsem_structured/dg_3d.jl @@ -58,7 +58,8 @@ See also https://github.com/trixi-framework/Trixi.jl/issues/1671#issuecomment-17 =# @inline function weak_form_kernel!(du, u, element, - mesh::Union{StructuredMesh{3}, P4estMesh{3}}, + mesh::Union{StructuredMesh{3}, P4estMesh{3}, + T8codeMesh{3}}, nonconservative_terms::False, equations, dg::DGSEM, cache, alpha = true) # true * [some floating point value] == [exactly the same floating point value] @@ -115,7 +116,8 @@ end # the physical fluxes in each Cartesian direction @inline function flux_differencing_kernel!(du, u, element, - mesh::Union{StructuredMesh{3}, P4estMesh{3}}, + mesh::Union{StructuredMesh{3}, P4estMesh{3}, + T8codeMesh{3}}, nonconservative_terms::False, equations, volume_flux, dg::DGSEM, cache, alpha = true) # true * [some floating point value] == [exactly the same floating point value] @@ -189,7 +191,8 @@ end @inline function flux_differencing_kernel!(du, u, element, - mesh::Union{StructuredMesh{3}, P4estMesh{3}}, + mesh::Union{StructuredMesh{3}, P4estMesh{3}, + T8codeMesh{3}}, nonconservative_terms::True, equations, volume_flux, dg::DGSEM, cache, alpha = true) @unpack derivative_split = dg.basis @@ -274,7 +277,8 @@ end # [arXiv: 2008.12044v2](https://arxiv.org/pdf/2008.12044) @inline function calcflux_fv!(fstar1_L, fstar1_R, fstar2_L, fstar2_R, fstar3_L, fstar3_R, u, - mesh::Union{StructuredMesh{3}, P4estMesh{3}}, + mesh::Union{StructuredMesh{3}, P4estMesh{3}, + T8codeMesh{3}}, nonconservative_terms::False, equations, volume_flux_fv, dg::DGSEM, element, cache) @unpack contravariant_vectors = cache.elements @@ -369,7 +373,8 @@ end # # Calculate the finite volume fluxes inside curvilinear elements (**with non-conservative terms**). 
@inline function calcflux_fv!(fstar1_L, fstar1_R, fstar2_L, fstar2_R, fstar3_L, fstar3_R, u, - mesh::Union{StructuredMesh{3}, P4estMesh{3}}, + mesh::Union{StructuredMesh{3}, P4estMesh{3}, + T8codeMesh{3}}, nonconservative_terms::True, equations, volume_flux_fv, dg::DGSEM, element, cache) @unpack contravariant_vectors = cache.elements @@ -783,7 +788,7 @@ function calc_boundary_flux!(cache, u, t, boundary_conditions::NamedTuple, end function apply_jacobian!(du, - mesh::Union{StructuredMesh{3}, P4estMesh{3}}, + mesh::Union{StructuredMesh{3}, P4estMesh{3}, T8codeMesh{3}}, equations, dg::DG, cache) @threaded for element in eachelement(dg, cache) for k in eachnode(dg), j in eachnode(dg), i in eachnode(dg) diff --git a/src/solvers/dgsem_t8code/containers.jl b/src/solvers/dgsem_t8code/containers.jl index 093feb2985a..d7ff79fbf2f 100644 --- a/src/solvers/dgsem_t8code/containers.jl +++ b/src/solvers/dgsem_t8code/containers.jl @@ -18,19 +18,22 @@ function reinitialize_containers!(mesh::T8codeMesh, equations, dg::DGSEM, cache) @unpack boundaries = cache resize!(boundaries, mesh.nboundaries) - trixi_t8_fill_mesh_info(mesh.forest, elements, interfaces, mortars, boundaries, - mesh.boundary_names) + fill_mesh_info!(mesh, interfaces, mortars, boundaries, + mesh.boundary_names) return nothing end function count_required_surfaces!(mesh::T8codeMesh) - counts = trixi_t8_count_interfaces(mesh.forest) + counts = count_interfaces(mesh) mesh.nmortars = counts.mortars mesh.ninterfaces = counts.interfaces mesh.nboundaries = counts.boundaries + mesh.nmpimortars = counts.mpi_mortars + mesh.nmpiinterfaces = counts.mpi_interfaces + return counts end @@ -38,7 +41,9 @@ end function count_required_surfaces(mesh::T8codeMesh) return (interfaces = mesh.ninterfaces, mortars = mesh.nmortars, - boundaries = mesh.nboundaries) + boundaries = mesh.nboundaries, + mpi_interfaces = mesh.nmpiinterfaces, + mpi_mortars = mesh.nmpimortars) end # Compatibility to `dgsem_p4est/containers.jl`. diff --git a/src/solvers/dgsem_t8code/containers_2d.jl b/src/solvers/dgsem_t8code/containers_2d.jl index 029e6674afb..ce525bfdf65 100644 --- a/src/solvers/dgsem_t8code/containers_2d.jl +++ b/src/solvers/dgsem_t8code/containers_2d.jl @@ -1,3 +1,7 @@ +# By default, Julia/LLVM does not use fused multiply-add operations (FMAs). +# Since these FMAs can increase the performance of many numerical algorithms, +# we need to opt-in explicitly. +# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details. @muladd begin #! format: noindent @@ -22,11 +26,15 @@ function calc_node_coordinates!(node_coordinates, tree_class = t8_forest_get_tree_class(mesh.forest, itree) eclass_scheme = t8_forest_get_eclass_scheme(mesh.forest, tree_class) num_elements_in_tree = t8_forest_get_tree_num_elements(mesh.forest, itree) + global_itree = t8_forest_global_tree_id(mesh.forest, itree) for ielement in 0:(num_elements_in_tree - 1) element = t8_forest_get_element_in_tree(mesh.forest, itree, ielement) element_level = t8_element_level(eclass_scheme, element) + # Note, `t8_quad_len` is encoded as an integer (Morton encoding) in + # relation to `t8_quad_root_len`. This line transforms the + # "integer" length to a float in relation to the unit interval [0,1]. 
element_length = t8_quad_len(element_level) / t8_quad_root_len element_coords = Array{Float64}(undef, 3) @@ -48,11 +56,23 @@ function calc_node_coordinates!(node_coordinates, multiply_dimensionwise!(view(node_coordinates, :, :, :, current_index += 1), matrix1, matrix2, view(mesh.tree_node_coordinates, :, :, :, - itree + 1), + global_itree + 1), tmp1) end end return node_coordinates end + +function init_mortar_neighbor_ids!(mortars::P4estMortarContainer{2}, my_face, + other_face, orientation, neighbor_ielements, + mortar_id) + if orientation == 0 + mortars.neighbor_ids[1, mortar_id] = neighbor_ielements[1] + 1 + mortars.neighbor_ids[2, mortar_id] = neighbor_ielements[2] + 1 + else + mortars.neighbor_ids[1, mortar_id] = neighbor_ielements[2] + 1 + mortars.neighbor_ids[2, mortar_id] = neighbor_ielements[1] + 1 + end +end end # @muladd diff --git a/src/solvers/dgsem_t8code/containers_3d.jl b/src/solvers/dgsem_t8code/containers_3d.jl new file mode 100644 index 00000000000..4d56bc734aa --- /dev/null +++ b/src/solvers/dgsem_t8code/containers_3d.jl @@ -0,0 +1,236 @@ +# By default, Julia/LLVM does not use fused multiply-add operations (FMAs). +# Since these FMAs can increase the performance of many numerical algorithms, +# we need to opt-in explicitly. +# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details. +@muladd begin +#! format: noindent + +# Interpolate tree_node_coordinates to each quadrant at the specified nodes +function calc_node_coordinates!(node_coordinates, + mesh::T8codeMesh{3}, + nodes::AbstractVector) + # We use `StrideArray`s here since these buffers are used in performance-critical + # places and the additional information passed to the compiler makes them faster + # than native `Array`s. + tmp1 = StrideArray(undef, real(mesh), + StaticInt(3), static_length(nodes), static_length(mesh.nodes), + static_length(mesh.nodes)) + matrix1 = StrideArray(undef, real(mesh), + static_length(nodes), static_length(mesh.nodes)) + matrix2 = similar(matrix1) + matrix3 = similar(matrix1) + baryweights_in = barycentric_weights(mesh.nodes) + + num_local_trees = t8_forest_get_num_local_trees(mesh.forest) + + current_index = 0 + for itree in 0:(num_local_trees - 1) + tree_class = t8_forest_get_tree_class(mesh.forest, itree) + eclass_scheme = t8_forest_get_eclass_scheme(mesh.forest, tree_class) + num_elements_in_tree = t8_forest_get_tree_num_elements(mesh.forest, itree) + global_itree = t8_forest_global_tree_id(mesh.forest, itree) + + for ielement in 0:(num_elements_in_tree - 1) + element = t8_forest_get_element_in_tree(mesh.forest, itree, ielement) + element_level = t8_element_level(eclass_scheme, element) + + # Note, `t8_hex_len` is encoded as an integer (Morton encoding) in + # relation to `t8_hex_root_len`. This line transforms the + # "integer" length to a float in relation to the unit interval [0,1]. 
+ element_length = t8_hex_len(element_level) / t8_hex_root_len + + element_coords = Vector{Float64}(undef, 3) + t8_element_vertex_reference_coords(eclass_scheme, element, 0, + pointer(element_coords)) + + nodes_out_x = (2 * + (element_length * 0.5 * (nodes .+ 1) .+ element_coords[1]) .- + 1) + nodes_out_y = (2 * + (element_length * 0.5 * (nodes .+ 1) .+ element_coords[2]) .- + 1) + nodes_out_z = (2 * + (element_length * 0.5 * (nodes .+ 1) .+ element_coords[3]) .- + 1) + + polynomial_interpolation_matrix!(matrix1, mesh.nodes, nodes_out_x, + baryweights_in) + polynomial_interpolation_matrix!(matrix2, mesh.nodes, nodes_out_y, + baryweights_in) + polynomial_interpolation_matrix!(matrix3, mesh.nodes, nodes_out_z, + baryweights_in) + + multiply_dimensionwise!(view(node_coordinates, :, :, :, :, + current_index += 1), + matrix1, matrix2, matrix3, + view(mesh.tree_node_coordinates, :, :, :, :, + global_itree + 1), + tmp1) + end + end + + return node_coordinates +end + +# This routine was copied and adapted from `src/dgsem_p4est/containers_3d.jl`: `orientation_to_indices_p4est`. +function init_mortar_neighbor_ids!(mortars::P4estMortarContainer{3}, my_face, + other_face, orientation, neighbor_ielements, + mortar_id) + # my_face and other_face are the face directions (zero-based) + # of "my side" and "other side" respectively. + # Face corner 0 of the face with the lower face direction connects to a corner of the other face. + # The number of this corner is the orientation code in `p4est`. + lower = my_face <= other_face + + # x_pos, y_neg, and z_pos are the directions in which the face has right-handed coordinates + # when looked at from the outside. + my_right_handed = my_face in (1, 2, 5) + other_right_handed = other_face in (1, 2, 5) + + # If both or none are right-handed when looked at from the outside, they will have different + # orientations when looked at from the same side of the interface. + flipped = my_right_handed == other_right_handed + + # In the following illustrations, the face corner numbering of `p4est` is shown. + # ξ and η are the local coordinates of the respective face. + # We're looking at both faces from the same side of the interface, so that "other side" + # (in the illustrations on the left) has right-handed coordinates. 
+ if !flipped + if orientation == 0 + # Corner 0 of other side matches corner 0 of my side + # 2┌──────┐3 2┌──────┐3 + # │ │ │ │ + # │ │ │ │ + # 0└──────┘1 0└──────┘1 + # η η + # ↑ ↑ + # │ │ + # └───> ξ └───> ξ + + mortars.neighbor_ids[1, mortar_id] = neighbor_ielements[1] + 1 + mortars.neighbor_ids[2, mortar_id] = neighbor_ielements[2] + 1 + mortars.neighbor_ids[3, mortar_id] = neighbor_ielements[3] + 1 + mortars.neighbor_ids[4, mortar_id] = neighbor_ielements[4] + 1 + + elseif ((lower && orientation == 2) # Corner 0 of my side matches corner 2 of other side + || + (!lower && orientation == 1)) # Corner 0 of other side matches corner 1 of my side + # 2┌──────┐3 0┌──────┐2 + # │ │ │ │ + # │ │ │ │ + # 0└──────┘1 1└──────┘3 + # η ┌───> η + # ↑ │ + # │ ↓ + # └───> ξ ξ + + mortars.neighbor_ids[1, mortar_id] = neighbor_ielements[2] + 1 + mortars.neighbor_ids[2, mortar_id] = neighbor_ielements[4] + 1 + mortars.neighbor_ids[3, mortar_id] = neighbor_ielements[1] + 1 + mortars.neighbor_ids[4, mortar_id] = neighbor_ielements[3] + 1 + + elseif ((lower && orientation == 1) # Corner 0 of my side matches corner 1 of other side + || + (!lower && orientation == 2)) # Corner 0 of other side matches corner 2 of my side + # 2┌──────┐3 3┌──────┐1 + # │ │ │ │ + # │ │ │ │ + # 0└──────┘1 2└──────┘0 + # η ξ + # ↑ ↑ + # │ │ + # └───> ξ η <───┘ + + mortars.neighbor_ids[1, mortar_id] = neighbor_ielements[3] + 1 + mortars.neighbor_ids[2, mortar_id] = neighbor_ielements[1] + 1 + mortars.neighbor_ids[3, mortar_id] = neighbor_ielements[4] + 1 + mortars.neighbor_ids[4, mortar_id] = neighbor_ielements[2] + 1 + + else # orientation == 3 + # Corner 0 of my side matches corner 3 of other side and + # corner 0 of other side matches corner 3 of my side. + # 2┌──────┐3 1┌──────┐0 + # │ │ │ │ + # │ │ │ │ + # 0└──────┘1 3└──────┘2 + # η ξ <───┐ + # ↑ │ + # │ ↓ + # └───> ξ η + + mortars.neighbor_ids[1, mortar_id] = neighbor_ielements[4] + 1 + mortars.neighbor_ids[2, mortar_id] = neighbor_ielements[3] + 1 + mortars.neighbor_ids[3, mortar_id] = neighbor_ielements[2] + 1 + mortars.neighbor_ids[4, mortar_id] = neighbor_ielements[1] + 1 + end + else # flipped + if orientation == 0 + # Corner 0 of other side matches corner 0 of my side + # 2┌──────┐3 1┌──────┐3 + # │ │ │ │ + # │ │ │ │ + # 0└──────┘1 0└──────┘2 + # η ξ + # ↑ ↑ + # │ │ + # └───> ξ └───> η + + mortars.neighbor_ids[1, mortar_id] = neighbor_ielements[1] + 1 + mortars.neighbor_ids[2, mortar_id] = neighbor_ielements[3] + 1 + mortars.neighbor_ids[3, mortar_id] = neighbor_ielements[2] + 1 + mortars.neighbor_ids[4, mortar_id] = neighbor_ielements[4] + 1 + + elseif orientation == 2 + # Corner 0 of my side matches corner 2 of other side and + # corner 0 of other side matches corner 2 of my side. + # 2┌──────┐3 0┌──────┐1 + # │ │ │ │ + # │ │ │ │ + # 0└──────┘1 2└──────┘3 + # η ┌───> ξ + # ↑ │ + # │ ↓ + # └───> ξ η + + mortars.neighbor_ids[1, mortar_id] = neighbor_ielements[3] + 1 + mortars.neighbor_ids[2, mortar_id] = neighbor_ielements[4] + 1 + mortars.neighbor_ids[3, mortar_id] = neighbor_ielements[1] + 1 + mortars.neighbor_ids[4, mortar_id] = neighbor_ielements[2] + 1 + + elseif orientation == 1 + # Corner 0 of my side matches corner 1 of other side and + # corner 0 of other side matches corner 1 of my side. 
+ # 2┌──────┐3 3┌──────┐2 + # │ │ │ │ + # │ │ │ │ + # 0└──────┘1 1└──────┘0 + # η η + # ↑ ↑ + # │ │ + # └───> ξ ξ <───┘ + + mortars.neighbor_ids[1, mortar_id] = neighbor_ielements[2] + 1 + mortars.neighbor_ids[2, mortar_id] = neighbor_ielements[1] + 1 + mortars.neighbor_ids[3, mortar_id] = neighbor_ielements[4] + 1 + mortars.neighbor_ids[4, mortar_id] = neighbor_ielements[3] + 1 + + else # orientation == 3 + # Corner 0 of my side matches corner 3 of other side and + # corner 0 of other side matches corner 3 of my side. + # 2┌──────┐3 2┌──────┐0 + # │ │ │ │ + # │ │ │ │ + # 0└──────┘1 3└──────┘1 + # η η <───┐ + # ↑ │ + # │ ↓ + # └───> ξ ξ + + mortars.neighbor_ids[1, mortar_id] = neighbor_ielements[4] + 1 + mortars.neighbor_ids[2, mortar_id] = neighbor_ielements[2] + 1 + mortars.neighbor_ids[3, mortar_id] = neighbor_ielements[3] + 1 + mortars.neighbor_ids[4, mortar_id] = neighbor_ielements[1] + 1 + end + end +end +end # @muladd diff --git a/src/solvers/dgsem_t8code/containers_parallel.jl b/src/solvers/dgsem_t8code/containers_parallel.jl new file mode 100644 index 00000000000..0cb3f5887a0 --- /dev/null +++ b/src/solvers/dgsem_t8code/containers_parallel.jl @@ -0,0 +1,65 @@ +function reinitialize_containers!(mesh::ParallelT8codeMesh, equations, dg::DGSEM, cache) + @unpack elements, interfaces, boundaries, mortars, mpi_interfaces, mpi_mortars, + mpi_cache = cache + resize!(elements, ncells(mesh)) + init_elements!(elements, mesh, dg.basis) + + count_required_surfaces!(mesh) + required = count_required_surfaces(mesh) + + resize!(interfaces, required.interfaces) + + resize!(boundaries, required.boundaries) + + resize!(mortars, required.mortars) + + resize!(mpi_interfaces, required.mpi_interfaces) + + resize!(mpi_mortars, required.mpi_mortars) + + mpi_mesh_info = (mpi_mortars = mpi_mortars, + mpi_interfaces = mpi_interfaces, + + # Temporary arrays for updating `mpi_cache`. + global_mortar_ids = fill(UInt64(0), nmpimortars(mpi_mortars)), + global_interface_ids = fill(UInt64(0), nmpiinterfaces(mpi_interfaces)), + neighbor_ranks_mortar = Vector{Vector{Int}}(undef, + nmpimortars(mpi_mortars)), + neighbor_ranks_interface = fill(-1, nmpiinterfaces(mpi_interfaces))) + + fill_mesh_info!(mesh, interfaces, mortars, boundaries, + mesh.boundary_names; mpi_mesh_info = mpi_mesh_info) + + init_mpi_cache!(mpi_cache, mesh, mpi_mesh_info, nvariables(equations), nnodes(dg), + eltype(elements)) + + empty!(mpi_mesh_info.global_mortar_ids) + empty!(mpi_mesh_info.global_interface_ids) + empty!(mpi_mesh_info.neighbor_ranks_mortar) + empty!(mpi_mesh_info.neighbor_ranks_interface) + + # Re-initialize and distribute normal directions of MPI mortars; requires + # MPI communication, so the MPI cache must be re-initialized beforehand. + init_normal_directions!(mpi_mortars, dg.basis, elements) + exchange_normal_directions!(mpi_mortars, mpi_cache, mesh, nnodes(dg)) + + return nothing +end + +# Compatibility to `dgsem_p4est/containers.jl`. +function init_mpi_interfaces!(interfaces, mesh::ParallelT8codeMesh) + # Do nothing. + return nothing +end + +# Compatibility to `dgsem_p4est/containers.jl`. +function init_mpi_mortars!(mortars, mesh::ParallelT8codeMesh) + # Do nothing. + return nothing +end + +# Compatibility to `dgsem_p4est/containers_parallel.jl`. +function init_mpi_mortars!(mpi_mortars, mesh::ParallelT8codeMesh, basis, elements) + # Do nothing. 
+ return nothing +end diff --git a/src/solvers/dgsem_t8code/dg.jl b/src/solvers/dgsem_t8code/dg.jl index 16a9d7d35b1..e01b12e0f80 100644 --- a/src/solvers/dgsem_t8code/dg.jl +++ b/src/solvers/dgsem_t8code/dg.jl @@ -13,8 +13,8 @@ function create_cache(mesh::T8codeMesh, equations::AbstractEquations, dg::DG, :: boundaries = init_boundaries(mesh, equations, dg.basis, elements) mortars = init_mortars(mesh, equations, dg.basis, elements) - trixi_t8_fill_mesh_info(mesh.forest, elements, interfaces, mortars, boundaries, - mesh.boundary_names) + fill_mesh_info!(mesh, interfaces, mortars, boundaries, + mesh.boundary_names) cache = (; elements, interfaces, boundaries, mortars) @@ -28,4 +28,8 @@ end include("containers.jl") include("containers_2d.jl") +include("containers_3d.jl") + +include("containers_parallel.jl") +include("dg_parallel.jl") end # @muladd diff --git a/src/solvers/dgsem_t8code/dg_parallel.jl b/src/solvers/dgsem_t8code/dg_parallel.jl new file mode 100644 index 00000000000..ece614b7d75 --- /dev/null +++ b/src/solvers/dgsem_t8code/dg_parallel.jl @@ -0,0 +1,135 @@ +@muladd begin +#! format: noindent + +# This method is called when a `SemidiscretizationHyperbolic` is constructed. +# It constructs the basic `cache` used throughout the simulation to compute +# the RHS etc. +function create_cache(mesh::ParallelT8codeMesh, equations::AbstractEquations, dg::DG, + ::Any, + ::Type{uEltype}) where {uEltype <: Real} + # Make sure to balance and partition the forest before creating any + # containers in case someone has tampered with forest after creating the + # mesh. + balance!(mesh) + partition!(mesh) + + count_required_surfaces!(mesh) + + elements = init_elements(mesh, equations, dg.basis, uEltype) + mortars = init_mortars(mesh, equations, dg.basis, elements) + interfaces = init_interfaces(mesh, equations, dg.basis, elements) + boundaries = init_boundaries(mesh, equations, dg.basis, elements) + + mpi_mortars = init_mpi_mortars(mesh, equations, dg.basis, elements) + mpi_interfaces = init_mpi_interfaces(mesh, equations, dg.basis, elements) + + mpi_mesh_info = (mpi_mortars = mpi_mortars, + mpi_interfaces = mpi_interfaces, + global_mortar_ids = fill(UInt64(0), nmpimortars(mpi_mortars)), + global_interface_ids = fill(UInt64(0), + nmpiinterfaces(mpi_interfaces)), + neighbor_ranks_mortar = Vector{Vector{Int}}(undef, + nmpimortars(mpi_mortars)), + neighbor_ranks_interface = fill(-1, + nmpiinterfaces(mpi_interfaces))) + + fill_mesh_info!(mesh, interfaces, mortars, boundaries, + mesh.boundary_names; mpi_mesh_info = mpi_mesh_info) + + mpi_cache = init_mpi_cache(mesh, mpi_mesh_info, nvariables(equations), nnodes(dg), + uEltype) + + empty!(mpi_mesh_info.global_mortar_ids) + empty!(mpi_mesh_info.global_interface_ids) + empty!(mpi_mesh_info.neighbor_ranks_mortar) + empty!(mpi_mesh_info.neighbor_ranks_interface) + + init_normal_directions!(mpi_mortars, dg.basis, elements) + exchange_normal_directions!(mpi_mortars, mpi_cache, mesh, nnodes(dg)) + + cache = (; elements, interfaces, mpi_interfaces, boundaries, mortars, mpi_mortars, + mpi_cache) + + # Add specialized parts of the cache required to compute the volume integral etc. + cache = (; cache..., + create_cache(mesh, equations, dg.volume_integral, dg, uEltype)...) + cache = (; cache..., create_cache(mesh, equations, dg.mortar, uEltype)...) 
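The two lines above extend the cache via NamedTuple splatting; a small self-contained illustration of that pattern (entries are placeholders):

# Splatting merges NamedTuples, keeping earlier entries and appending new ones.
cache = (; elements = "E", interfaces = "I")
extra = (; mortars = "M")
cache = (; cache..., extra...)  # (elements = "E", interfaces = "I", mortars = "M")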
+ + return cache +end + +function init_mpi_cache(mesh::ParallelT8codeMesh, mpi_mesh_info, nvars, nnodes, uEltype) + mpi_cache = P4estMPICache(uEltype) + init_mpi_cache!(mpi_cache, mesh, mpi_mesh_info, nvars, nnodes, uEltype) + return mpi_cache +end + +function init_mpi_cache!(mpi_cache::P4estMPICache, mesh::ParallelT8codeMesh, + mpi_mesh_info, nvars, nnodes, uEltype) + mpi_neighbor_ranks, mpi_neighbor_interfaces, mpi_neighbor_mortars = init_mpi_neighbor_connectivity(mpi_mesh_info, + mesh) + + mpi_send_buffers, mpi_recv_buffers, mpi_send_requests, mpi_recv_requests = init_mpi_data_structures(mpi_neighbor_interfaces, + mpi_neighbor_mortars, + ndims(mesh), + nvars, + nnodes, + uEltype) + + n_elements_global = Int(t8_forest_get_global_num_elements(mesh.forest)) + n_elements_local = Int(t8_forest_get_local_num_elements(mesh.forest)) + + n_elements_by_rank = Vector{Int}(undef, mpi_nranks()) + n_elements_by_rank[mpi_rank() + 1] = n_elements_local + + MPI.Allgather!(MPI.UBuffer(n_elements_by_rank, 1), mpi_comm()) + + n_elements_by_rank = OffsetArray(n_elements_by_rank, 0:(mpi_nranks() - 1)) + + # Account for 1-based indexing in Julia. + first_element_global_id = sum(n_elements_by_rank[0:(mpi_rank() - 1)]) + 1 + + @assert n_elements_global==sum(n_elements_by_rank) "error in total number of elements" + + @pack! mpi_cache = mpi_neighbor_ranks, mpi_neighbor_interfaces, + mpi_neighbor_mortars, + mpi_send_buffers, mpi_recv_buffers, + mpi_send_requests, mpi_recv_requests, + n_elements_by_rank, n_elements_global, + first_element_global_id + + return mpi_cache +end + +function init_mpi_neighbor_connectivity(mpi_mesh_info, mesh::ParallelT8codeMesh) + @unpack mpi_interfaces, mpi_mortars, global_interface_ids, neighbor_ranks_interface, global_mortar_ids, neighbor_ranks_mortar = mpi_mesh_info + + mpi_neighbor_ranks = vcat(neighbor_ranks_interface, neighbor_ranks_mortar...) |> + sort |> unique + + p = sortperm(global_interface_ids) + + neighbor_ranks_interface .= neighbor_ranks_interface[p] + interface_ids = collect(1:nmpiinterfaces(mpi_interfaces))[p] + + p = sortperm(global_mortar_ids) + neighbor_ranks_mortar .= neighbor_ranks_mortar[p] + mortar_ids = collect(1:nmpimortars(mpi_mortars))[p] + + # For each neighbor rank, init connectivity data structures + mpi_neighbor_interfaces = Vector{Vector{Int}}(undef, length(mpi_neighbor_ranks)) + mpi_neighbor_mortars = Vector{Vector{Int}}(undef, length(mpi_neighbor_ranks)) + for (index, d) in enumerate(mpi_neighbor_ranks) + mpi_neighbor_interfaces[index] = interface_ids[findall(==(d), + neighbor_ranks_interface)] + mpi_neighbor_mortars[index] = mortar_ids[findall(x -> (d in x), + neighbor_ranks_mortar)] + end + + # Check that all interfaces were counted exactly once + @assert mapreduce(length, +, mpi_neighbor_interfaces; init = 0) == + nmpiinterfaces(mpi_interfaces) + + return mpi_neighbor_ranks, mpi_neighbor_interfaces, mpi_neighbor_mortars +end +end # @muladd diff --git a/src/solvers/dgsem_tree/dg_2d_parabolic.jl b/src/solvers/dgsem_tree/dg_2d_parabolic.jl index 3083ae30680..a6c962e03cd 100644 --- a/src/solvers/dgsem_tree/dg_2d_parabolic.jl +++ b/src/solvers/dgsem_tree/dg_2d_parabolic.jl @@ -13,7 +13,7 @@ # 2. compute f(u, grad(u)) # 3. compute div(f(u, grad(u))) (i.e., the "regular" rhs! call) # boundary conditions will be applied to both grad(u) and div(f(u, grad(u))). 
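Written out for the 1D heat equation u_t = (kappa u_x)_x with plain central differences, the three steps above take the following form (a schematic sketch only, independent of the DG containers used in this file):

# Central differences in the interior, one-sided differences at the boundaries.
function ddx(w, dx)
    n = length(w)
    d = similar(w, float(eltype(w)))
    for i in 2:(n - 1)
        d[i] = (w[i + 1] - w[i - 1]) / (2 * dx)
    end
    d[1] = (w[2] - w[1]) / dx
    d[n] = (w[n] - w[n - 1]) / dx
    return d
end

function parabolic_rhs_sketch(u, kappa, dx)
    g = ddx(u, dx)     # 1. compute grad(u)
    f = kappa .* g     # 2. compute f(u, grad(u))
    du = ddx(f, dx)    # 3. compute div(f(u, grad(u))), the "regular" rhs
    return du
end

# Example usage: parabolic_rhs_sketch(sin.(range(0, pi, length = 20)), 0.1, pi / 19)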
-function rhs_parabolic!(du, u, t, mesh::TreeMesh{2}, +function rhs_parabolic!(du, u, t, mesh::Union{TreeMesh{2}, TreeMesh{3}}, equations_parabolic::AbstractEquationsParabolic, initial_condition, boundary_conditions_parabolic, source_terms, dg::DG, parabolic_scheme, cache, cache_parabolic) @@ -951,22 +951,4 @@ function apply_jacobian_parabolic!(du, mesh::TreeMesh{2}, return nothing end - -function apply_jacobian_parabolic!(du, mesh::P4estMesh{2}, - equations::AbstractEquationsParabolic, - dg::DG, cache) - @unpack inverse_jacobian = cache.elements - - @threaded for element in eachelement(dg, cache) - for j in eachnode(dg), i in eachnode(dg) - factor = inverse_jacobian[i, j, element] - - for v in eachvariable(equations) - du[v, i, j, element] *= factor - end - end - end - - return nothing -end end # @muladd diff --git a/src/solvers/dgsem_tree/dg_2d_parallel.jl b/src/solvers/dgsem_tree/dg_2d_parallel.jl index 8095dae123a..157d462aa2f 100644 --- a/src/solvers/dgsem_tree/dg_2d_parallel.jl +++ b/src/solvers/dgsem_tree/dg_2d_parallel.jl @@ -446,7 +446,8 @@ function init_mpi_neighbor_connectivity(elements, mpi_interfaces, mpi_mortars, end function rhs!(du, u, t, - mesh::Union{ParallelTreeMesh{2}, ParallelP4estMesh{2}}, equations, + mesh::Union{ParallelTreeMesh{2}, ParallelP4estMesh{2}, + ParallelT8codeMesh{2}}, equations, initial_condition, boundary_conditions, source_terms::Source, dg::DG, cache) where {Source} # Start to receive MPI data diff --git a/src/solvers/dgsem_tree/dg_2d_subcell_limiters.jl b/src/solvers/dgsem_tree/dg_2d_subcell_limiters.jl index 2fc62f548d2..9af8b65b4cd 100644 --- a/src/solvers/dgsem_tree/dg_2d_subcell_limiters.jl +++ b/src/solvers/dgsem_tree/dg_2d_subcell_limiters.jl @@ -470,6 +470,9 @@ end For subcell limiting, the calculation of local bounds for non-periodic domains require the boundary outer state. This function returns the boundary value at time `t` and for node with spatial indices `indices`. + +!!! warning "Experimental implementation" + This is an experimental feature and may change in future releases. """ @inline function get_boundary_outer_state(boundary_condition::BoundaryConditionDirichlet, cache, t, equations, dg, indices...) diff --git a/src/solvers/dgsem_tree/dg_3d.jl b/src/solvers/dgsem_tree/dg_3d.jl index 0955dc38655..02ff338e912 100644 --- a/src/solvers/dgsem_tree/dg_3d.jl +++ b/src/solvers/dgsem_tree/dg_3d.jl @@ -36,13 +36,15 @@ end # The methods below are specialized on the volume integral type # and called from the basic `create_cache` method at the top. 
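The specialization mentioned here (the extra cache entries depend on the volume integral type) can be sketched with hypothetical stand-in types:

# Hypothetical stand-ins for illustration only; Trixi's actual volume integral
# types carry additional parameters.
struct FluxDifferencingSketch end
struct ShockCapturingSketch end

extra_cache(::FluxDifferencingSketch) = NamedTuple()  # nothing extra required
function extra_cache(::ShockCapturingSketch)
    # Shock capturing keeps lists of pure-DG and blended DG/FV elements.
    return (; element_ids_dg = Int[], element_ids_dgfv = Int[])
end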
-function create_cache(mesh::Union{TreeMesh{3}, StructuredMesh{3}, P4estMesh{3}}, +function create_cache(mesh::Union{TreeMesh{3}, StructuredMesh{3}, P4estMesh{3}, + T8codeMesh{3}}, equations, volume_integral::VolumeIntegralFluxDifferencing, dg::DG, uEltype) NamedTuple() end -function create_cache(mesh::Union{TreeMesh{3}, StructuredMesh{3}, P4estMesh{3}}, +function create_cache(mesh::Union{TreeMesh{3}, StructuredMesh{3}, P4estMesh{3}, + T8codeMesh{3}}, equations, volume_integral::VolumeIntegralShockCapturingHG, dg::DG, uEltype) element_ids_dg = Int[] @@ -79,8 +81,8 @@ function create_cache(mesh::Union{TreeMesh{3}, StructuredMesh{3}, P4estMesh{3}}, fstar2_L_threaded, fstar2_R_threaded, fstar3_L_threaded, fstar3_R_threaded) end -function create_cache(mesh::Union{TreeMesh{3}, StructuredMesh{3}, P4estMesh{3}}, - equations, +function create_cache(mesh::Union{TreeMesh{3}, StructuredMesh{3}, P4estMesh{3}, + T8codeMesh{3}}, equations, volume_integral::VolumeIntegralPureLGLFiniteVolume, dg::DG, uEltype) A4dp1_x = Array{uEltype, 4} @@ -112,7 +114,8 @@ end # The methods below are specialized on the mortar type # and called from the basic `create_cache` method at the top. -function create_cache(mesh::Union{TreeMesh{3}, StructuredMesh{3}, P4estMesh{3}}, +function create_cache(mesh::Union{TreeMesh{3}, StructuredMesh{3}, P4estMesh{3}, + T8codeMesh{3}}, equations, mortar_l2::LobattoLegendreMortarL2, uEltype) # TODO: Taal compare performance of different types A3d = Array{uEltype, 3} @@ -140,7 +143,7 @@ end # TODO: Taal discuss/refactor timer, allowing users to pass a custom timer? function rhs!(du, u, t, - mesh::Union{TreeMesh{3}, P4estMesh{3}}, equations, + mesh::Union{TreeMesh{3}, P4estMesh{3}, T8codeMesh{3}}, equations, initial_condition, boundary_conditions, source_terms::Source, dg::DG, cache) where {Source} # Reset du @@ -209,8 +212,8 @@ function rhs!(du, u, t, end function calc_volume_integral!(du, u, - mesh::Union{TreeMesh{3}, StructuredMesh{3}, - P4estMesh{3}}, + mesh::Union{TreeMesh{3}, StructuredMesh{3}, P4estMesh{3}, + T8codeMesh{3}}, nonconservative_terms, equations, volume_integral::VolumeIntegralWeakForm, dg::DGSEM, cache) @@ -264,8 +267,8 @@ See also https://github.com/trixi-framework/Trixi.jl/issues/1671#issuecomment-17 end function calc_volume_integral!(du, u, - mesh::Union{TreeMesh{3}, StructuredMesh{3}, - P4estMesh{3}}, + mesh::Union{TreeMesh{3}, StructuredMesh{3}, P4estMesh{3}, + T8codeMesh{3}}, nonconservative_terms, equations, volume_integral::VolumeIntegralFluxDifferencing, dg::DGSEM, cache) @@ -378,8 +381,8 @@ end # TODO: Taal dimension agnostic function calc_volume_integral!(du, u, - mesh::Union{TreeMesh{3}, StructuredMesh{3}, - P4estMesh{3}}, + mesh::Union{TreeMesh{3}, StructuredMesh{3}, P4estMesh{3}, + T8codeMesh{3}}, nonconservative_terms, equations, volume_integral::VolumeIntegralShockCapturingHG, dg::DGSEM, cache) @@ -437,7 +440,8 @@ function calc_volume_integral!(du, u, end @inline function fv_kernel!(du, u, - mesh::Union{TreeMesh{3}, StructuredMesh{3}, P4estMesh{3}}, + mesh::Union{TreeMesh{3}, StructuredMesh{3}, P4estMesh{3}, + T8codeMesh{3}}, nonconservative_terms, equations, volume_flux_fv, dg::DGSEM, cache, element, alpha = true) @unpack fstar1_L_threaded, fstar1_R_threaded, fstar2_L_threaded, fstar2_R_threaded, fstar3_L_threaded, fstar3_R_threaded = cache diff --git a/src/solvers/dgsem_tree/dg_3d_parabolic.jl b/src/solvers/dgsem_tree/dg_3d_parabolic.jl index 9ad28c6aa8e..d5504744742 100644 --- a/src/solvers/dgsem_tree/dg_3d_parabolic.jl +++ 
b/src/solvers/dgsem_tree/dg_3d_parabolic.jl @@ -5,114 +5,6 @@ @muladd begin #! format: noindent -# This file collects all methods that have been updated to work with parabolic systems of equations -# -# assumptions: parabolic terms are of the form div(f(u, grad(u))) and -# will be discretized first order form as follows: -# 1. compute grad(u) -# 2. compute f(u, grad(u)) -# 3. compute div(f(u, grad(u))) (i.e., the "regular" rhs! call) -# boundary conditions will be applied to both grad(u) and div(f(u, grad(u))). -function rhs_parabolic!(du, u, t, mesh::TreeMesh{3}, - equations_parabolic::AbstractEquationsParabolic, - initial_condition, boundary_conditions_parabolic, source_terms, - dg::DG, parabolic_scheme, cache, cache_parabolic) - @unpack viscous_container = cache_parabolic - @unpack u_transformed, gradients, flux_viscous = viscous_container - - # Convert conservative variables to a form more suitable for viscous flux calculations - @trixi_timeit timer() "transform variables" begin - transform_variables!(u_transformed, u, mesh, equations_parabolic, - dg, parabolic_scheme, cache, cache_parabolic) - end - - # Compute the gradients of the transformed variables - @trixi_timeit timer() "calculate gradient" begin - calc_gradient!(gradients, u_transformed, t, mesh, equations_parabolic, - boundary_conditions_parabolic, dg, cache, cache_parabolic) - end - - # Compute and store the viscous fluxes - @trixi_timeit timer() "calculate viscous fluxes" begin - calc_viscous_fluxes!(flux_viscous, gradients, u_transformed, mesh, - equations_parabolic, dg, cache, cache_parabolic) - end - - # The remainder of this function is essentially a regular rhs! for parabolic - # equations (i.e., it computes the divergence of the viscous fluxes) - # - # OBS! In `calc_viscous_fluxes!`, the viscous flux values at the volume nodes of each element have - # been computed and stored in `fluxes_viscous`. In the following, we *reuse* (abuse) the - # `interfaces` and `boundaries` containers in `cache_parabolic` to interpolate and store the - # *fluxes* at the element surfaces, as opposed to interpolating and storing the *solution* (as it - # is done in the hyperbolic operator). That is, `interfaces.u`/`boundaries.u` store *viscous flux values* - # and *not the solution*. The advantage is that a) we do not need to allocate more storage, b) we - # do not need to recreate the existing data structure only with a different name, and c) we do not - # need to interpolate solutions *and* gradients to the surfaces. 
- - # TODO: parabolic; reconsider current data structure reuse strategy - - # Reset du - @trixi_timeit timer() "reset ∂u/∂t" reset_du!(du, dg, cache) - - # Calculate volume integral - @trixi_timeit timer() "volume integral" begin - calc_volume_integral!(du, flux_viscous, mesh, equations_parabolic, dg, cache) - end - - # Prolong solution to interfaces - @trixi_timeit timer() "prolong2interfaces" begin - prolong2interfaces!(cache_parabolic, flux_viscous, mesh, equations_parabolic, - dg.surface_integral, dg, cache) - end - - # Calculate interface fluxes - @trixi_timeit timer() "interface flux" begin - calc_interface_flux!(cache_parabolic.elements.surface_flux_values, mesh, - equations_parabolic, dg, cache_parabolic) - end - - # Prolong solution to boundaries - @trixi_timeit timer() "prolong2boundaries" begin - prolong2boundaries!(cache_parabolic, flux_viscous, mesh, equations_parabolic, - dg.surface_integral, dg, cache) - end - - # Calculate boundary fluxes - @trixi_timeit timer() "boundary flux" begin - calc_boundary_flux_divergence!(cache_parabolic, t, - boundary_conditions_parabolic, - mesh, equations_parabolic, - dg.surface_integral, dg) - end - - # Prolong solution to mortars - @trixi_timeit timer() "prolong2mortars" begin - prolong2mortars!(cache, flux_viscous, mesh, equations_parabolic, - dg.mortar, dg.surface_integral, dg) - end - - # Calculate mortar fluxes - @trixi_timeit timer() "mortar flux" begin - calc_mortar_flux!(cache_parabolic.elements.surface_flux_values, mesh, - equations_parabolic, - dg.mortar, dg.surface_integral, dg, cache) - end - - # Calculate surface integrals - @trixi_timeit timer() "surface integral" begin - calc_surface_integral!(du, u, mesh, equations_parabolic, - dg.surface_integral, dg, cache_parabolic) - end - - # Apply Jacobian from mapping to reference element - @trixi_timeit timer() "Jacobian" begin - apply_jacobian_parabolic!(du, mesh, equations_parabolic, dg, cache_parabolic) - end - - return nothing -end - # Transform solution variables prior to taking the gradient # (e.g., conservative to primitive variables). Defaults to doing nothing. # TODO: can we avoid copying data? 
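As a concrete illustration of the transformation described in the comment above, a node-wise conservative-to-primitive conversion could be sketched as follows; the actual Trixi method dispatches on the parabolic equations' gradient-variable type and loops over the full multi-dimensional solution arrays:

# Sketch for a single element with nodal states stored column-wise;
# `cons2prim` denotes Trixi's conversion from conservative to primitive variables.
function transform_variables_sketch!(u_transformed, u, equations)
    for node in axes(u, 2)
        u_transformed[:, node] .= cons2prim(view(u, :, node), equations)
    end
    return u_transformed
end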
@@ -1141,22 +1033,4 @@ function apply_jacobian_parabolic!(du, mesh::TreeMesh{3}, return nothing end - -function apply_jacobian_parabolic!(du, mesh::P4estMesh{3}, - equations::AbstractEquationsParabolic, - dg::DG, cache) - @unpack inverse_jacobian = cache.elements - - @threaded for element in eachelement(dg, cache) - for k in eachnode(dg), j in eachnode(dg), i in eachnode(dg) - factor = inverse_jacobian[i, j, k, element] - - for v in eachvariable(equations) - du[v, i, j, k, element] *= factor - end - end - end - - return nothing -end end # @muladd diff --git a/src/solvers/dgsem_tree/indicators_3d.jl b/src/solvers/dgsem_tree/indicators_3d.jl index 40362889397..a11a8e06e4b 100644 --- a/src/solvers/dgsem_tree/indicators_3d.jl +++ b/src/solvers/dgsem_tree/indicators_3d.jl @@ -101,7 +101,8 @@ end alpha[element] = min(alpha_max, alpha_element) end -function apply_smoothing!(mesh::Union{TreeMesh{3}, P4estMesh{3}}, alpha, alpha_tmp, dg, +function apply_smoothing!(mesh::Union{TreeMesh{3}, P4estMesh{3}, T8codeMesh{3}}, alpha, + alpha_tmp, dg, cache) # Diffuse alpha values by setting each alpha to at least 50% of neighboring elements' alpha diff --git a/src/solvers/dgsem_tree/subcell_limiters.jl b/src/solvers/dgsem_tree/subcell_limiters.jl index 055e7ce24a4..e433c953779 100644 --- a/src/solvers/dgsem_tree/subcell_limiters.jl +++ b/src/solvers/dgsem_tree/subcell_limiters.jl @@ -16,18 +16,28 @@ end SubcellLimiterIDP(equations::AbstractEquations, basis; local_minmax_variables_cons = String[], positivity_variables_cons = String[], - positivity_correction_factor = 0.1) + positivity_variables_nonlinear = [], + positivity_correction_factor = 0.1, + max_iterations_newton = 10, + newton_tolerances = (1.0e-12, 1.0e-14), + gamma_constant_newton = 2 * ndims(equations)) Subcell invariant domain preserving (IDP) limiting used with [`VolumeIntegralSubcellLimiting`](@ref) including: - Local maximum/minimum Zalesak-type limiting for conservative variables (`local_minmax_variables_cons`) -- Positivity limiting for conservative variables (`positivity_variables_cons`) +- Positivity limiting for conservative variables (`positivity_variables_cons`) and nonlinear variables +(`positivity_variables_nonlinear`) Conservative variables to be limited are passed as a vector of strings, e.g. `local_minmax_variables_cons = ["rho"]` -and `positivity_variables_cons = ["rho"]`. +and `positivity_variables_cons = ["rho"]`. For nonlinear variables the specific functions are +passed in a vector, e.g. `positivity_variables_nonlinear = [pressure]`. The bounds are calculated using the low-order FV solution. The positivity limiter uses `positivity_correction_factor` such that `u^new >= positivity_correction_factor * u^FV`. +The limiting of nonlinear variables uses a Newton-bisection method with a maximum of +`max_iterations_newton` iterations, relative and absolute tolerances of `newton_tolerances` +and a provisional update constant `gamma_constant_newton` (`gamma_constant_newton>=2*d`, +where `d = #dimensions`). See equation (20) of Pazner (2020) and equation (30) of Rueda-Ramírez et al. (2022). !!! note This limiter and the correction callback [`SubcellLimiterIDPCorrection`](@ref) only work together. @@ -45,22 +55,32 @@ The bounds are calculated using the low-order FV solution. The positivity limite !!! warning "Experimental implementation" This is an experimental feature and may change in future releases. 
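A typical construction that combines conservative and nonlinear positivity limiting might look like the following sketch, where `pressure` is the pressure function of the chosen equations and `volume_flux`/`surface_flux` are fluxes defined elsewhere in the elixir (values shown are illustrative):

limiter_idp = SubcellLimiterIDP(equations, basis;
                                positivity_variables_cons = ["rho"],
                                positivity_variables_nonlinear = [pressure],
                                positivity_correction_factor = 0.1)
volume_integral = VolumeIntegralSubcellLimiting(limiter_idp;
                                                volume_flux_dg = volume_flux,
                                                volume_flux_fv = surface_flux)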
""" -struct SubcellLimiterIDP{RealT <: Real, Cache} <: AbstractSubcellLimiter +struct SubcellLimiterIDP{RealT <: Real, LimitingVariablesNonlinear, Cache} <: + AbstractSubcellLimiter local_minmax::Bool local_minmax_variables_cons::Vector{Int} # Local mininum/maximum principles for conservative variables positivity::Bool positivity_variables_cons::Vector{Int} # Positivity for conservative variables + positivity_variables_nonlinear::LimitingVariablesNonlinear # Positivity for nonlinear variables positivity_correction_factor::RealT cache::Cache + max_iterations_newton::Int + newton_tolerances::Tuple{RealT, RealT} # Relative and absolute tolerances for Newton's method + gamma_constant_newton::RealT # Constant for the subcell limiting of convex (nonlinear) constraints end # this method is used when the limiter is constructed as for shock-capturing volume integrals function SubcellLimiterIDP(equations::AbstractEquations, basis; local_minmax_variables_cons = String[], positivity_variables_cons = String[], - positivity_correction_factor = 0.1) + positivity_variables_nonlinear = [], + positivity_correction_factor = 0.1, + max_iterations_newton = 10, + newton_tolerances = (1.0e-12, 1.0e-14), + gamma_constant_newton = 2 * ndims(equations)) local_minmax = (length(local_minmax_variables_cons) > 0) - positivity = (length(positivity_variables_cons) > 0) + positivity = (length(positivity_variables_cons) + + length(positivity_variables_nonlinear) > 0) local_minmax_variables_cons_ = get_variable_index.(local_minmax_variables_cons, equations) @@ -80,13 +100,20 @@ function SubcellLimiterIDP(equations::AbstractEquations, basis; bound_keys = (bound_keys..., Symbol(string(v), "_min")) end end + for variable in positivity_variables_nonlinear + bound_keys = (bound_keys..., Symbol(string(variable), "_min")) + end cache = create_cache(SubcellLimiterIDP, equations, basis, bound_keys) SubcellLimiterIDP{typeof(positivity_correction_factor), + typeof(positivity_variables_nonlinear), typeof(cache)}(local_minmax, local_minmax_variables_cons_, positivity, positivity_variables_cons_, - positivity_correction_factor, cache) + positivity_variables_nonlinear, + positivity_correction_factor, cache, + max_iterations_newton, newton_tolerances, + gamma_constant_newton) end function Base.show(io::IO, limiter::SubcellLimiterIDP) @@ -97,10 +124,15 @@ function Base.show(io::IO, limiter::SubcellLimiterIDP) if !(local_minmax || positivity) print(io, "No limiter selected => pure DG method") else - print(io, "limiter=(") - local_minmax && print(io, "min/max limiting, ") - positivity && print(io, "positivity") - print(io, "), ") + features = String[] + if local_minmax + push!(features, "local min/max") + end + if positivity + push!(features, "positivity") + end + join(io, features, ", ") + print(io, "Limiter=($features), ") end print(io, "Local bounds with FV solution") print(io, ")") @@ -120,15 +152,15 @@ function Base.show(io::IO, ::MIME"text/plain", limiter::SubcellLimiterIDP) if local_minmax setup = [ setup..., - "" => "local maximum/minimum bounds for conservative variables $(limiter.local_minmax_variables_cons)", + "" => "Local maximum/minimum limiting for conservative variables $(limiter.local_minmax_variables_cons)", ] end if positivity - string = "positivity for conservative variables $(limiter.positivity_variables_cons)" + string = "Positivity limiting for conservative variables $(limiter.positivity_variables_cons) and $(limiter.positivity_variables_nonlinear)" setup = [setup..., "" => string] setup = [ setup..., - "" => " 
positivity correction factor = $(limiter.positivity_correction_factor)", + "" => "- with positivity correction factor = $(limiter.positivity_correction_factor)", ] end setup = [ diff --git a/src/solvers/dgsem_tree/subcell_limiters_2d.jl b/src/solvers/dgsem_tree/subcell_limiters_2d.jl index 384f4178bc9..3f7954c8958 100644 --- a/src/solvers/dgsem_tree/subcell_limiters_2d.jl +++ b/src/solvers/dgsem_tree/subcell_limiters_2d.jl @@ -5,6 +5,10 @@ @muladd begin #! format: noindent +############################################################################### +# IDP Limiting +############################################################################### + # this method is used when the limiter is constructed as for shock-capturing volume integrals function create_cache(limiter::Type{SubcellLimiterIDP}, equations::AbstractEquations{2}, basis::LobattoLegendreBasis, bound_keys) @@ -13,21 +17,32 @@ function create_cache(limiter::Type{SubcellLimiterIDP}, equations::AbstractEquat bound_keys) # Memory for bounds checking routine with `BoundsCheckCallback`. - # The first entry of each vector contains the maximum deviation since the last export. - # The second one contains the total maximum deviation. - idp_bounds_delta = Dict{Symbol, Vector{real(basis)}}() + # Local variable contains the maximum deviation since the last export. + # Using a threaded vector to parallelize bounds check. + idp_bounds_delta_local = Dict{Symbol, Vector{real(basis)}}() + # Global variable contains the total maximum deviation. + idp_bounds_delta_global = Dict{Symbol, real(basis)}() + # Note: False sharing causes critical performance issues on multiple threads when using a vector + # of length `Threads.nthreads()`. Initializing a vector of length `n * Threads.nthreads()` + # and then only using every n-th entry, fixes the problem and allows proper scaling. + # Since there are no processors with caches over 128B, we use `n = 128B / size(uEltype)` + stride_size = div(128, sizeof(eltype(basis.nodes))) # = n for key in bound_keys - idp_bounds_delta[key] = zeros(real(basis), 2) + idp_bounds_delta_local[key] = [zero(real(basis)) + for _ in 1:(stride_size * Threads.nthreads())] + idp_bounds_delta_global[key] = zero(real(basis)) end - return (; subcell_limiter_coefficients, idp_bounds_delta) + return (; subcell_limiter_coefficients, idp_bounds_delta_local, + idp_bounds_delta_global) end function (limiter::SubcellLimiterIDP)(u::AbstractArray{<:Any, 4}, semi, dg::DGSEM, t, dt; kwargs...) 
@unpack alpha = limiter.cache.subcell_limiter_coefficients - alpha .= zero(eltype(alpha)) + # TODO: Do not abuse `reset_du!` but maybe implement a generic `set_zero!` + @trixi_timeit timer() "reset alpha" reset_du!(alpha, dg, semi.cache) if limiter.local_minmax @trixi_timeit timer() "local min/max limiting" idp_local_minmax!(alpha, limiter, @@ -55,6 +70,9 @@ function (limiter::SubcellLimiterIDP)(u::AbstractArray{<:Any, 4}, semi, dg::DGSE return nothing end +############################################################################### +# Calculation of local bounds using low-order FV solution + @inline function calc_bounds_twosided!(var_min, var_max, variable, u, t, semi) mesh, equations, dg, cache = mesh_equations_solver_cache(semi) # Calc bounds inside elements @@ -153,6 +171,9 @@ end return nothing end +############################################################################### +# Local minimum/maximum limiting + @inline function idp_local_minmax!(alpha, limiter, u, t, dt, semi) for variable in limiter.local_minmax_variables_cons idp_local_minmax!(alpha, limiter, u, t, dt, semi, variable) @@ -222,16 +243,36 @@ end return nothing end +############################################################################### +# Global positivity limiting + @inline function idp_positivity!(alpha, limiter, u, dt, semi) # Conservative variables for variable in limiter.positivity_variables_cons - idp_positivity!(alpha, limiter, u, dt, semi, variable) + @trixi_timeit timer() "conservative variables" idp_positivity_conservative!(alpha, + limiter, + u, + dt, + semi, + variable) + end + + # Nonlinear variables + for variable in limiter.positivity_variables_nonlinear + @trixi_timeit timer() "nonlinear variables" idp_positivity_nonlinear!(alpha, + limiter, + u, dt, + semi, + variable) end return nothing end -@inline function idp_positivity!(alpha, limiter, u, dt, semi, variable) +############################################################################### +# Global positivity limiting of conservative variables + +@inline function idp_positivity_conservative!(alpha, limiter, u, dt, semi, variable) mesh, equations, dg, cache = mesh_equations_solver_cache(semi) (; antidiffusive_flux1_L, antidiffusive_flux2_L, antidiffusive_flux1_R, antidiffusive_flux2_R) = cache.antidiffusive_fluxes (; inverse_weights) = dg.basis @@ -245,7 +286,7 @@ end for j in eachnode(dg), i in eachnode(dg) var = u[variable, i, j, element] if var < 0 - error("Safe $variable is not safe. element=$element, node: $i $j, value=$var") + error("Safe low-order method produces negative value for conservative variable $variable. Try a smaller time step.") end # Compute bound @@ -291,4 +332,183 @@ end return nothing end + +@inline function idp_positivity_nonlinear!(alpha, limiter, u, dt, semi, variable) + _, equations, dg, cache = mesh_equations_solver_cache(semi) + (; positivity_correction_factor) = limiter + + (; variable_bounds) = limiter.cache.subcell_limiter_coefficients + var_min = variable_bounds[Symbol(string(variable), "_min")] + + @threaded for element in eachelement(dg, semi.cache) + inverse_jacobian = cache.elements.inverse_jacobian[element] + for j in eachnode(dg), i in eachnode(dg) + # Compute bound + u_local = get_node_vars(u, equations, dg, i, j, element) + var = variable(u_local, equations) + if var < 0 + error("Safe low-order method produces negative value for variable $variable. 
Try a smaller time step.") + end + var_min[i, j, element] = positivity_correction_factor * var + + # Perform Newton's bisection method to find new alpha + newton_loops_alpha!(alpha, var_min[i, j, element], u_local, i, j, element, + variable, initial_check_nonnegative_newton_idp, + final_check_nonnegative_newton_idp, inverse_jacobian, + dt, equations, dg, cache, limiter) + end + end + + return nothing +end + +@inline function newton_loops_alpha!(alpha, bound, u, i, j, element, variable, + initial_check, final_check, inverse_jacobian, dt, + equations, dg, cache, limiter) + (; inverse_weights) = dg.basis + (; antidiffusive_flux1_L, antidiffusive_flux2_L, antidiffusive_flux1_R, antidiffusive_flux2_R) = cache.antidiffusive_fluxes + + (; gamma_constant_newton) = limiter + + # negative xi direction + antidiffusive_flux = gamma_constant_newton * inverse_jacobian * inverse_weights[i] * + get_node_vars(antidiffusive_flux1_R, equations, dg, i, j, + element) + newton_loop!(alpha, bound, u, i, j, element, variable, initial_check, final_check, + equations, dt, limiter, antidiffusive_flux) + + # positive xi direction + antidiffusive_flux = -gamma_constant_newton * inverse_jacobian * + inverse_weights[i] * + get_node_vars(antidiffusive_flux1_L, equations, dg, i + 1, j, + element) + newton_loop!(alpha, bound, u, i, j, element, variable, initial_check, final_check, + equations, dt, limiter, antidiffusive_flux) + + # negative eta direction + antidiffusive_flux = gamma_constant_newton * inverse_jacobian * inverse_weights[j] * + get_node_vars(antidiffusive_flux2_R, equations, dg, i, j, + element) + newton_loop!(alpha, bound, u, i, j, element, variable, initial_check, final_check, + equations, dt, limiter, antidiffusive_flux) + + # positive eta direction + antidiffusive_flux = -gamma_constant_newton * inverse_jacobian * + inverse_weights[j] * + get_node_vars(antidiffusive_flux2_L, equations, dg, i, j + 1, + element) + newton_loop!(alpha, bound, u, i, j, element, variable, initial_check, final_check, + equations, dt, limiter, antidiffusive_flux) + + return nothing +end + +@inline function newton_loop!(alpha, bound, u, i, j, element, variable, initial_check, + final_check, equations, dt, limiter, antidiffusive_flux) + newton_reltol, newton_abstol = limiter.newton_tolerances + + beta = 1 - alpha[i, j, element] + + beta_L = 0 # alpha = 1 + beta_R = beta # No higher beta (lower alpha) than the current one + + u_curr = u + beta * dt * antidiffusive_flux + + # If state is valid, perform initial check and return if correction is not needed + if isvalid(u_curr, equations) + goal = goal_function_newton_idp(variable, bound, u_curr, equations) + + initial_check(bound, goal, newton_abstol) && return nothing + end + + # Newton iterations + for iter in 1:(limiter.max_iterations_newton) + beta_old = beta + + # If the state is valid, evaluate d(goal)/d(beta) + if isvalid(u_curr, equations) + dgoal_dbeta = dgoal_function_newton_idp(variable, u_curr, dt, + antidiffusive_flux, equations) + else # Otherwise, perform a bisection step + dgoal_dbeta = 0 + end + + if dgoal_dbeta != 0 + # Update beta with Newton's method + beta = beta - goal / dgoal_dbeta + end + + # Check bounds + if (beta < beta_L) || (beta > beta_R) || (dgoal_dbeta == 0) || isnan(beta) + # Out of bounds, do a bisection step + beta = 0.5 * (beta_L + beta_R) + # Get new u + u_curr = u + beta * dt * antidiffusive_flux + + # If the state is invalid, finish bisection step without checking tolerance and iterate further + if !isvalid(u_curr, equations) + beta_R = beta + 
continue + end + + # Check new beta for condition and update bounds + goal = goal_function_newton_idp(variable, bound, u_curr, equations) + if initial_check(bound, goal, newton_abstol) + # New beta fulfills condition + beta_L = beta + else + # New beta does not fulfill condition + beta_R = beta + end + else + # Get new u + u_curr = u + beta * dt * antidiffusive_flux + + # If the state is invalid, redefine right bound without checking tolerance and iterate further + if !isvalid(u_curr, equations) + beta_R = beta + continue + end + + # Evaluate goal function + goal = goal_function_newton_idp(variable, bound, u_curr, equations) + end + + # Check relative tolerance + if abs(beta_old - beta) <= newton_reltol + break + end + + # Check absolute tolerance + if final_check(bound, goal, newton_abstol) + break + end + end + + new_alpha = 1 - beta + if alpha[i, j, element] > new_alpha + newton_abstol + error("Alpha is getting smaller. old: $(alpha[i, j, element]), new: $new_alpha") + else + alpha[i, j, element] = new_alpha + end + + return nothing +end + +### Auxiliary routines for Newton's bisection method ### +# Initial checks +@inline initial_check_nonnegative_newton_idp(bound, goal, newton_abstol) = goal <= 0 + +# Goal and d(Goal)d(u) function +@inline goal_function_newton_idp(variable, bound, u, equations) = bound - + variable(u, equations) +@inline function dgoal_function_newton_idp(variable, u, dt, antidiffusive_flux, + equations) + -dot(gradient_conservative(variable, u, equations), dt * antidiffusive_flux) +end + +# Final checks +@inline function final_check_nonnegative_newton_idp(bound, goal, newton_abstol) + (goal <= eps()) && (goal > -max(newton_abstol, abs(bound) * newton_abstol)) +end end # @muladd diff --git a/test/Project.toml b/test/Project.toml index ecae0ac0900..a376c2805ea 100644 --- a/test/Project.toml +++ b/test/Project.toml @@ -13,13 +13,13 @@ Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" [compat] Aqua = "0.8" -CairoMakie = "0.6, 0.7, 0.8, 0.9, 0.10" +CairoMakie = "0.10" Downloads = "1" -ForwardDiff = "0.10" +ForwardDiff = "0.10.24" LinearAlgebra = "1" MPI = "0.20" OrdinaryDiffEq = "6.49.1" -Plots = "1.16" +Plots = "1.19" Printf = "1" Random = "1" Test = "1" diff --git a/test/runtests.jl b/test/runtests.jl index 7e195fe7402..49f0977bb70 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -89,6 +89,10 @@ const TRIXI_NTHREADS = clamp(Sys.CPU_THREADS, 2, 3) include("test_t8code_2d.jl") end + @time if TRIXI_TEST == "all" || TRIXI_TEST == "t8code_part2" + include("test_t8code_3d.jl") + end + @time if TRIXI_TEST == "all" || TRIXI_TEST == "unstructured_dgmulti" include("test_unstructured_2d.jl") include("test_dgmulti_1d.jl") diff --git a/test/test_mpi.jl b/test/test_mpi.jl index ad1ba4e835d..001d9bff86e 100644 --- a/test/test_mpi.jl +++ b/test/test_mpi.jl @@ -8,6 +8,7 @@ include("test_trixi.jl") # Start with a clean environment: remove Trixi.jl output directory if it exists outdir = "out" Trixi.mpi_isroot() && isdir(outdir) && rm(outdir, recursive = true) +Trixi.MPI.Barrier(Trixi.mpi_comm()) # CI with MPI and some tests fails often on Windows. Thus, we check whether this # is the case here. 
We use GitHub Actions, so we can check whether we run CI @@ -19,10 +20,12 @@ CI_ON_WINDOWS = (get(ENV, "GITHUB_ACTIONS", false) == "true") && Sys.iswindows() # TreeMesh tests include("test_mpi_tree.jl") - # P4estMesh tests + # P4estMesh and T8codeMesh tests include("test_mpi_p4est_2d.jl") + include("test_mpi_t8code_2d.jl") if !CI_ON_WINDOWS # see comment on `CI_ON_WINDOWS` above include("test_mpi_p4est_3d.jl") + include("test_mpi_t8code_3d.jl") end end # MPI @@ -43,5 +46,6 @@ end # MPI supporting functionality # Clean up afterwards: delete Trixi.jl output directory Trixi.mpi_isroot() && @test_nowarn rm(outdir, recursive = true) +Trixi.MPI.Barrier(Trixi.mpi_comm()) end # module diff --git a/test/test_mpi_p4est_2d.jl b/test/test_mpi_p4est_2d.jl index da90537fcfd..6d66bc68a26 100644 --- a/test/test_mpi_p4est_2d.jl +++ b/test/test_mpi_p4est_2d.jl @@ -33,6 +33,15 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "p4est_2d_dgsem") @test errors.linf≈[0.00011787417954578494] rtol=1.0e-4 end end + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end end @trixi_testset "elixir_advection_nonconforming_flag.jl" begin @@ -40,6 +49,15 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "p4est_2d_dgsem") "elixir_advection_nonconforming_flag.jl"), l2=[3.198940059144588e-5], linf=[0.00030636069494005547]) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end end @trixi_testset "elixir_advection_unstructured_flag.jl" begin @@ -47,6 +65,15 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "p4est_2d_dgsem") "elixir_advection_unstructured_flag.jl"), l2=[0.0005379687442422346], linf=[0.007438525029884735]) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end end @trixi_testset "elixir_advection_amr_solution_independent.jl" begin @@ -56,6 +83,15 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "p4est_2d_dgsem") l2=[4.949660644033807e-5], linf=[0.0004867846262313763], coverage_override=(maxiters = 6,)) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end end @trixi_testset "elixir_advection_amr_unstructured_flag.jl" begin @@ -64,6 +100,15 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "p4est_2d_dgsem") l2=[0.0012766060609964525], linf=[0.01750280631586159], coverage_override=(maxiters = 6,)) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end end @trixi_testset "elixir_advection_restart.jl" begin @@ -73,6 +118,15 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "p4est_2d_dgsem") # With the default `maxiters = 1` in coverage tests, # there would be no time steps after the restart. 
coverage_override=(maxiters = 100_000,)) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end end @trixi_testset "elixir_euler_source_terms_nonconforming_unstructured_flag.jl" begin @@ -90,6 +144,15 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "p4est_2d_dgsem") 0.03759938693042297, 0.08039824959535657, ]) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end end end end # P4estMesh MPI diff --git a/test/test_mpi_p4est_3d.jl b/test/test_mpi_p4est_3d.jl index 75f43650082..cca9093ec51 100644 --- a/test/test_mpi_p4est_3d.jl +++ b/test/test_mpi_p4est_3d.jl @@ -33,6 +33,15 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "p4est_3d_dgsem") @test errors.linf≈[0.0014548839020096516] rtol=1.0e-4 end end + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end end @trixi_testset "elixir_advection_amr.jl" begin @@ -46,6 +55,15 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "p4est_3d_dgsem") initial_refinement_level = 2, base_level = 2, med_level = 3, max_level = 4)) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end end @trixi_testset "elixir_advection_amr_unstructured_curved.jl" begin @@ -58,6 +76,15 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "p4est_3d_dgsem") initial_refinement_level = 0, base_level = 0, med_level = 1, max_level = 2)) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end end @trixi_testset "elixir_advection_restart.jl" begin @@ -67,12 +94,30 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "p4est_3d_dgsem") # With the default `maxiters = 1` in coverage tests, # there would be no time steps after the restart. 
coverage_override=(maxiters = 100_000,)) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end end @trixi_testset "elixir_advection_cubed_sphere.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_cubed_sphere.jl"), l2=[0.002006918015656413], linf=[0.027655117058380085]) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end end # Compressible Euler @@ -94,6 +139,15 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "p4est_3d_dgsem") 0.008526972236273522, ], tspan=(0.0, 0.01)) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end end @trixi_testset "elixir_euler_source_terms_nonperiodic.jl" begin @@ -114,6 +168,15 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "p4est_3d_dgsem") 0.01562861968368434, ], tspan=(0.0, 1.0)) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end end @trixi_testset "elixir_euler_ec.jl" begin @@ -134,6 +197,15 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "p4est_3d_dgsem") ], tspan=(0.0, 0.2), coverage_override=(polydeg = 3,)) # Prevent long compile time in CI + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end end @trixi_testset "elixir_euler_source_terms_nonperiodic_hohqmesh.jl" begin @@ -153,6 +225,15 @@ const EXAMPLES_DIR = pkgdir(Trixi, "examples", "p4est_3d_dgsem") 0.048396544302230504, 0.1154589758186293, ]) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end end end end # P4estMesh MPI diff --git a/test/test_mpi_t8code_2d.jl b/test/test_mpi_t8code_2d.jl new file mode 100644 index 00000000000..7c7fc03898c --- /dev/null +++ b/test/test_mpi_t8code_2d.jl @@ -0,0 +1,142 @@ +module TestExamplesMPIT8codeMesh2D + +using Test +using Trixi + +include("test_trixi.jl") + +const EXAMPLES_DIR = pkgdir(Trixi, "examples", "t8code_2d_dgsem") + +@testset "T8codeMesh MPI 2D" begin +#! format: noindent + +# Run basic tests +@testset "Examples 2D" begin + # Linear scalar advection + @trixi_testset "elixir_advection_basic.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_basic.jl"), + # Expected errors are exactly the same as with TreeMesh! 
+ l2=[8.311947673061856e-6], + linf=[6.627000273229378e-5]) + + @testset "error-based step size control" begin + Trixi.mpi_isroot() && println("-"^100) + Trixi.mpi_isroot() && + println("elixir_advection_basic.jl with error-based step size control") + + sol = solve(ode, RDPK3SpFSAL35(); abstol = 1.0e-4, reltol = 1.0e-4, + ode_default_options()..., callback = callbacks) + summary_callback() + errors = analysis_callback(sol) + if Trixi.mpi_isroot() + @test errors.l2≈[3.3022040342579066e-5] rtol=1.0e-4 + @test errors.linf≈[0.00011787417954578494] rtol=1.0e-4 + end + end + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + end + + @trixi_testset "elixir_advection_nonconforming_flag.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_advection_nonconforming_flag.jl"), + l2=[3.198940059144588e-5], + linf=[0.00030636069494005547]) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + end + + @trixi_testset "elixir_advection_unstructured_flag.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_advection_unstructured_flag.jl"), + l2=[0.0005379687442422346], + linf=[0.007438525029884735]) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + end + + @trixi_testset "elixir_advection_amr_solution_independent.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_advection_amr_solution_independent.jl"), + # Expected errors are exactly the same as with TreeMesh! 
+ l2=[4.933027431215839e-5], + linf=[0.00048678461161243136], + coverage_override=(maxiters = 6,)) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + end + + @trixi_testset "elixir_advection_amr_unstructured_flag.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_advection_amr_unstructured_flag.jl"), + l2=[0.001980652042312077], + linf=[0.0328882442132265], + coverage_override=(maxiters = 6,)) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + end + + @trixi_testset "elixir_euler_source_terms_nonconforming_unstructured_flag.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_euler_source_terms_nonconforming_unstructured_flag.jl"), + l2=[ + 0.0034516244508588046, + 0.0023420334036925493, + 0.0024261923964557187, + 0.004731710454271893, + ], + linf=[ + 0.04155789011775046, + 0.024772109862748914, + 0.03759938693042297, + 0.08039824959535657, + ]) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + end +end +end # T8codeMesh MPI + +end # module diff --git a/test/test_mpi_t8code_3d.jl b/test/test_mpi_t8code_3d.jl new file mode 100644 index 00000000000..a15690a7629 --- /dev/null +++ b/test/test_mpi_t8code_3d.jl @@ -0,0 +1,180 @@ +module TestExamplesMPIT8codeMesh3D + +using Test +using Trixi + +include("test_trixi.jl") + +const EXAMPLES_DIR = pkgdir(Trixi, "examples", "t8code_3d_dgsem") + +@testset "T8codeMesh MPI 3D" begin +#! format: noindent + +# Run basic tests +@testset "Examples 3D" begin + # Linear scalar advection + @trixi_testset "elixir_advection_basic.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_basic.jl"), + # Expected errors are exactly the same as with TreeMesh! + l2=[0.00016263963870641478], + linf=[0.0014537194925779984]) + + @testset "error-based step size control" begin + Trixi.mpi_isroot() && println("-"^100) + Trixi.mpi_isroot() && + println("elixir_advection_basic.jl with error-based step size control") + + sol = solve(ode, RDPK3SpFSAL35(); abstol = 1.0e-4, reltol = 1.0e-4, + ode_default_options()..., callback = callbacks) + summary_callback() + errors = analysis_callback(sol) + if Trixi.mpi_isroot() + @test errors.l2≈[0.00016800412839949264] rtol=1.0e-4 + @test errors.linf≈[0.0014548839020096516] rtol=1.0e-4 + end + end + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + end + + @trixi_testset "elixir_advection_amr.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_amr.jl"), + # Expected errors are exactly the same as with TreeMesh! 
+ l2=[1.1302812803902801e-5], + linf=[0.0007889950196294793], + # override values are different from the serial tests to ensure each process holds at least + # one element, otherwise OrdinaryDiffEq fails during initialization + coverage_override=(maxiters = 6, + initial_refinement_level = 2, + base_level = 2, med_level = 3, + max_level = 4)) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + end + + @trixi_testset "elixir_advection_amr_unstructured_curved.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_advection_amr_unstructured_curved.jl"), + l2=[2.0556575425846923e-5], + linf=[0.00105682693484822], + tspan=(0.0, 1.0), + coverage_override=(maxiters = 6, + initial_refinement_level = 0, + base_level = 0, med_level = 1, + max_level = 2)) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + end + + # Compressible Euler + @trixi_testset "elixir_euler_source_terms_nonconforming_unstructured_curved.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_euler_source_terms_nonconforming_unstructured_curved.jl"), + l2=[ + 4.070355207909268e-5, + 4.4993257426833716e-5, + 5.10588457841744e-5, + 5.102840924036687e-5, + 0.00019986264001630542, + ], + linf=[ + 0.0016987332417202072, + 0.003622956808262634, + 0.002029576258317789, + 0.0024206977281964193, + 0.008526972236273522, + ], + tspan=(0.0, 0.01)) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + end + + @trixi_testset "elixir_euler_source_terms_nonperiodic.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_euler_source_terms_nonperiodic.jl"), + l2=[ + 0.0015106060984283647, + 0.0014733349038567685, + 0.00147333490385685, + 0.001473334903856929, + 0.0028149479453087093, + ], + linf=[ + 0.008070806335238156, + 0.009007245083113125, + 0.009007245083121784, + 0.009007245083102688, + 0.01562861968368434, + ], + tspan=(0.0, 1.0)) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + end + + @trixi_testset "elixir_euler_ec.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_ec.jl"), + l2=[ + 0.010380390326164493, + 0.006192950051354618, + 0.005970674274073704, + 0.005965831290564327, + 0.02628875593094754, + ], + linf=[ + 0.3326911600075694, + 0.2824952141320467, + 0.41401037398065543, + 0.45574161423218573, + 0.8099577682187109, + ], + tspan=(0.0, 0.2), + coverage_override=(polydeg = 3,)) # Prevent long compile time in CI + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + end +end +end # T8codeMesh MPI + +end # module diff --git a/test/test_mpi_tree.jl b/test/test_mpi_tree.jl index 0831f6a1313..6351a405b5d 100644 --- a/test/test_mpi_tree.jl +++ b/test/test_mpi_tree.jl @@ -76,7 
+76,8 @@ CI_ON_WINDOWS = (get(ENV, "GITHUB_ACTIONS", false) == "true") && Sys.iswindows() # Here, we also test that SaveSolutionCallback prints multiple mesh files with AMR # Start with a clean environment: remove Trixi.jl output directory if it exists outdir = "out" - isdir(outdir) && rm(outdir, recursive = true) + Trixi.mpi_isroot() && isdir(outdir) && rm(outdir, recursive = true) + Trixi.MPI.Barrier(Trixi.mpi_comm()) @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_amr_refine_twice.jl"), l2=[0.00020547512522578292], diff --git a/test/test_p4est_2d.jl b/test/test_p4est_2d.jl index cebc2917d52..121001b35ff 100644 --- a/test/test_p4est_2d.jl +++ b/test/test_p4est_2d.jl @@ -391,6 +391,27 @@ end end end +@trixi_testset "elixir_euler_NACA6412airfoil_mach2.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_NACA6412airfoil_mach2.jl"), + l2=[ + 0.19107654776276498, 0.3545913719444839, + 0.18492730895077583, 0.817927213517244, + ], + linf=[ + 2.5397624311491946, 2.7075156425517917, 2.200980534211764, + 9.031153939238115, + ], + tspan=(0.0, 0.1)) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end +end + @trixi_testset "elixir_eulergravity_convergence.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_eulergravity_convergence.jl"), l2=[ diff --git a/test/test_p4est_3d.jl b/test/test_p4est_3d.jl index 4a2d2112c99..ea7d9193add 100644 --- a/test/test_p4est_3d.jl +++ b/test/test_p4est_3d.jl @@ -234,6 +234,29 @@ end end end +@trixi_testset "elixir_euler_free_stream_boundaries.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_euler_free_stream_boundaries.jl"), + l2=[ + 6.530157034651212e-16, 1.6057829680004379e-15, + 3.31107455378537e-15, 3.908829498281281e-15, + 5.048390610424672e-15, + ], + linf=[ + 4.884981308350689e-15, 1.1921019726912618e-14, + 1.5432100042289676e-14, 2.298161660974074e-14, + 6.039613253960852e-14, + ]) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end +end + @trixi_testset "elixir_euler_free_stream_extruded.jl with HLLC FLux" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_free_stream_extruded.jl"), l2=[ @@ -380,18 +403,18 @@ end @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_circular_wind_nonconforming.jl"), l2=[ - 1.573832094977477e-7, - 3.863090659429634e-5, - 3.867293305754584e-5, - 3.686550296950078e-5, - 0.05508968493733932, + 1.5737711609657832e-7, + 3.8630261900166194e-5, + 3.8672287531936816e-5, + 3.6865116098660796e-5, + 0.05508620970403884, ], linf=[ - 2.2695202613887133e-6, - 0.0005314968179916946, - 0.0005314969614147458, - 0.0005130280733059617, - 0.7944959432352334, + 2.268845333053271e-6, + 0.000531462302113539, + 0.0005314624461298934, + 0.0005129931254772464, + 0.7942778058932163, ], tspan=(0.0, 2e2), coverage_override=(trees_per_cube_face = (1, 1), polydeg = 3)) # Prevent long compile time in CI @@ -409,18 +432,18 @@ end @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_baroclinic_instability.jl"), l2=[ - 6.725065410642336e-7, - 0.00021710117340245454, - 0.000438679759422352, - 0.00020836356588024185, - 0.07602006689579247, + 6.725093801700048e-7, + 0.00021710076010951073, + 0.0004386796338203878, + 
0.00020836270267103122, + 0.07601887903440395, ], linf=[ - 1.9101671995258585e-5, - 0.029803626911022396, - 0.04847630924006063, - 0.022001371349740104, - 4.847761006938526, + 1.9107530539574924e-5, + 0.02980358831035801, + 0.048476331898047564, + 0.02200137344113612, + 4.848310144356219, ], tspan=(0.0, 1e2), # Decrease tolerance of adaptive time stepping to get similar results across different systems diff --git a/test/test_parabolic_2d.jl b/test/test_parabolic_2d.jl index 6632cd0bb27..9f1382caa62 100644 --- a/test/test_parabolic_2d.jl +++ b/test/test_parabolic_2d.jl @@ -218,9 +218,9 @@ end "elixir_advection_diffusion.jl"), tspan=(0.0, 0.0)) LLID = Trixi.local_leaf_cells(mesh.tree) - num_leafs = length(LLID) - @assert num_leafs % 8 == 0 - Trixi.refine!(mesh.tree, LLID[1:Int(num_leafs / 8)]) + num_leaves = length(LLID) + @assert num_leaves % 8 == 0 + Trixi.refine!(mesh.tree, LLID[1:Int(num_leaves / 8)]) tspan = (0.0, 1.5) semi = SemidiscretizationHyperbolicParabolic(mesh, (equations, equations_parabolic), @@ -414,9 +414,9 @@ end "elixir_navierstokes_convergence.jl"), tspan=(0.0, 0.0), initial_refinement_level=3) LLID = Trixi.local_leaf_cells(mesh.tree) - num_leafs = length(LLID) - @assert num_leafs % 4 == 0 - Trixi.refine!(mesh.tree, LLID[1:Int(num_leafs / 4)]) + num_leaves = length(LLID) + @assert num_leaves % 4 == 0 + Trixi.refine!(mesh.tree, LLID[1:Int(num_leaves / 4)]) tspan = (0.0, 0.5) semi = SemidiscretizationHyperbolicParabolic(mesh, (equations, equations_parabolic), initial_condition, solver; @@ -561,8 +561,8 @@ end @test_trixi_include(joinpath(examples_dir(), "p4est_2d_dgsem", "elixir_advection_diffusion_nonperiodic_amr.jl"), tspan=(0.0, 0.01), - l2=[0.00793438523666649], - linf=[0.11030633127144573]) + l2=[0.007933791324450538], + linf=[0.11029480573492567]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let diff --git a/test/test_parabolic_3d.jl b/test/test_parabolic_3d.jl index 6fbfb8259d4..1eaa9f51a56 100644 --- a/test/test_parabolic_3d.jl +++ b/test/test_parabolic_3d.jl @@ -252,9 +252,9 @@ end "elixir_navierstokes_convergence.jl"), tspan=(0.0, 0.0)) LLID = Trixi.local_leaf_cells(mesh.tree) - num_leafs = length(LLID) - @assert num_leafs % 16 == 0 - Trixi.refine!(mesh.tree, LLID[1:Int(num_leafs / 16)]) + num_leaves = length(LLID) + @assert num_leaves % 16 == 0 + Trixi.refine!(mesh.tree, LLID[1:Int(num_leaves / 16)]) tspan = (0.0, 0.25) semi = SemidiscretizationHyperbolicParabolic(mesh, (equations, equations_parabolic), initial_condition, solver; @@ -325,9 +325,9 @@ end "elixir_navierstokes_taylor_green_vortex.jl"), tspan=(0.0, 0.0)) LLID = Trixi.local_leaf_cells(mesh.tree) - num_leafs = length(LLID) - @assert num_leafs % 32 == 0 - Trixi.refine!(mesh.tree, LLID[1:Int(num_leafs / 32)]) + num_leaves = length(LLID) + @assert num_leaves % 32 == 0 + Trixi.refine!(mesh.tree, LLID[1:Int(num_leaves / 32)]) tspan = (0.0, 0.1) semi = SemidiscretizationHyperbolicParabolic(mesh, (equations, equations_parabolic), initial_condition, solver) @@ -429,8 +429,8 @@ end "elixir_advection_diffusion_amr.jl"), l2=[0.000355780485397024], linf=[0.0010810770271614256]) - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) let t = sol.t[end] u_ode = sol.u[end] @@ -444,8 +444,8 @@ end "elixir_advection_diffusion_nonperiodic.jl"), l2=[0.0009808996243280868], linf=[0.01732621559135459]) - # Ensure that we do not have excessive 
memory allocations - # (e.g., from type instabilities) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) let t = sol.t[end] u_ode = sol.u[end] @@ -472,8 +472,8 @@ end 0.12129218723807476, 0.8433893297612087, ]) - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) let t = sol.t[end] u_ode = sol.u[end] @@ -495,8 +495,8 @@ end 0.6782397526873181, 0.17663702154066238, 0.17663702154066266, 0.17663702154066238, 1.7327849844825238, ]) - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) let t = sol.t[end] u_ode = sol.u[end] diff --git a/test/test_structured_1d.jl b/test/test_structured_1d.jl index f0eecfa9acd..fea06554c57 100644 --- a/test/test_structured_1d.jl +++ b/test/test_structured_1d.jl @@ -138,6 +138,21 @@ end @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 end end + +@trixi_testset "elixir_traffic_flow_lwr_greenlight.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_traffic_flow_lwr_greenlight.jl"), + l2=[0.2005523261652845], + linf=[0.5052827913468407]) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end +end end # Clean up afterwards: delete Trixi.jl output directory diff --git a/test/test_structured_2d.jl b/test/test_structured_2d.jl index 1addc29e3e6..522510a42e3 100644 --- a/test/test_structured_2d.jl +++ b/test/test_structured_2d.jl @@ -33,14 +33,34 @@ end @trixi_testset "elixir_advection_coupled.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_coupled.jl"), - l2=[7.816742843181738e-6, 7.816742843196112e-6], - linf=[6.314906965543265e-5, 6.314906965410039e-5], + l2=[ + 7.816742843336293e-6, + 7.816742843340186e-6, + 7.816742843025513e-6, + 7.816742843061526e-6, + ], + linf=[ + 6.314906965276812e-5, + 6.314906965187994e-5, + 6.31490696496595e-5, + 6.314906965032563e-5, + ], coverage_override=(maxiters = 10^5,)) @testset "analysis_callback(sol) for AnalysisCallbackCoupled" begin errors = analysis_callback(sol) - @test errors.l2≈[7.816742843181738e-6, 7.816742843196112e-6] rtol=1.0e-4 - @test errors.linf≈[6.314906965543265e-5, 6.314906965410039e-5] rtol=1.0e-4 + @test errors.l2≈[ + 7.816742843336293e-6, + 7.816742843340186e-6, + 7.816742843025513e-6, + 7.816742843061526e-6, + ] rtol=1.0e-4 + @test errors.linf≈[ + 6.314906965276812e-5, + 6.314906965187994e-5, + 6.31490696496595e-5, + 6.314906965032563e-5, + ] rtol=1.0e-4 # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let @@ -611,6 +631,33 @@ end end end +@trixi_testset "elixir_euler_warm_bubble.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_euler_warm_bubble.jl"), + l2=[ + 0.00019387402388722496, + 0.03086514388623955, + 0.04541427917165, + 43.892826583444716, + ], + linf=[ + 0.0015942305974430138, + 0.17449778969139373, + 0.3729704262394843, + 307.6706958565337, + ], + cells_per_dimension=(32, 16), + tspan=(0.0, 10.0)) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, 
semi, t)) < 100 + end +end + @trixi_testset "elixir_eulerpolytropic_convergence.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_eulerpolytropic_convergence.jl"), l2=[ diff --git a/test/test_t8code_2d.jl b/test/test_t8code_2d.jl index b3e19471323..d536a6dd73a 100644 --- a/test/test_t8code_2d.jl +++ b/test/test_t8code_2d.jl @@ -30,7 +30,20 @@ mkdir(outdir) end end +@trixi_testset "test check_for_negative_volumes" begin + @test_warn "Discovered negative volumes" begin + # Unstructured mesh with six cells which have left-handed node ordering. + mesh_file = Trixi.download("https://gist.githubusercontent.com/jmark/bfe0d45f8e369298d6cc637733819013/raw/cecf86edecc736e8b3e06e354c494b2052d41f7a/rectangle_with_negative_volumes.msh", + joinpath(EXAMPLES_DIR, + "rectangle_with_negative_volumes.msh")) + + # This call should throw a warning about negative volumes detected. + mesh = T8codeMesh(mesh_file, 2) + end +end + @trixi_testset "elixir_advection_basic.jl" begin + # This test is identical to the one in `test_p4est_2d.jl`. @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_basic.jl"), # Expected errors are exactly the same as with TreeMesh! l2=[8.311947673061856e-6], @@ -46,6 +59,7 @@ end end @trixi_testset "elixir_advection_nonconforming_flag.jl" begin + # This test is identical to the one in `test_p4est_2d.jl`. @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_nonconforming_flag.jl"), l2=[3.198940059144588e-5], @@ -61,6 +75,7 @@ end end @trixi_testset "elixir_advection_unstructured_flag.jl" begin + # This test is identical to the one in `test_p4est_2d.jl`. @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_unstructured_flag.jl"), l2=[0.0005379687442422346], linf=[0.007438525029884735]) @@ -91,6 +106,7 @@ end end @trixi_testset "elixir_advection_amr_solution_independent.jl" begin + # This test is identical to the one in `test_p4est_2d.jl`. @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_amr_solution_independent.jl"), # Expected errors are exactly the same as with StructuredMesh! @@ -108,6 +124,7 @@ end end @trixi_testset "elixir_euler_source_terms_nonconforming_unstructured_flag.jl" begin + # This test is identical to the one in `test_p4est_2d.jl`. @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_source_terms_nonconforming_unstructured_flag.jl"), l2=[ @@ -133,6 +150,7 @@ end end @trixi_testset "elixir_euler_free_stream.jl" begin + # This test is identical to the one in `test_p4est_2d.jl`. @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_free_stream.jl"), l2=[ 2.063350241405049e-15, @@ -153,6 +171,7 @@ end end @trixi_testset "elixir_euler_shockcapturing_ec.jl" begin + # This test is identical to the one in `test_p4est_2d.jl`. @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_shockcapturing_ec.jl"), l2=[ 9.53984675e-02, @@ -178,6 +197,8 @@ end end @trixi_testset "elixir_euler_sedov.jl" begin + # This test is identical to the one in `test_p4est_2d.jl` besides minor + # deviations in the expected error norms. @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_sedov.jl"), l2=[ 3.76149952e-01, @@ -203,6 +224,7 @@ end end @trixi_testset "elixir_shallowwater_source_terms.jl" begin + # This test is identical to the one in `test_p4est_2d.jl`. @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_shallowwater_source_terms.jl"), l2=[ 9.168126407325352e-5, @@ -228,6 +250,7 @@ end end @trixi_testset "elixir_mhd_alfven_wave.jl" begin + # This test is identical to the one in `test_p4est_2d.jl`. 
@test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_mhd_alfven_wave.jl"), l2=[1.0513414461545583e-5, 1.0517900957166411e-6, 1.0517900957304043e-6, 1.511816606372376e-6, @@ -250,6 +273,8 @@ end end @trixi_testset "elixir_mhd_rotor.jl" begin + # This test is identical to the one in `test_p4est_2d.jl` besides minor + # deviations in the expected error norms. @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_mhd_rotor.jl"), l2=[0.44211360369891683, 0.8805178316216257, 0.8262710688468049, 0.0, diff --git a/test/test_t8code_3d.jl b/test/test_t8code_3d.jl new file mode 100644 index 00000000000..4232cf04094 --- /dev/null +++ b/test/test_t8code_3d.jl @@ -0,0 +1,279 @@ +module TestExamplesT8codeMesh3D + +using Test +using Trixi + +include("test_trixi.jl") + +EXAMPLES_DIR = joinpath(examples_dir(), "t8code_3d_dgsem") + +# Start with a clean environment: remove Trixi.jl output directory if it exists +outdir = "out" +isdir(outdir) && rm(outdir, recursive = true) +mkdir(outdir) + +@testset "T8codeMesh3D" begin + # This test is identical to the one in `test_p4est_3d.jl`. + @trixi_testset "elixir_advection_basic.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_basic.jl"), + # Expected errors are exactly the same as with TreeMesh! + l2=[0.00016263963870641478], + linf=[0.0014537194925779984]) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + end + + # This test is identical to the one in `test_p4est_3d.jl`. + @trixi_testset "elixir_advection_unstructured_curved.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_advection_unstructured_curved.jl"), + l2=[0.0004750004258546538], + linf=[0.026527551737137167]) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + end + + # This test is identical to the one in `test_p4est_3d.jl`. + @trixi_testset "elixir_advection_nonconforming.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_nonconforming.jl"), + l2=[0.00253595715323843], + linf=[0.016486952252155795]) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + end + + # This test is identical to the one in `test_p4est_3d.jl` besides minor + # deviations from the expected error norms. + @trixi_testset "elixir_advection_amr.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_amr.jl"), + # Expected errors are exactly the same as with TreeMesh! + l2=[1.1302812803902801e-5], + linf=[0.0007889950196294793], + coverage_override=(maxiters = 6, initial_refinement_level = 1, + base_level = 1, med_level = 2, max_level = 3)) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + end + + # This test is identical to the one in `test_p4est_3d.jl` besides minor + # deviations from the expected error norms. 
+ @trixi_testset "elixir_advection_amr_unstructured_curved.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_advection_amr_unstructured_curved.jl"), + l2=[2.0556575425846923e-5], + linf=[0.00105682693484822], + tspan=(0.0, 1.0), + coverage_override=(maxiters = 6, initial_refinement_level = 0, + base_level = 0, med_level = 1, max_level = 2)) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + end + + # This test is identical to the one in `test_p4est_3d.jl`. + @trixi_testset "elixir_euler_source_terms_nonconforming_unstructured_curved.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_euler_source_terms_nonconforming_unstructured_curved.jl"), + l2=[ + 4.070355207909268e-5, + 4.4993257426833716e-5, + 5.10588457841744e-5, + 5.102840924036687e-5, + 0.00019986264001630542, + ], + linf=[ + 0.0016987332417202072, + 0.003622956808262634, + 0.002029576258317789, + 0.0024206977281964193, + 0.008526972236273522, + ], + tspan=(0.0, 0.01)) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + end + + # This test is identical to the one in `test_p4est_3d.jl`. + @trixi_testset "elixir_euler_source_terms_nonperiodic.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_euler_source_terms_nonperiodic.jl"), + l2=[ + 0.0015106060984283647, + 0.0014733349038567685, + 0.00147333490385685, + 0.001473334903856929, + 0.0028149479453087093, + ], + linf=[ + 0.008070806335238156, + 0.009007245083113125, + 0.009007245083121784, + 0.009007245083102688, + 0.01562861968368434, + ], + tspan=(0.0, 1.0)) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + end + + # This test is identical to the one in `test_p4est_3d.jl`. + @trixi_testset "elixir_euler_free_stream.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_free_stream.jl"), + l2=[ + 5.162664597942288e-15, + 1.941857343642486e-14, + 2.0232366394187278e-14, + 2.3381518645408552e-14, + 7.083114561232324e-14, + ], + linf=[ + 7.269740365245525e-13, + 3.289868377720495e-12, + 4.440087186807773e-12, + 3.8686831516088205e-12, + 9.412914891981927e-12, + ], + tspan=(0.0, 0.03)) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + end + + # This test is identical to the one in `test_p4est_3d.jl`. 
+ @trixi_testset "elixir_euler_free_stream_extruded.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_free_stream_extruded.jl"), + l2=[ + 8.444868392439035e-16, + 4.889826056731442e-15, + 2.2921260987087585e-15, + 4.268460455702414e-15, + 1.1356712092620279e-14, + ], + linf=[ + 7.749356711883593e-14, + 2.8792246364872653e-13, + 1.1121659149182506e-13, + 3.3228975127030935e-13, + 9.592326932761353e-13, + ], + tspan=(0.0, 0.1)) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + end + + # This test is identical to the one in `test_p4est_3d.jl`. + @trixi_testset "elixir_euler_ec.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_ec.jl"), + l2=[ + 0.010380390326164493, + 0.006192950051354618, + 0.005970674274073704, + 0.005965831290564327, + 0.02628875593094754, + ], + linf=[ + 0.3326911600075694, + 0.2824952141320467, + 0.41401037398065543, + 0.45574161423218573, + 0.8099577682187109, + ], + tspan=(0.0, 0.2), + coverage_override=(polydeg = 3,)) # Prevent long compile time in CI + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + end + + # This test is identical to the one in `test_p4est_3d.jl` besides minor + # deviations in the expected error norms. + @trixi_testset "elixir_euler_sedov.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_sedov.jl"), + l2=[ + 7.82070951e-02, + 4.33260474e-02, + 4.33260474e-02, + 4.33260474e-02, + 3.75260911e-01, + ], + linf=[ + 7.45329845e-01, + 3.21754792e-01, + 3.21754792e-01, + 3.21754792e-01, + 4.76151527e+00, + ], + tspan=(0.0, 0.3), + coverage_override=(polydeg = 3,)) # Prevent long compile time in CI + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + end +end + +# Clean up afterwards: delete Trixi.jl output directory +@test_nowarn rm(outdir, recursive = true) + +end # module diff --git a/test/test_threaded.jl b/test/test_threaded.jl index dbbcbf4c7ce..a8a1b1b425a 100644 --- a/test/test_threaded.jl +++ b/test/test_threaded.jl @@ -8,6 +8,7 @@ include("test_trixi.jl") # Start with a clean environment: remove Trixi.jl output directory if it exists outdir = "out" Trixi.mpi_isroot() && isdir(outdir) && rm(outdir, recursive = true) +Trixi.MPI.Barrier(Trixi.mpi_comm()) @testset "Threaded tests" begin #! 
format: noindent @@ -471,5 +472,6 @@ end # Clean up afterwards: delete Trixi.jl output directory Trixi.mpi_isroot() && isdir(outdir) && @test_nowarn rm(outdir, recursive = true) +Trixi.MPI.Barrier(Trixi.mpi_comm()) end # module diff --git a/test/test_tree_1d.jl b/test/test_tree_1d.jl index 4654f6313f7..8b470278ffd 100644 --- a/test/test_tree_1d.jl +++ b/test/test_tree_1d.jl @@ -47,6 +47,9 @@ isdir(outdir) && rm(outdir, recursive = true) # FDSBP methods on the TreeMesh include("test_tree_1d_fdsbp.jl") + + # Traffic flow LWR + include("test_tree_1d_traffic_flow_lwr.jl") end # Coverage test for all initial conditions diff --git a/test/test_tree_1d_traffic_flow_lwr.jl b/test/test_tree_1d_traffic_flow_lwr.jl new file mode 100644 index 00000000000..54412e314b3 --- /dev/null +++ b/test/test_tree_1d_traffic_flow_lwr.jl @@ -0,0 +1,42 @@ +module TestExamples1DTrafficFlowLWR + +using Test +using Trixi + +include("test_trixi.jl") + +EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_1d_dgsem") + +@testset "Traffic-flow LWR" begin +#! format: noindent + +@trixi_testset "elixir_traffic_flow_lwr_convergence.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_traffic_flow_lwr_convergence.jl"), + l2=[0.0008455067389588569], + linf=[0.004591951086623913]) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end +end + +@trixi_testset "elixir_traffic_flow_lwr_trafficjam.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_traffic_flow_lwr_trafficjam.jl"), + l2=[0.1761758135539748], linf=[0.5]) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end +end +end + +end # module diff --git a/test/test_tree_2d_euler.jl b/test/test_tree_2d_euler.jl index 65899cd5263..b937abe92c0 100644 --- a/test/test_tree_2d_euler.jl +++ b/test/test_tree_2d_euler.jl @@ -581,6 +581,32 @@ end end end +@trixi_testset "elixir_euler_kelvin_helmholtz_instability_sc_subcell.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_euler_kelvin_helmholtz_instability_sc_subcell.jl"), + l2=[ + 0.42185634563805724, + 0.1686471269704017, + 0.18240674916968103, + 0.17858250604280654, + ], + linf=[ + 1.7012978064377158, + 0.7149714986746726, + 0.5822547982757897, + 0.7300051017382696, + ], + tspan=(0.0, 2.0)) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 15000 + end +end + @trixi_testset "elixir_euler_colliding_flow.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_colliding_flow.jl"), l2=[ @@ -834,6 +860,32 @@ end end end +@trixi_testset "elixir_euler_warm_bubble.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_warm_bubble.jl"), + l2=[ + 0.0001379946769624388, + 0.02078779689715382, + 0.033237241571263176, + 31.36068872331705, + ], + linf=[ + 0.0016286690573188434, + 0.15623770697198225, + 0.3341371832270615, + 334.5373488726036, + ], + tspan=(0.0, 10.0), + initial_refinement_level=4) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test 
(@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 100 + end +end + # Coverage test for all initial conditions @testset "Compressible Euler: Tests for initial conditions" begin @trixi_testset "elixir_euler_vortex.jl one step with initial_condition_constant" begin diff --git a/test/test_tree_2d_mhd.jl b/test/test_tree_2d_mhd.jl index 953c077c0a3..1f8458075aa 100644 --- a/test/test_tree_2d_mhd.jl +++ b/test/test_tree_2d_mhd.jl @@ -332,24 +332,28 @@ end @trixi_testset "elixir_mhd_shockcapturing_subcell.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_mhd_shockcapturing_subcell.jl"), - l2=[2.9974425783503109e-02, - 7.2849646345685956e-02, - 7.2488477174662239e-02, + l2=[ + 3.2064026219236076e-02, + 7.2461094392606618e-02, + 7.2380202888062711e-02, 0.0000000000000000e+00, - 1.2507971380965512e+00, - 1.8929505145499678e-02, - 1.2218606317164420e-02, + 8.6293936673145932e-01, + 8.4091669534557805e-03, + 5.2156364913231732e-03, 0.0000000000000000e+00, - 3.0154796910479838e-03], - linf=[3.2147382412340830e-01, - 1.3709471664007811e+00, - 1.3465154685288383e+00, + 2.0786952301129021e-04, + ], + linf=[ + 3.8778760255775635e-01, + 9.4666683953698927e-01, + 9.4618924645661928e-01, 0.0000000000000000e+00, - 1.6051257523415284e+01, - 3.0564266749926644e-01, - 2.3908016329805595e-01, + 1.0980297261521951e+01, + 1.0264404591009069e-01, + 1.0655686942176350e-01, 0.0000000000000000e+00, - 1.3711262178549158e-01], + 6.1013422157115546e-03, + ], tspan=(0.0, 0.003)) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) diff --git a/test/test_unit.jl b/test/test_unit.jl index 817b4cd550d..c1379587cc8 100644 --- a/test/test_unit.jl +++ b/test/test_unit.jl @@ -416,7 +416,8 @@ end indicator_hg = IndicatorHennemannGassner(1.0, 0.0, true, "variable", "cache") @test_nowarn show(stdout, indicator_hg) - limiter_idp = SubcellLimiterIDP(true, [1], true, [1], 0.1, "cache") + limiter_idp = SubcellLimiterIDP(true, [1], true, [1], ["variable"], 0.1, "cache", 1, + (1.0, 1.0), 1.0) @test_nowarn show(stdout, limiter_idp) # TODO: TrixiShallowWater: move unit test @@ -857,6 +858,30 @@ end end end +@timed_testset "Consistency check for Lax-Friedrich flux: Polytropic CEE" begin + for gamma in [1.4, 1.0, 5 / 3] + kappa = 0.5 # Scaling factor for the pressure. 
+ equations = PolytropicEulerEquations2D(gamma, kappa) + u = SVector(1.1, -0.5, 2.34) + + orientations = [1, 2] + for orientation in orientations + @test flux_lax_friedrichs(u, u, orientation, equations) ≈ + flux(u, orientation, equations) + end + + normal_directions = [SVector(1.0, 0.0), + SVector(0.0, 1.0), + SVector(0.5, -0.5), + SVector(-1.2, 0.3)] + + for normal_direction in normal_directions + @test flux_lax_friedrichs(u, u, normal_direction, equations) ≈ + flux(u, normal_direction, equations) + end + end +end + @timed_testset "Consistency check for HLL flux with Davis wave speed estimates: LEE" begin flux_hll = FluxHLL(min_max_speed_davis) @@ -1220,6 +1245,26 @@ end end end +@testset "Consistency check for `gradient_conservative` routine" begin + # Set up conservative variables, equations + u = [ + 0.5011914484393387, + 0.8829127712445113, + 0.43024132987932817, + 0.7560616633050348, + ] + + equations = CompressibleEulerEquations2D(1.4) + + # Define wrapper function for pressure in order to call default implementation + function pressure_test(u, equations) + return pressure(u, equations) + end + + @test Trixi.gradient_conservative(pressure_test, u, equations) ≈ + Trixi.gradient_conservative(pressure, u, equations) +end + @testset "Equivalent Fluxes" begin # Set up equations and dummy conservative variables state # Burgers' Equation @@ -1287,6 +1332,49 @@ end end end +@timed_testset "Consistency check for LMARS flux" begin + equations = CompressibleEulerEquations2D(1.4) + flux_lmars = FluxLMARS(340) + + normal_directions = [SVector(1.0, 0.0), + SVector(0.0, 1.0), + SVector(0.5, -0.5), + SVector(-1.2, 0.3)] + orientations = [1, 2] + u_values = [SVector(1.0, 0.5, -0.7, 1.0), + SVector(1.5, -0.2, 0.1, 5.0)] + + for u in u_values, orientation in orientations + @test flux_lmars(u, u, orientation, equations) ≈ + flux(u, orientation, equations) + end + + for u in u_values, normal_direction in normal_directions + @test flux_lmars(u, u, normal_direction, equations) ≈ + flux(u, normal_direction, equations) + end + + equations = CompressibleEulerEquations3D(1.4) + normal_directions = [SVector(1.0, 0.0, 0.0), + SVector(0.0, 1.0, 0.0), + SVector(0.0, 0.0, 1.0), + SVector(0.5, -0.5, 0.2), + SVector(-1.2, 0.3, 1.4)] + orientations = [1, 2, 3] + u_values = [SVector(1.0, 0.5, -0.7, 0.1, 1.0), + SVector(1.5, -0.2, 0.1, 0.2, 5.0)] + + for u in u_values, orientation in orientations + @test flux_lmars(u, u, orientation, equations) ≈ + flux(u, orientation, equations) + end + + for u in u_values, normal_direction in normal_directions + @test flux_lmars(u, u, normal_direction, equations) ≈ + flux(u, normal_direction, equations) + end +end + @testset "FluxRotated vs. 
direct implementation" begin @timed_testset "CompressibleEulerMulticomponentEquations2D" begin equations = CompressibleEulerMulticomponentEquations2D(gammas = (1.4, 1.4), @@ -1320,7 +1408,8 @@ end u_values = [SVector(1.0, 0.5, -0.7, 1.0), SVector(1.5, -0.2, 0.1, 5.0)] fluxes = [flux_central, flux_ranocha, flux_shima_etal, flux_kennedy_gruber, - flux_hll, FluxHLL(min_max_speed_davis), flux_hlle, flux_hllc] + FluxLMARS(340), flux_hll, FluxHLL(min_max_speed_davis), flux_hlle, flux_hllc, + ] for f_std in fluxes f_rot = FluxRotated(f_std) @@ -1464,103 +1553,6 @@ end @test mesh.boundary_faces[:entire_boundary] == [1, 2] end - -@testset "trixi_include" begin - @trixi_testset "Basic" begin - example = """ - x = 4 - """ - - filename = tempname() - try - open(filename, "w") do file - write(file, example) - end - - # Use `@trixi_testset`, which wraps code in a temporary module, and call - # `trixi_include` with `@__MODULE__` in order to isolate this test. - @test_warn "You just called" trixi_include(@__MODULE__, filename) - @test @isdefined x - @test x == 4 - - @test_warn "You just called" trixi_include(@__MODULE__, filename, x = 7) - @test x == 7 - - @test_throws "assignment `y` not found in expression" trixi_include(@__MODULE__, - filename, - y = 3) - finally - rm(filename, force = true) - end - end - - @trixi_testset "With `solve` Without `maxiters`" begin - # `trixi_include` assumes this to be the `solve` function of OrdinaryDiffEq, - # and therefore tries to insert the kwarg `maxiters`, which will fail here. - example = """ - solve() = 0 - x = solve() - """ - - filename = tempname() - try - open(filename, "w") do file - write(file, example) - end - - # Use `@trixi_testset`, which wraps code in a temporary module, and call - # `trixi_include` with `@__MODULE__` in order to isolate this test. - @test_throws "no method matching solve(; maxiters::Int64)" trixi_include(@__MODULE__, - filename) - - @test_throws "no method matching solve(; maxiters::Int64)" trixi_include(@__MODULE__, - filename, - maxiters = 3) - finally - rm(filename, force = true) - end - end - - @trixi_testset "With `solve` with `maxiters`" begin - # We need another example file that we include with `Base.include` first, in order to - # define the `solve` method without `trixi_include` trying to insert `maxiters` kwargs. - # Then, we can test that `trixi_include` inserts the kwarg in the `solve()` call. - example1 = """ - solve(; maxiters=0) = maxiters - """ - - example2 = """ - x = solve() - """ - - filename1 = tempname() - filename2 = tempname() - try - open(filename1, "w") do file - write(file, example1) - end - open(filename2, "w") do file - write(file, example2) - end - - # Use `@trixi_testset`, which wraps code in a temporary module, and call - # `Base.include` and `trixi_include` with `@__MODULE__` in order to isolate this test. 
- Base.include(@__MODULE__, filename1) - @test_warn "You just called" trixi_include(@__MODULE__, filename2) - @test @isdefined x - # This is the default `maxiters` inserted by `trixi_include` - @test x == 10^5 - - @test_warn "You just called" trixi_include(@__MODULE__, filename2, - maxiters = 7) - # Test that `maxiters` got overwritten - @test x == 7 - finally - rm(filename1, force = true) - rm(filename2, force = true) - end - end -end end end #module diff --git a/utils/trixi-format-file.jl b/utils/trixi-format-file.jl index c4d8e7c9032..9b9a0e4949c 100755 --- a/utils/trixi-format-file.jl +++ b/utils/trixi-format-file.jl @@ -2,7 +2,8 @@ using Pkg Pkg.activate(; temp = true, io = devnull) -Pkg.add("JuliaFormatter"; preserve = PRESERVE_ALL, io = devnull) +Pkg.add(PackageSpec(name = "JuliaFormatter", version = "1.0.45"); preserve = PRESERVE_ALL, + io = devnull) using JuliaFormatter: format_file diff --git a/utils/trixi-format.jl b/utils/trixi-format.jl index d1e7efa656a..63f14078807 100755 --- a/utils/trixi-format.jl +++ b/utils/trixi-format.jl @@ -2,7 +2,8 @@ using Pkg Pkg.activate(; temp = true, io = devnull) -Pkg.add("JuliaFormatter"; preserve = PRESERVE_ALL, io = devnull) +Pkg.add(PackageSpec(name = "JuliaFormatter", version = "1.0.45"); preserve = PRESERVE_ALL, + io = devnull) using JuliaFormatter: format
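
The flux consistency checks added to test/test_unit.jl above (for flux_lax_friedrichs with the polytropic Euler equations, for FluxLMARS, and for the Davis-based HLL flux) all verify the same property: a two-point numerical flux evaluated with identical left and right states must reduce to the exact physical flux. A minimal sketch of that property, using a hypothetical scalar linear-advection flux rather than Trixi.jl's equations and flux functions, could look like this:

using Test

# Physical flux of 1D linear advection with constant speed `a`: f(u) = a * u.
advection_flux(u, a) = a * u

# Local Lax-Friedrichs (Rusanov) two-point flux: central average plus
# dissipation proportional to the maximal wave speed |a|.
function lax_friedrichs_flux(u_ll, u_rr, a)
    return 0.5 * (advection_flux(u_ll, a) + advection_flux(u_rr, a)) -
           0.5 * abs(a) * (u_rr - u_ll)
end

# Consistency: for u_ll == u_rr the dissipation term vanishes, so the
# numerical flux must equal the physical flux; this is the same property
# the unit tests above assert for Trixi.jl's numerical fluxes.
let a = 2.0, u = 1.1
    @test lax_friedrichs_flux(u, u, a) ≈ advection_flux(u, a)
end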