From bd060b6d140b2c4f8f717a89899e867593e141c1 Mon Sep 17 00:00:00 2001 From: Andrew Winters Date: Thu, 14 Mar 2024 15:49:16 +0100 Subject: [PATCH] Add functionality for `TimeSeries` callback on `UnstructuredMesh2D` (#1855) * add functionality for TimeSeries callback on UnstructuredMesh2D * Update src/callbacks_step/time_series_dg.jl Co-authored-by: Hendrik Ranocha * Apply suggestions from code review Co-authored-by: Daniel Doehring * add strategy to correctly locate a gauge point within a curvilinear element * add sanity check that the Newton solution is correct * run formatter * implement a more general approach that also works on curved element without issue * run formatter * forgot to format the examples * Apply suggestions from code review Co-authored-by: Hendrik Ranocha Co-authored-by: Daniel Doehring * working version of the element finding routine * run formatter * add new elixir for the time series callback * add additional test for the time series callback on an unstructured mesh * add appropriate test * update docstring * add comment about the barycenter computation * add simplifications and comments from code review * adjust variable name to avoid ugly formatting * Apply suggestions from code review Co-authored-by: Hendrik Ranocha * fix variable name * remove Experimental status from the TimeSeriesCallback * move new TimeSeries test into the unit testing * add output_directory creation if not already done. Necessary if this callback is used without the SaveSolution callback * formatting * update test mesh to have one straight-sided element to trigger inverse bi-linear interpolation * update test values * add news item * forgot to update all new test values on the new mesh * update tests and use coverage override to avoid redundancy --------- Co-authored-by: Hendrik Ranocha Co-authored-by: Daniel Doehring --- NEWS.md | 12 +- .../elixir_euler_time_series.jl | 115 ++++++++ src/callbacks_step/time_series.jl | 9 +- src/callbacks_step/time_series_dg.jl | 6 +- src/callbacks_step/time_series_dg2d.jl | 279 +++++++++++++++++- test/test_unit.jl | 1 + test/test_unstructured_2d.jl | 33 +++ 7 files changed, 443 insertions(+), 12 deletions(-) create mode 100644 examples/unstructured_2d_dgsem/elixir_euler_time_series.jl diff --git a/NEWS.md b/NEWS.md index d70504d8c85..5b08d51ab89 100644 --- a/NEWS.md +++ b/NEWS.md @@ -4,13 +4,19 @@ Trixi.jl follows the interpretation of [semantic versioning (semver)](https://ju used in the Julia ecosystem. Notable changes will be documented in this file for human readability. +## Changes in the v0.7 lifecycle + +#### Added +- Implementation of `TimeSeriesCallback` for curvilinear meshes on `UnstructuredMesh2D`. + + ## Changes when updating to v0.7 from v0.6.x #### Added #### Changed -- The default wave speed estimate used within `flux_hll` is now `min_max_speed_davis` +- The default wave speed estimate used within `flux_hll` is now `min_max_speed_davis` instead of `min_max_speed_naive`. #### Deprecated @@ -26,7 +32,7 @@ for human readability. #### Added - AMR for hyperbolic-parabolic equations on 3D `P4estMesh` - `flux_hllc` on non-cartesian meshes for `CompressibleEulerEquations{2,3}D` -- Different boundary conditions for quad/hex meshes in Abaqus format, even if not generated by HOHQMesh, +- Different boundary conditions for quad/hex meshes in Abaqus format, even if not generated by HOHQMesh, can now be digested by Trixi in 2D and 3D. 
- Subcell (positivity) limiting support for nonlinear variables in 2D for `TreeMesh` - Added Lighthill-Whitham-Richards (LWR) traffic model @@ -40,7 +46,7 @@ for human readability. #### Changed - The wave speed estimates for `flux_hll`, `FluxHLL()` are now consistent across equations. - In particular, the functions `min_max_speed_naive`, `min_max_speed_einfeldt` are now + In particular, the functions `min_max_speed_naive`, `min_max_speed_einfeldt` are now conceptually identical across equations. Users, who have been using `flux_hll` for MHD have now to use `flux_hlle` in order to use the Einfeldt wave speed estimate. diff --git a/examples/unstructured_2d_dgsem/elixir_euler_time_series.jl b/examples/unstructured_2d_dgsem/elixir_euler_time_series.jl new file mode 100644 index 00000000000..13233cdadbc --- /dev/null +++ b/examples/unstructured_2d_dgsem/elixir_euler_time_series.jl @@ -0,0 +1,115 @@ +# An elixir that has an alternative convergence test that uses +# the `TimeSeriesCallback` on several gauge points. Many of the +# gauge points are selected as "stress tests" for the element +# identification, e.g., a gauge point that lies on an +# element corner of a curvilinear mesh + +using OrdinaryDiffEq +using Trixi + +############################################################################### +# semidiscretization of the compressible Euler equations + +equations = CompressibleEulerEquations2D(1.4) + +# Modify the manufactured solution test to use `L = sqrt(2)` +# in the initial condition and source terms +function initial_condition_convergence_shifted(x, t, + equations::CompressibleEulerEquations2D) + c = 2 + A = 0.1 + L = sqrt(2) + f = 1 / L + ω = 2 * pi * f + ini = c + A * sin(ω * (x[1] + x[2] - t)) + + rho = ini + rho_v1 = ini + rho_v2 = ini + rho_e = ini^2 + + return SVector(rho, rho_v1, rho_v2, rho_e) +end + +@inline function source_terms_convergence_shifted(u, x, t, + equations::CompressibleEulerEquations2D) + # Same settings as in `initial_condition` + c = 2 + A = 0.1 + L = sqrt(2) + f = 1 / L + ω = 2 * pi * f + γ = equations.gamma + + x1, x2 = x + si, co = sincos(ω * (x1 + x2 - t)) + rho = c + A * si + rho_x = ω * A * co + # Note that d/dt rho = -d/dx rho = -d/dy rho. + + tmp = (2 * rho - 1) * (γ - 1) + + du1 = rho_x + du2 = rho_x * (1 + tmp) + du3 = du2 + du4 = 2 * rho_x * (rho + tmp) + + return SVector(du1, du2, du3, du4) +end + +initial_condition = initial_condition_convergence_shifted + +source_term = source_terms_convergence_shifted + +############################################################################### +# Get the DG approximation space + +solver = DGSEM(polydeg = 6, surface_flux = flux_lax_friedrichs) + +############################################################################### +# Get the curved quad mesh from a file (downloads the file if not available locally) + +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/b434e724e3972a9c4ee48d58c80cdcdb/raw/55c916cd8c0294a2d4a836e960dac7247b7c8ccf/mesh_multiple_flips.mesh", + joinpath(@__DIR__, "mesh_multiple_flips.mesh")) + +mesh = UnstructuredMesh2D(mesh_file, periodicity = true) + +############################################################################### +# create the semi discretization object + +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + source_terms = source_term) + +############################################################################### +# ODE solvers, callbacks etc. 
+ +tspan = (0.0, 1.0) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 1000 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval) + +alive_callback = AliveCallback(analysis_interval = analysis_interval) + +time_series = TimeSeriesCallback(semi, + [(0.75, 0.7), (1.23, 0.302), (0.8, 1.0), + (0.353553390593274, 0.353553390593274), + (0.505, 1.125), (1.37, 0.89), (0.349, 0.7153), + (0.883883476483184, 0.406586401289607), + (sqrt(2), sqrt(2))]; + interval = 10) + +callbacks = CallbackSet(summary_callback, + analysis_callback, + time_series, + alive_callback) + +############################################################################### +# run the simulation + +sol = solve(ode, RDPK3SpFSAL49(); abstol = 1.0e-6, reltol = 1.0e-6, + ode_default_options()..., callback = callbacks); + +summary_callback() # print the timer summary diff --git a/src/callbacks_step/time_series.jl b/src/callbacks_step/time_series.jl index 7baa6b9c5a1..f6d76f0fb15 100644 --- a/src/callbacks_step/time_series.jl +++ b/src/callbacks_step/time_series.jl @@ -23,8 +23,8 @@ After the last time step, the results are stored in an HDF5 file `filename` in d The real data type `RealT` and data type for solution variables `uEltype` default to the respective types used in the solver and the cache. -!!! warning "Experimental implementation" - This is an experimental feature and may change in future releases. +Currently this callback is only implemented for [`TreeMesh`](@ref) in 2D +and [`UnstructuredMesh2D`](@ref). """ mutable struct TimeSeriesCallback{RealT <: Real, uEltype <: Real, SolutionVariables, VariableNames, Cache} @@ -96,6 +96,11 @@ function TimeSeriesCallback(mesh, equations, solver, cache, point_coordinates; throw(ArgumentError("`point_coordinates` must be a matrix of size n_points × ndims")) end + # create the output folder if it does not exist already + if mpi_isroot() && !isdir(output_directory) + mkpath(output_directory) + end + # Transpose point_coordinates to our usual format [ndims, n_points] # Note: They are accepted in a different format to allow direct input from `readdlm` point_coordinates_ = permutedims(point_coordinates) diff --git a/src/callbacks_step/time_series_dg.jl b/src/callbacks_step/time_series_dg.jl index 1b63979d579..ae394afbbfd 100644 --- a/src/callbacks_step/time_series_dg.jl +++ b/src/callbacks_step/time_series_dg.jl @@ -5,8 +5,10 @@ @muladd begin #! format: noindent -# Store time series file for a TreeMesh with a DG solver -function save_time_series_file(time_series_callback, mesh::TreeMesh, equations, dg::DG) +# Store time series file for a DG solver +function save_time_series_file(time_series_callback, + mesh::Union{TreeMesh, UnstructuredMesh2D}, + equations, dg::DG) @unpack (interval, solution_variables, variable_names, output_directory, filename, point_coordinates, point_data, time, step, time_series_cache) = time_series_callback diff --git a/src/callbacks_step/time_series_dg2d.jl b/src/callbacks_step/time_series_dg2d.jl index c15945d6e16..ad7c6851c80 100644 --- a/src/callbacks_step/time_series_dg2d.jl +++ b/src/callbacks_step/time_series_dg2d.jl @@ -6,7 +6,9 @@ #! 
format: noindent
 
 # Creates cache for time series callback
-function create_cache_time_series(point_coordinates, mesh::TreeMesh{2}, dg, cache)
+function create_cache_time_series(point_coordinates,
+                                  mesh::Union{TreeMesh{2}, UnstructuredMesh2D},
+                                  dg, cache)
     # Determine element ids for point coordinates
     element_ids = get_elements_by_coordinates(point_coordinates, mesh, dg, cache)
 
@@ -68,6 +70,144 @@ function get_elements_by_coordinates!(element_ids, coordinates, mesh::TreeMesh,
     return element_ids
 end
 
+# Elements on an `UnstructuredMesh2D` are possibly curved. Assume that each
+# element is convex, i.e., all interior angles are less than 180 degrees.
+# This routine computes the shortest distance from a given point to each element
+# surface in the mesh. These distances then indicate possible candidate elements.
+# From these candidates we (essentially) apply a ray casting strategy and identify
+# the element in which the point lies by comparing the ray from the point to the
+# nearest boundary with the rays cast from the candidate element barycenters to
+# that boundary. If these rays point in the same direction, then we have identified
+# the desired element location.
+function get_elements_by_coordinates!(element_ids, coordinates,
+                                      mesh::UnstructuredMesh2D,
+                                      dg, cache)
+    if length(element_ids) != size(coordinates, 2)
+        throw(DimensionMismatch("storage length for element ids does not match the number of coordinates"))
+    end
+
+    # Reset element ids - 0 indicates "not (yet) found"
+    element_ids .= 0
+
+    # Compute and save the barycenter of each element
+    bary_centers = zeros(eltype(mesh.corners), 2, mesh.n_elements)
+    calc_bary_centers!(bary_centers, dg, cache)
+
+    # Iterate over coordinates
+    distances = zeros(eltype(mesh.corners), mesh.n_elements)
+    indices = zeros(Int, mesh.n_elements, 2)
+    for index in 1:length(element_ids)
+        # Grab the current point for which the containing element needs to be found
+        point = SVector(coordinates[1, index],
+                        coordinates[2, index])
+
+        # Compute the minimum distance between the `point` and all the element surfaces
+        # saved into `distances`. The point in `node_coordinates` that gives said minimum
+        # distance on each element is saved in `indices`
+        distances, indices = calc_minimum_surface_distance(point,
+                                                           cache.elements.node_coordinates,
+                                                           dg, mesh)
+
+        # Get the candidate elements where the `point` might live
+        candidates = findall(abs.(minimum(distances) .- distances) .<
+                             500 * eps(eltype(point)))
+
+        # The minimal surface point lies on an element boundary, so it does not matter
+        # which candidate we use to grab it. So just use the first one
+        surface_point = SVector(cache.elements.node_coordinates[1,
+                                                                indices[candidates[1],
+                                                                        1],
+                                                                indices[candidates[1],
+                                                                        2],
+                                                                candidates[1]],
+                                cache.elements.node_coordinates[2,
+                                                                indices[candidates[1],
+                                                                        1],
+                                                                indices[candidates[1],
+                                                                        2],
+                                                                candidates[1]])
+
+        # Compute the vector pointing from the current `point` toward the surface
+        P = surface_point - point
+
+        # If the vector `P` is the zero vector then this `point` is at an element corner or
+        # on a surface. In this case the choice of a candidate element is ambiguous and
+        # we just use the first candidate. However, solutions might differ at discontinuous
+        # interfaces such that this choice may influence the result.
+        if sum(P .* P) < 500 * eps(eltype(point))
+            element_ids[index] = candidates[1]
+            continue
+        end
+
+        # Loop through all the element candidates until we find a vector from the barycenter
+        # to the surface that points in the same direction as the current `point` vector.
+        # This then gives us the correct element.
+        for element in 1:length(candidates)
+            bary_center = SVector(bary_centers[1, candidates[element]],
+                                  bary_centers[2, candidates[element]])
+            # Vector pointing from the barycenter toward the minimal `surface_point`
+            B = surface_point - bary_center
+            if sum(P .* B) > zero(eltype(bary_center))
+                element_ids[index] = candidates[element]
+                break
+            end
+        end
+    end
+
+    return element_ids
+end
+
+# Use the available `node_coordinates` on each element to compute and save the barycenter.
+# In essence, the barycenter is the average of the node coordinates: the x and y values
+# are summed and divided by the total number of degrees of freedom on the element, i.e.,
+# `n^2` in two spatial dimensions.
+@inline function calc_bary_centers!(bary_centers, dg, cache)
+    n = nnodes(dg)
+    @views for element in eachelement(dg, cache)
+        bary_centers[1, element] = sum(cache.elements.node_coordinates[1, :, :,
+                                                                       element]) / n^2
+        bary_centers[2, element] = sum(cache.elements.node_coordinates[2, :, :,
+                                                                       element]) / n^2
+    end
+    return nothing
+end
+
+# Compute the shortest distance from a `point` to the surface of each element
+# using the available `node_coordinates`. Also return the index pair of this
+# minimum surface point location. We compute and store the squared norm in
+# `min_distance2` to avoid the more expensive square root computations.
+# Note: this could be made more accurate if the `node_coordinates` were super-sampled
+# and reinterpolated onto a higher polynomial degree before this computation.
+function calc_minimum_surface_distance(point, node_coordinates,
+                                       dg, mesh::UnstructuredMesh2D)
+    n = nnodes(dg)
+    min_distance2 = Inf * ones(eltype(mesh.corners), length(mesh))
+    indices = zeros(Int, length(mesh), 2)
+    for k in 1:length(mesh)
+        # used to ensure that only boundary points are used
+        on_surface = MVector(false, false)
+        for j in 1:n
+            on_surface[2] = (j == 1) || (j == n)
+            for i in 1:n
+                on_surface[1] = (i == 1) || (i == n)
+                if !any(on_surface)
+                    continue
+                end
+                node = SVector(node_coordinates[1, i, j, k],
+                               node_coordinates[2, i, j, k])
+                distance2 = sum(abs2, node - point)
+                if distance2 < min_distance2[k]
+                    min_distance2[k] = distance2
+                    indices[k, 1] = i
+                    indices[k, 2] = j
+                end
+            end
+        end
+    end
+
+    return min_distance2, indices
+end
+
 function get_elements_by_coordinates(coordinates, mesh, dg, cache)
     element_ids = Vector{Int}(undef, size(coordinates, 2))
     get_elements_by_coordinates!(element_ids, coordinates, mesh, dg, cache)
@@ -106,8 +246,137 @@ function calc_interpolating_polynomials!(interpolating_polynomials, coordinates,
     return interpolating_polynomials
 end
 
-function calc_interpolating_polynomials(coordinates, element_ids, mesh::TreeMesh, dg,
-                                        cache)
+function calc_interpolating_polynomials!(interpolating_polynomials, coordinates,
+                                         element_ids,
+                                         mesh::UnstructuredMesh2D, dg::DGSEM, cache)
+    @unpack nodes = dg.basis
+
+    wbary = barycentric_weights(nodes)
+
+    # Helper array for a straight-sided quadrilateral element
+    corners = zeros(eltype(mesh.corners), 4, 2)
+
+    for index in 1:length(element_ids)
+        # Construct point
+        x = SVector(ntuple(i -> coordinates[i, index], ndims(mesh)))
+
+        # Convert to unit coordinates; procedure differs for straight-sided
+        # versus curvilinear elements
+        element = element_ids[index]
+        if !mesh.element_is_curved[element]
+            for j in 1:2, i in 1:4
+                # Pull the (x,y) values of the element corners from the global corners array
+                corners[i, j] = mesh.corners[j, mesh.element_node_ids[i, element]]
+            end
+            # Compute coordinates in reference system
+            unit_coordinates = invert_bilinear_interpolation(mesh, x, corners)
+
+            # Sanity check that the computed `unit_coordinates` indeed recover the desired point `x`
+            x_check = straight_side_quad_map(unit_coordinates[1], unit_coordinates[2],
+                                             corners)
+            if !isapprox(x[1], x_check[1]) || !isapprox(x[2], x_check[2])
+                error("failed to compute computational coordinates for the time series point $(x), closest candidate was $(x_check)")
+            end
+        else # mesh.element_is_curved[element]
+            unit_coordinates = invert_transfinite_interpolation(mesh, x,
+                                                                view(mesh.surface_curves,
+                                                                     :, element))
+
+            # Sanity check that the computed `unit_coordinates` indeed recover the desired point `x`
+            x_check = transfinite_quad_map(unit_coordinates[1], unit_coordinates[2],
+                                           view(mesh.surface_curves, :, element))
+            if !isapprox(x[1], x_check[1]) || !isapprox(x[2], x_check[2])
+                error("failed to compute computational coordinates for the time series point $(x), closest candidate was $(x_check)")
+            end
+        end
+
+        # Calculate interpolating polynomial for each dimension, making use of tensor product structure
+        for d in 1:ndims(mesh)
+            interpolating_polynomials[:, d, index] .= lagrange_interpolating_polynomials(unit_coordinates[d],
+                                                                                         nodes,
+                                                                                         wbary)
+        end
+    end
+
+    return interpolating_polynomials
+end
+
+# Use a Newton iteration to determine the computational coordinates
+# (xi, eta) of an (x,y) `point` that is given in physical coordinates
+# by inverting the transformation. For straight-sided elements this
+# amounts to inverting a bi-linear interpolation. For curved
+# elements we invert the transfinite interpolation with linear blending.
+# The residual function for the Newton iteration is
+# r(xi, eta) = X(xi, eta) - point
+# and the Jacobian entries are computed accordingly from either
+# `straight_side_quad_map_metrics` or `transfinite_quad_map_metrics`.
+# We exploit the 2x2 nature of the problem and directly compute the matrix
+# inverse to make things faster. The implementations below are inspired by
+# an answer on Stack Overflow (https://stackoverflow.com/a/18332009) where
+# the author explicitly states that their code is released to the public domain.
+@inline function invert_bilinear_interpolation(mesh::UnstructuredMesh2D, point,
+                                               element_corners)
+    # Initial guess for the point (center of the reference element)
+    xi = zero(eltype(point))
+    eta = zero(eltype(point))
+    for k in 1:5 # Newton's method should converge quickly
+        # Compute current x and y coordinate and the Jacobian matrix
+        # J = (X_xi, X_eta; Y_xi, Y_eta)
+        x, y = straight_side_quad_map(xi, eta, element_corners)
+        J11, J12, J21, J22 = straight_side_quad_map_metrics(xi, eta, element_corners)
+
+        # Compute residuals for the Newton iteration for the current (x, y) coordinate
+        r1 = x - point[1]
+        r2 = y - point[2]
+
+        # Newton update that directly applies the inverse of the 2x2 Jacobian matrix
+        inv_detJ = inv(J11 * J22 - J12 * J21)
+
+        # Update with explicitly inverted Jacobian
+        xi = xi - inv_detJ * (J22 * r1 - J12 * r2)
+        eta = eta - inv_detJ * (-J21 * r1 + J11 * r2)
+
+        # Ensure updated point is in the reference element
+        xi = min(max(xi, -1), 1)
+        eta = min(max(eta, -1), 1)
+    end
+
+    return SVector(xi, eta)
+end
+
+@inline function invert_transfinite_interpolation(mesh::UnstructuredMesh2D, point,
+                                                  surface_curves::AbstractVector{<:CurvedSurface})
+    # Initial guess for the point (center of the reference element)
+    xi = zero(eltype(point))
+    eta = zero(eltype(point))
+    for k in 1:5 # Newton's method should converge quickly
+        # Compute current x and y coordinate and the Jacobian matrix
+        # J = (X_xi, X_eta; Y_xi, Y_eta)
+        x, y = transfinite_quad_map(xi, eta, surface_curves)
+        J11, J12, J21, J22 = transfinite_quad_map_metrics(xi, eta, surface_curves)
+
+        # Compute residuals for the Newton iteration for the current (x,y) coordinate
+        r1 = x - point[1]
+        r2 = y - point[2]
+
+        # Newton update that directly applies the inverse of the 2x2 Jacobian matrix
+        inv_detJ = inv(J11 * J22 - J12 * J21)
+
+        # Update with explicitly inverted Jacobian
+        xi = xi - inv_detJ * (J22 * r1 - J12 * r2)
+        eta = eta - inv_detJ * (-J21 * r1 + J11 * r2)
+
+        # Ensure updated point is in the reference element
+        xi = min(max(xi, -1), 1)
+        eta = min(max(eta, -1), 1)
+    end
+
+    return SVector(xi, eta)
+end
+
+function calc_interpolating_polynomials(coordinates, element_ids,
+                                        mesh::Union{TreeMesh, UnstructuredMesh2D},
+                                        dg, cache)
     interpolating_polynomials = Array{real(dg), 3}(undef,
                                                    nnodes(dg), ndims(mesh),
                                                    length(element_ids))
@@ -121,8 +390,8 @@ end
 
 # Record the solution variables at each given point
 function record_state_at_points!(point_data, u, solution_variables,
                                  n_solution_variables,
-                                 mesh::TreeMesh{2}, equations, dg::DG,
-                                 time_series_cache)
+                                 mesh::Union{TreeMesh{2}, UnstructuredMesh2D},
+                                 equations, dg::DG, time_series_cache)
     @unpack element_ids, interpolating_polynomials = time_series_cache
     old_length = length(first(point_data))
     new_length = old_length + n_solution_variables
diff --git a/test/test_unit.jl
b/test/test_unit.jl index 1907a281718..03a78f6918a 100644 --- a/test/test_unit.jl +++ b/test/test_unit.jl @@ -600,6 +600,7 @@ end end @timed_testset "TimeSeriesCallback" begin + # Test the 2D TreeMesh version of the callback and some warnings @test_nowarn_mod trixi_include(@__MODULE__, joinpath(examples_dir(), "tree_2d_dgsem", "elixir_acoustics_gaussian_source.jl"), diff --git a/test/test_unstructured_2d.jl b/test/test_unstructured_2d.jl index 8a62dcaec3c..6814250dd47 100644 --- a/test/test_unstructured_2d.jl +++ b/test/test_unstructured_2d.jl @@ -198,6 +198,39 @@ end end end +@trixi_testset "elixir_euler_time_series.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_time_series.jl"), + l2=[ + 6.984024099236519e-5, + 6.289022520363763e-5, + 6.550951878107466e-5, + 0.00016222767700879948, + ], + linf=[ + 0.0005367823248620951, + 0.000671293180158461, + 0.0005656680962440319, + 0.0013910024779804075, + ], + tspan=(0.0, 0.2), + # With the default `maxiters = 1` in coverage tests, + # there would be no time series to check against. + coverage_override=(maxiters = 20,)) + # Extra test that the `TimeSeries` callback creates reasonable data + point_data_1 = time_series.affect!.point_data[1] + @test all(isapprox.(point_data_1[1:4], + [1.9546882708551676, 1.9547149531788077, + 1.9547142161310154, 3.821066781119142])) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end +end + @trixi_testset "elixir_acoustics_gauss_wall.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_acoustics_gauss_wall.jl"), l2=[0.029330394861252995, 0.029345079728907965,
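
As a minimal usage sketch (not part of the patch itself): after running the new elixir above, the data recorded by the `TimeSeriesCallback` can be inspected in memory, in the same way the unstructured test does via `time_series.affect!.point_data`. The names `tsc`, `point_id`, `n_vars`, and `first_state` below are illustrative only.

# Sketch: inspect recorded gauge data after `solve` has finished
# (assumes the elixir above was run, so `time_series` is in scope).
tsc = time_series.affect!               # unwrap the TimeSeriesCallback from the DiscreteCallback
point_id = 1                            # index of the first gauge point passed to the callback
n_vars = length(tsc.variable_names)     # number of recorded solution variables
recorded = tsc.point_data[point_id]     # values for this point, appended each time the callback records
first_state = recorded[1:n_vars]        # solution variables at the first recorded time
# After the final time step, the same data are also written to an HDF5 file in `output_directory`.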