diff --git a/.gitmodules b/.gitmodules
index 0f61cc08f..f61e8946a 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -10,4 +10,4 @@
 [submodule "extern/Microphysics"]
 	path = extern/Microphysics
 	url = https://github.com/psharda/Microphysics
-	branch = compute-cs
+	branch = development
diff --git a/cmake/crusher.cmake b/cmake/crusher.cmake
index f233a7021..8af7c4b85 100644
--- a/cmake/crusher.cmake
+++ b/cmake/crusher.cmake
@@ -12,8 +12,7 @@ set(CMAKE_CXX_COMPILER "CC" CACHE PATH "")
 set(AMReX_GPU_BACKEND HIP CACHE STRING "")
 set(AMReX_AMD_ARCH gfx90a CACHE STRING "") # MI250X
 set(AMREX_GPUS_PER_NODE 8 CACHE STRING "")
-set(ENV{Ascent_DIR} "/ccs/home/wibking/ascent/ascent-v0.9.0-pre/install")
-set(ENV{Kokkos_DIR} "/ccs/home/wibking/ascent/kokkos-3.6.01/install/lib64/cmake/Kokkos")
+set(ENV{Ascent_DIR} "/ccs/home/wibking/ascent_crusher/install/ascent-develop")
 set(AMReX_CONDUIT ON BOOL STRING "")
 set(AMReX_ASCENT ON BOOL STRING "")
 option(QUOKKA_PYTHON OFF)
diff --git a/extern/Microphysics b/extern/Microphysics
index 03db4d5bd..3d2714f80 160000
--- a/extern/Microphysics
+++ b/extern/Microphysics
@@ -1 +1 @@
-Subproject commit 03db4d5bdd32a7a75f053205744b1cf543bb6eb4
+Subproject commit 3d2714f8001a9203bf95d94f88c942a606c7e1f0
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 18ef20512..ee2a28177 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -105,7 +105,7 @@ set (QuokkaObjSources "${CMAKE_CURRENT_SOURCE_DIR}/main.cpp" "${CMAKE_CURRENT_SO
 #endif()
 
 #link_libraries(QuokkaObj)
 
-add_subdirectory(HydroShocktubeCMA)
+add_subdirectory(StarCluster)
 add_subdirectory(PrimordialChem)
 
 add_subdirectory(Advection)
@@ -120,6 +120,7 @@ add_subdirectory(HydroKelvinHelmholz)
 add_subdirectory(HydroLeblanc)
 add_subdirectory(HydroRichtmeyerMeshkov)
 add_subdirectory(HydroShocktube)
+add_subdirectory(HydroShocktubeCMA)
 add_subdirectory(HydroShuOsher)
 add_subdirectory(HydroSMS)
 add_subdirectory(HydroVacuum)
@@ -152,4 +153,3 @@ add_subdirectory(ShockCloud)
 add_subdirectory(PassiveScalar)
 add_subdirectory(FCQuantities)
 add_subdirectory(SphericalCollapse)
-
diff --git a/src/RadhydroSimulation.hpp b/src/RadhydroSimulation.hpp
index 2a2abf7fb..0b6311229 100644
--- a/src/RadhydroSimulation.hpp
+++ b/src/RadhydroSimulation.hpp
@@ -123,6 +123,8 @@ template <typename problem_t> class RadhydroSimulation : public AMRSimulation<problem_t>
 	RadhydroSimulation(amrex::Vector<amrex::BCRec> &BCs_cc, amrex::Vector<amrex::BCRec> &BCs_fc) : AMRSimulation<problem_t>(BCs_cc, BCs_fc)
 	{
@@ -314,6 +316,12 @@ template <typename problem_t> void RadhydroSimulation<problem_t>::readParmParse(
 		hpp.query("artificial_viscosity_coefficient", artificialViscosityK_);
 	}
 
+	// set gravity runtime parameter
+	{
+		amrex::ParmParse hpp("gravity");
+		hpp.query("Gconst", Gconst_);
+	}
+
 	// set cooling runtime parameters
 	{
 		amrex::ParmParse hpp("cooling");
@@ -606,10 +614,11 @@ template <typename problem_t> void RadhydroSimulation<problem_t>::fillPoissonRhs
 	// NOTE: in the future, this should also deposit particle mass
 	auto const &state = state_new_cc_[lev].const_arrays();
 	auto rhs = rhs_mf.arrays();
+	const Real G = Gconst_;
 
 	amrex::ParallelFor(rhs_mf, [=] AMREX_GPU_DEVICE(int bx, int i, int j, int k) noexcept {
 		// copy density to rhs_mf
-		rhs[bx](i, j, k) = 4.0 * M_PI * state[bx](i, j, k, HydroSystem<problem_t>::density_index);
+		rhs[bx](i, j, k) = 4.0 * M_PI * G * state[bx](i, j, k, HydroSystem<problem_t>::density_index);
 	});
 }
@@ -778,6 +787,7 @@ template <typename problem_t>
 void RadhydroSimulation<problem_t>::advanceHydroAtLevelWithRetries(int lev, amrex::Real time, amrex::Real dt_lev, amrex::YAFluxRegister *fr_as_crse,
 								   amrex::YAFluxRegister *fr_as_fine)
 {
+	BL_PROFILE_REGION("HydroSolver");
 	// timestep retries
 	const int max_retries = 4;
 	bool success = false;
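Note on the gravity changes above: fillPoissonRhs now builds the Poisson right-hand side as 4*pi*G*rho, with G read from the new gravity.Gconst runtime parameter (set to 1.0 in the test inputs later in this patch). A minimal standalone Python sketch, not part of the patch, showing what those code-unit choices imply for the collapse timescale quoted in tests/StarCluster.in:

import math

# Code units used by the StarCluster test inputs: G = 1 (gravity.Gconst),
# peak cloud density rho = 1 (perturb.cloud_density), isothermal sound speed cs = 1.
G = 1.0
rho = 1.0

# The gravity solver solves laplacian(phi) = 4*pi*G*rho, which is why
# fillPoissonRhs now multiplies the density by G.
rhs = 4.0 * math.pi * G * rho

# Free-fall time of a uniform sphere: t_ff = sqrt(3*pi / (32*G*rho)).
t_ff = math.sqrt(3.0 * math.pi / (32.0 * G * rho))
print(f"4*pi*G*rho = {rhs:.3f}, t_ff = {t_ff:.3f}")  # t_ff ~ 0.54, cf. "stop_time = 0.5  # t_ff = 0.55"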
diff --git a/src/SphericalCollapse/spherical_collapse.hpp b/src/SphericalCollapse/spherical_collapse.hpp
index 92396dc26..3449916fe 100644
--- a/src/SphericalCollapse/spherical_collapse.hpp
+++ b/src/SphericalCollapse/spherical_collapse.hpp
@@ -1,12 +1,12 @@
-#ifndef TEST_HYDRO3D_BLAST_HPP_ // NOLINT
-#define TEST_HYDRO3D_BLAST_HPP_
+#ifndef TEST_SPHERICAL_COLLAPSE_HPP_ // NOLINT
+#define TEST_SPHERICAL_COLLAPSE_HPP_
 //==============================================================================
 // TwoMomentRad - a radiation transport library for patch-based AMR codes
 // Copyright 2020 Benjamin Wibking.
 // Released under the MIT license. See LICENSE file included in the GitHub repo.
 //==============================================================================
-/// \file test_hydro3d_blast.hpp
-/// \brief Defines a test problem for a 3D explosion.
+/// \file spherical_collapse.hpp
+/// \brief Defines a test problem for a pressureless spherical collapse
 ///
 
 // external headers
@@ -17,6 +17,6 @@
 #include "interpolate.hpp"
 
 // function definitions
-auto testproblem_hydro_sedov() -> int;
+auto problem_main() -> int;
 
-#endif // TEST_HYDRO3D_BLAST_HPP_
+#endif // TEST_SPHERICAL_COLLAPSE_HPP_
diff --git a/src/StarCluster/CMakeLists.txt b/src/StarCluster/CMakeLists.txt
new file mode 100644
index 000000000..277525261
--- /dev/null
+++ b/src/StarCluster/CMakeLists.txt
@@ -0,0 +1,22 @@
+if (AMReX_SPACEDIM EQUAL 3)
+    # Define a custom target that runs the Python script to produce the input perturbations file
+
+    add_executable(star_cluster star_cluster.cpp TurbDataReader.cpp ${QuokkaObjSources})
+    if(AMReX_GPU_BACKEND MATCHES "CUDA")
+        setup_target_for_cuda_compilation(star_cluster)
+    endif()
+
+    execute_process(
+        COMMAND Python3::Interpreter -c "h5py"
+        RESULT_VARIABLE EXIT_CODE
+        OUTPUT_QUIET
+    )
+
+    add_test(NAME ComputeStarClusterPerturbations COMMAND python3 ${CMAKE_CURRENT_SOURCE_DIR}/perturbation.py --kmin=2 --kmax=64 --size=128 --alpha=2 --f_solenoidal=1.0 WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/tests)
+    add_test(NAME StarCluster COMMAND star_cluster StarCluster.in WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/tests)
+    set_tests_properties(ComputeStarClusterPerturbations PROPERTIES FIXTURES_SETUP StarCluster_fixture)
+    set_tests_properties(StarCluster PROPERTIES FIXTURES_REQUIRED StarCluster_fixture)
+
+    # AMR test only works on Setonix because Gadi and avatar do not have enough memory per GPU
+    # add_test(NAME StarClusterAMR COMMAND star_cluster StarCluster_AMR.in WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/tests)
+endif()
diff --git a/src/StarCluster/TurbDataReader.cpp b/src/StarCluster/TurbDataReader.cpp
new file mode 100644
index 000000000..6834c17a1
--- /dev/null
+++ b/src/StarCluster/TurbDataReader.cpp
@@ -0,0 +1,119 @@
+//==============================================================================
+// TwoMomentRad - a radiation transport library for patch-based AMR codes
+// Copyright 2020 Benjamin Wibking.
+// Released under the MIT license. See LICENSE file included in the GitHub repo.
+//==============================================================================
+/// \file TurbDataReader.cpp
+/// \brief Reads turbulent driving fields generated as cubic HDF5 arrays.
+///
+
+#include "TurbDataReader.hpp"
+#include "AMReX_Arena.H"
+#include "AMReX_BLassert.H"
+#include "AMReX_Print.H"
+#include "AMReX_TableData.H"
+#include
+
+auto read_dataset(hid_t &file_id, char const *dataset_name) -> amrex::Table3D<double>
+{
+	// open dataset
+	hid_t dset_id = 0;
+	dset_id = H5Dopen2(file_id, dataset_name, H5P_DEFAULT);
+	AMREX_ALWAYS_ASSERT_WITH_MESSAGE(dset_id != -1, "Can't open table!");
+
+	// get dimensions
+	hid_t const dspace = H5Dget_space(dset_id);
+	const int ndims = H5Sget_simple_extent_ndims(dspace);
+	std::vector<hsize_t> dims(ndims);
+	H5Sget_simple_extent_dims(dspace, dims.data(), nullptr);
+
+	uint64_t data_size = 1;
+	for (int idim = 0; idim < ndims; ++idim) {
+		data_size *= dims[idim];
+	}
+
+	// allocate array for dataset storage
+	auto *temp_data = new double[data_size];
+
+	// read dataset
+	herr_t status = H5Dread(dset_id, HDF5_R8, H5S_ALL, H5S_ALL, H5P_DEFAULT, temp_data);
+	AMREX_ALWAYS_ASSERT_WITH_MESSAGE(status != -1, "Failed to read dataset!");
+
+	// close dataset
+	status = H5Dclose(dset_id);
+
+	// WARNING: Table3D uses column-major (Fortran-order) indexing, but HDF5
+	// tables use row-major (C-order) indexing!
+	amrex::GpuArray<int, 3> const lo{0, 0, 0};
+	amrex::GpuArray<int, 3> const hi{static_cast<int>(dims[0]), static_cast<int>(dims[1]), static_cast<int>(dims[2])};
+	auto table = amrex::Table3D<double>(temp_data, lo, hi);
+	return table;
+}
+
+void initialize_turbdata(turb_data &data, std::string &data_file)
+{
+	amrex::Print() << "Initializing turbulence data...\n";
+	amrex::Print() << fmt::format("data_file: {}.\n", data_file);
+
+	herr_t status = 0;
+	herr_t const h5_error = -1;
+
+	// open file
+	hid_t file_id = 0;
+	file_id = H5Fopen(data_file.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT);
+	AMREX_ALWAYS_ASSERT_WITH_MESSAGE(file_id != h5_error, "Failed to open data file!");
+
+	data.dvx = read_dataset(file_id, "pertx");
+	data.dvy = read_dataset(file_id, "perty");
+	data.dvz = read_dataset(file_id, "pertz");
+
+	// close file
+	status = H5Fclose(file_id);
+}
+
+auto get_tabledata(amrex::Table3D<double> &in_t) -> amrex::TableData<double, 3>
+{
+	amrex::Array<int, 3> tlo{in_t.begin[0], in_t.begin[1], in_t.begin[2]};
+	amrex::Array<int, 3> thi{in_t.end[0] - 1, in_t.end[1] - 1, in_t.end[2] - 1};
+	amrex::TableData<double, 3> tableData(tlo, thi, amrex::The_Pinned_Arena());
+	auto h_table = tableData.table();
+
+	amrex::Print() << "Copying tableData on indices " << tlo << " to " << thi << ".\n";
+
+	// fill tableData
+	for (int i = tlo[0]; i <= thi[0]; ++i) {
+		for (int j = tlo[1]; j <= thi[1]; ++j) {
+			for (int k = tlo[2]; k <= thi[2]; ++k) {
+				h_table(i, j, k) = in_t(i, j, k);
+			}
+		}
+	}
+
+	return tableData;
+}
+
+auto computeRms(amrex::TableData<double, 3> &dvx, amrex::TableData<double, 3> &dvy, amrex::TableData<double, 3> &dvz) -> amrex::Real
+{
+	amrex::Array<int, 3> tlo = dvx.lo();
+	amrex::Array<int, 3> thi = dvx.hi();
+	auto const &dvx_table = dvx.const_table();
+	auto const &dvy_table = dvy.const_table();
+	auto const &dvz_table = dvz.const_table();
+
+	// compute rms power
+	amrex::Real rms_sq = 0;
+	amrex::Long N = 0;
+	for (int i = tlo[0]; i <= thi[0]; ++i) {
+		for (int j = tlo[1]; j <= thi[1]; ++j) {
+			for (int k = tlo[2]; k <= thi[2]; ++k) {
+				amrex::Real const vx = dvx_table(i, j, k);
+				amrex::Real const vy = dvy_table(i, j, k);
+				amrex::Real const vz = dvz_table(i, j, k);
+				rms_sq += vx * vx + vy * vy + vz * vz;
+				++N;
+			}
+		}
+	}
+	rms_sq /= static_cast<double>(N);
+	return std::sqrt(rms_sq);
+}
diff --git a/src/StarCluster/TurbDataReader.hpp b/src/StarCluster/TurbDataReader.hpp
new file mode 100644
index 000000000..b127cc92a
--- /dev/null
+++ b/src/StarCluster/TurbDataReader.hpp
@@ -0,0 +1,59 @@
+#ifndef TURBDATAREADER_HPP_ // NOLINT
+#define TURBDATAREADER_HPP_
+//==============================================================================
+// TwoMomentRad - a radiation transport library for patch-based AMR codes
+// Copyright 2020 Benjamin Wibking.
+// Released under the MIT license. See LICENSE file included in the GitHub repo.
+//==============================================================================
+/// \file TurbDataReader.hpp
+/// \brief Reads turbulent driving fields generated as cubic HDF5 arrays.
+///
+
+#include
+#include
+#include
+#include
+#include
+
+#include "fmt/core.h"
+#include
+#include
+#include
+
+#include "AMReX.H"
+#include "AMReX_Array.H"
+#include "AMReX_BLassert.H"
+#include "AMReX_TableData.H"
+
+/* HDF5 definitions */
+
+#define HDF5_FILE_I4 H5T_STD_I32BE
+#define HDF5_FILE_I8 H5T_STD_I64BE
+#define HDF5_FILE_R4 H5T_IEEE_F32BE
+#define HDF5_FILE_R8 H5T_IEEE_F64BE
+#define HDF5_FILE_B8 H5T_STD_B8BE
+
+#define HDF5_I4 H5T_NATIVE_INT
+#define HDF5_I8 H5T_NATIVE_LLONG
+#define HDF5_R4 H5T_NATIVE_FLOAT
+#define HDF5_R8 H5T_NATIVE_DOUBLE
+#define HDF5_R16 H5T_NATIVE_LDOUBLE
+
+// Turbulent driving field storage
+
+using turb_data = struct turb_data {
+	// values
+	amrex::Table3D<double> dvx;
+	amrex::Table3D<double> dvy;
+	amrex::Table3D<double> dvz;
+};
+
+void initialize_turbdata(turb_data &data, std::string &data_file);
+
+auto read_dataset(hid_t &file_id, char const *dataset_name) -> amrex::Table3D<double>;
+
+auto get_tabledata(amrex::Table3D<double> &in_t) -> amrex::TableData<double, 3>;
+
+auto computeRms(amrex::TableData<double, 3> &dvx, amrex::TableData<double, 3> &dvy, amrex::TableData<double, 3> &dvz) -> amrex::Real;
+
+#endif // TURBDATAREADER_HPP_
diff --git a/src/StarCluster/ascent_actions.yaml b/src/StarCluster/ascent_actions.yaml
new file mode 100644
index 000000000..6515c4b84
--- /dev/null
+++ b/src/StarCluster/ascent_actions.yaml
@@ -0,0 +1,30 @@
+-
+  action: "add_pipelines"
+  pipelines:
+    pl1:
+      f2:
+        type: "slice"
+        params:
+          point:
+            x: 0.
+            y: 0.
+            z: 0.
+          normal:
+            x: 0.0
+            y: 0.0
+            z: 1.0
+-
+  action: "add_scenes"
+  scenes:
+    s1:
+      plots:
+        p1:
+          type: "pseudocolor"
+          field: "log_density"
+          pipeline: "pl1"
+      renders:
+        r1:
+          image_prefix: "dens%05d"
+          annotations: "true"
+          camera:
+            zoom: 1.5
diff --git a/src/StarCluster/perturbation.py b/src/StarCluster/perturbation.py
new file mode 100644
index 000000000..d6d53e640
--- /dev/null
+++ b/src/StarCluster/perturbation.py
@@ -0,0 +1,311 @@
+import h5py
+import numpy as np
+import numpy.fft
+from math import *
+from optparse import OptionParser
+import sys
+
+
+def init_perturbations(n, kmin, kmax, dtype):
+    kx = np.zeros(n, dtype=dtype)
+    ky = np.zeros(n, dtype=dtype)
+    kz = np.zeros(n, dtype=dtype)
+    # perform fft k-ordering convention shifts
+    for j in range(0,n[1]):
+        for k in range(0,n[2]):
+            kx[:,j,k] = n[0]*np.fft.fftfreq(n[0])
+    for i in range(0,n[0]):
+        for k in range(0,n[2]):
+            ky[i,:,k] = n[1]*np.fft.fftfreq(n[1])
+    for i in range(0,n[0]):
+        for j in range(0,n[1]):
+            kz[i,j,:] = n[2]*np.fft.fftfreq(n[2])
+
+    kx = np.array(kx, dtype=dtype)
+    ky = np.array(ky, dtype=dtype)
+    kz = np.array(kz, dtype=dtype)
+    k = np.sqrt(np.array(kx**2+ky**2+kz**2, dtype=dtype))
+
+    # only use the positive frequencies
+    inds = np.where(np.logical_and(k**2 >= kmin**2, k**2 < (kmax+1)**2))
+    nr = len(inds[0])
+
+    phasex = np.zeros(n, dtype=dtype)
+    phasex[inds] = 2.*pi*np.random.uniform(size=nr)
+    fx = np.zeros(n, dtype=dtype)
+    fx[inds] = np.random.normal(size=nr)
+
+    phasey = np.zeros(n, dtype=dtype)
+    phasey[inds] = 2.*pi*np.random.uniform(size=nr)
+    fy = np.zeros(n, dtype=dtype)
+    fy[inds] = np.random.normal(size=nr)
+
+    phasez = np.zeros(n, dtype=dtype)
+    phasez[inds] = 2.*pi*np.random.uniform(size=nr)
+    fz = np.zeros(n, dtype=dtype)
+    fz[inds] = np.random.normal(size=nr)
+
+    # rescale perturbation amplitude so that low number statistics
+    # at low k do not throw off the desired power law scaling.
+    for i in range(kmin, kmax+1):
+        slice_inds = np.where(np.logical_and(k >= i, k < i+1))
+        rescale = sqrt(np.sum(np.abs(fx[slice_inds])**2 + np.abs(fy[slice_inds])**2 + np.abs(fz[slice_inds])**2))
+        fx[slice_inds] = fx[slice_inds]/rescale
+        fy[slice_inds] = fy[slice_inds]/rescale
+        fz[slice_inds] = fz[slice_inds]/rescale
+
+    # set the power law behavior
+    # wave number bins
+    fx[inds] = fx[inds]*k[inds]**-(0.5*alpha)
+    fy[inds] = fy[inds]*k[inds]**-(0.5*alpha)
+    fz[inds] = fz[inds]*k[inds]**-(0.5*alpha)
+
+    # add in phases
+    fx = np.cos(phasex)*fx + 1j*np.sin(phasex)*fx
+    fy = np.cos(phasey)*fy + 1j*np.sin(phasey)*fy
+    fz = np.cos(phasez)*fz + 1j*np.sin(phasez)*fz
+
+    return fx, fy, fz, kx, ky, kz
+
+
+def normalize(fx, fy, fz):
+    norm = np.sqrt(np.sum(fx**2 + fy**2 + fz**2)/np.product(n))
+    fx = fx/norm
+    fy = fy/norm
+    fz = fz/norm
+    return fx, fy, fz
+
+
+def make_perturbations(n, kmin, kmax, f_solenoidal):
+    fx, fy, fz, kx, ky, kz = init_perturbations(n, kmin, kmax, dtype)
+    if f_solenoidal != None:
+        k2 = kx**2+ky**2+kz**2
+        # solenoidal part
+        fxs = 0.; fys =0.; fzs = 0.
+        if f_solenoidal != 0.0:
+            fxs = np.real(fx - kx*(kx*fx+ky*fy+kz*fz)/np.maximum(k2,1e-16))
+            fys = np.real(fy - ky*(kx*fx+ky*fy+kz*fz)/np.maximum(k2,1e-16))
+            fzs = np.real(fz - kz*(kx*fx+ky*fy+kz*fz)/np.maximum(k2,1e-16))
+            ind = np.where(k2 == 0)
+            fxs[ind] = 0.; fys[ind] = 0.; fzs[ind] = 0.
+            # need to normalize this before applying relative weighting of solenoidal / compressive components
+            norm = np.sqrt(np.sum(fxs**2+fys**2+fzs**2))
+            fxs = fxs/norm
+            fys = fys/norm
+            fzs = fzs/norm
+        # compressive part
+        # get a different random cube for the compressive part
+        # so that we can target the RMS solenoidal fraction,
+        # instead of setting a constant solenoidal fraction everywhere.
+        fx, fy, fz, kx, ky, kz = init_perturbations(n, kmin, kmax, dtype)
+        fxc = 0.; fyc =0.; fzc = 0.
+        if f_solenoidal != 1.0:
+            fxc = np.real(kx*(kx*fx+ky*fy+kz*fz)/np.maximum(k2,1e-16))
+            fyc = np.real(ky*(kx*fx+ky*fy+kz*fz)/np.maximum(k2,1e-16))
+            fzc = np.real(kz*(kx*fx+ky*fy+kz*fz)/np.maximum(k2,1e-16))
+            ind = np.where(k2 == 0)
+            fxc[ind] = 0.; fyc[ind] = 0.; fzc[ind] = 0.
+            # need to normalize this before applying relative weighting of solenoidal / compressive components
+            norm = np.sqrt(np.sum(fxc**2+fyc**2+fzc**2))
+            fxc = fxc/norm
+            fyc = fyc/norm
+            fzc = fzc/norm
+        # back to real space
+        pertx = np.real(np.fft.ifftn(f_solenoidal*fxs + (1.-f_solenoidal)*fxc))
+        perty = np.real(np.fft.ifftn(f_solenoidal*fys + (1.-f_solenoidal)*fyc))
+        pertz = np.real(np.fft.ifftn(f_solenoidal*fzs + (1.-f_solenoidal)*fzc))
+    else:
+        # just convert to real space
+        pertx = np.real(np.fft.ifftn(fx))
+        perty = np.real(np.fft.ifftn(fy))
+        pertz = np.real(np.fft.ifftn(fz))
+
+    # subtract off COM (assuming uniform density)
+    pertx = pertx-np.average(pertx)
+    perty = perty-np.average(perty)
+    pertz = pertz-np.average(pertz)
+    # scale RMS of perturbation cube to unity
+    pertx, perty, pertz = normalize(pertx, perty, pertz)
+    return pertx, perty, pertz
+
+
+def cut_sphere(pertx, perty, pertz, rad):
+    # Make radial array
+    x, y, z = np.mgrid[0:n[0], 0:n[1], 0:n[2]]
+    x = x - (n[0]-1)/2.
+    y = y - (n[1]-1)/2.
+    z = z - (n[2]-1)/2.
+    r2 = x**2+y**2+z**2
+    # Get range of indices we want to set to zero, and those we want to keep
+    idx0 = r2 > (rad*n[0]/2.0)**2
+    idx1 = np.logical_not(idx0)
+    # Zero outside the desired region
+    pertx[idx0] = 0.0
+    perty[idx0] = 0.0
+    pertz[idx0] = 0.0
+    # Recompute COM velocity, and renormalize
+    pertx[idx1] = pertx[idx1]-np.average(pertx[idx1])
+    perty[idx1] = perty[idx1]-np.average(perty[idx1])
+    pertz[idx1] = pertz[idx1]-np.average(pertz[idx1])
+    pertx, perty, pertz = normalize(pertx, perty, pertz)
+    return pertx, perty, pertz
+
+
+def get_erot_ke_ratio(pertx, perty, pertz, rad=-1.0):
+    x, y, z = np.mgrid[0:n[0], 0:n[1], 0:n[2]]
+    x = x - (n[0]-1)/2.
+    y = y - (n[1]-1)/2.
+    z = z - (n[2]-1)/2.
+    r2 = x**2+y**2+z**2
+    if rad > 0:
+        idx0 = r2 > (rad*n[0]/2.0)**2
+        r2[idx0] = 0.0
+    erot_ke_ratio = (np.sum(y*pertz-z*perty)**2 +
+                     np.sum(z*pertx-x*pertz)**2 +
+                     np.sum(x*perty-y*pertx)**2)/(np.sum(r2)*np.product(n))
+    return erot_ke_ratio
+
+
+def plot_spectrum1D(pertx, perty, pertz):
+    # plot the 1D power to check the scaling.
+    fx = np.abs(np.fft.fftn(pertx))
+    fy = np.abs(np.fft.fftn(perty))
+    fz = np.abs(np.fft.fftn(pertz))
+    fx = np.abs(fx)
+    fy = np.abs(fy)
+    fz = np.abs(fz)
+    kx = np.zeros(n, dtype=dtype)
+    ky = np.zeros(n, dtype=dtype)
+    kz = np.zeros(n, dtype=dtype)
+    # perform fft k-ordering convention shifts
+    for j in range(0,n[1]):
+        for k in range(0,n[2]):
+            kx[:,j,k] = n[0]*np.fft.fftfreq(n[0])
+    for i in range(0,n[0]):
+        for k in range(0,n[2]):
+            ky[i,:,k] = n[1]*np.fft.fftfreq(n[1])
+    for i in range(0,n[0]):
+        for j in range(0,n[1]):
+            kz[i,j,:] = n[2]*np.fft.fftfreq(n[2])
+    k = np.sqrt(np.array(kx**2+ky**2+kz**2,dtype=dtype))
+    k1d = []
+    power = []
+    for i in range(kmin,kmax+1):
+        slice_inds = np.where(np.logical_and(k >= i, k < i+1))
+        k1d.append(i+0.5)
+        power.append(np.sum(fx[slice_inds]**2 + fy[slice_inds]**2 + fz[slice_inds]**2))
+        print(i,power[-1])
+    import matplotlib.pyplot as plt
+    plt.loglog(k1d, power)
+    plt.show()
+
+
+###################
+# input parameters, read from command line
+###################
+parser = OptionParser()
+parser.add_option('--kmin', dest='kmin',
+                  help='minimum wavenumber.',
+                  default=-1)
+parser.add_option('--kmax', dest='kmax',
+                  help='maximum wavenumber.',
+                  default=-1)
+parser.add_option('--size', dest='size',
+                  help='size of each direction of data cube. default=256',
+                  default=256)
+parser.add_option('--alpha', dest='alpha',
+                  help='negative of power law slope. (Power ~ k^-alpha) '+
+                       'supersonic turbulence is near alpha=2. '+
+                       'driving over a narrow band of two modes is often done with alpha=0',
+                  default = None)
+parser.add_option('--seed', dest='seed',
+                  help='seed for random # generation. default=0',
+                  default = 0)
+parser.add_option('--f_solenoidal', dest='f_solenoidal',
+                  help='volume RMS fraction of solenoidal component of the perturbations relative to the total. ' +
+                       'If --f_solenoidal=None, the motions are purely random. For low wave numbers ' +
+                       'the relative importance of solenoidal to compressive may be sensitive to the ' +
+                       'choice of random seed. It has been suggested (Federrath 2008) that ' +
+                       'f_solenoidal=2/3 is the most natural driving mode and this is currently ' +
+                       'the suggested best-practice.',
+                  default = -1)
+parser.add_option('--sphererad', dest='rad',
+                  help='if set, perturbations are set to zero outside spherical region, '+
+                       'and the perturbation field is shifted and renormalized to keep the '+
+                       'center of mass velocity at zero and the variance at unity; the '+
+                       'spherical region cut out is centered at the center of the perturbation '+
+                       'cube, and has a radius given by the value of this parameter, with sphererad = '+
+                       '1 corresponding to the spherical region going all the way to the edge of the '+
+                       'perturbation cube',
+                  default = -1.0)
+
+(options, args) = parser.parse_args()
+
+# size of the data domain
+n = [int(options.size), int(options.size), int(options.size)]
+# range of perturbation length scale in units of the smallest side of the domain
+kmin = int(options.kmin)
+kmax = int(options.kmax)
+print(kmin, kmax)
+if kmin > kmax or kmin < 0 or kmax < 0:
+    print("kmin must be < kmax, with kmin > 0, kmax > 0. See --help.")
+    sys.exit(0)
+if kmax > floor(np.min(n))/2:
+    print("kmax must be <= floor(size/2). See --help.")
+    sys.exit(0)
+f_solenoidal = options.f_solenoidal
+if f_solenoidal == "None" or f_solenoidal == "none":
+    f_solenoidal = None
+else:
+    f_solenoidal = float(options.f_solenoidal)
+    if f_solenoidal > 1. or f_solenoidal < 0.:
+        print("You must choose f_solenoidal. See --help.")
+        sys.exit(0)
+alpha = options.alpha
+if alpha==None:
+    print("You must choose a power law slope, alpha. See --help.")
+    sys.exit(0)
+alpha = float(options.alpha)
+if alpha < 0.:
+    print("alpha is less than zero. That's probably not what you want. See --help.")
+    sys.exit(0)
+seed = int(options.seed)
+# data precision
+dtype = np.float64
+# ratio of solenoidal to compressive components
+if options.f_solenoidal=="None" or options.f_solenoidal==None:
+    f_solenoidal = None
+else:
+    f_solenoidal = min(max(float(options.f_solenoidal), 0.), 1.)
+rad = float(options.rad)
+if rad > 1:
+    raise ValueError('sphererad is '+options.rad+', must be from 0 to 1')
+
+###################
+# begin computation
+###################
+
+np.random.seed(seed=seed)
+pertx, perty, pertz = make_perturbations(n, kmin, kmax, f_solenoidal)
+if rad > 0:
+    pertx, perty, pertz = cut_sphere(pertx, perty, pertz, rad)
+erot_ke_ratio = get_erot_ke_ratio(pertx, perty, pertz, rad)
+print("erot_ke_ratio = ", erot_ke_ratio)
+
+# hdf5 output
+f = h5py.File('zdrv.hdf5', 'w')
+ds = f['/'].create_dataset('pertx', n, dtype=np.float64)
+ds[:] = pertx
+ds = f['/'].create_dataset('perty', n, dtype=np.float64)
+ds[:] = perty
+ds = f['/'].create_dataset('pertz', n, dtype=np.float64)
+ds[:] = pertz
+f['/'].attrs['kmin'] = kmin
+f['/'].attrs['kmax'] = kmax
+f['/'].attrs['alpha'] = alpha
+if f_solenoidal!=None: f['/'].attrs['f_solenoidal'] = f_solenoidal
+if rad > 0: f['/'].attrs['sphererad'] = rad
+f['/'].attrs['erot_ke_ratio'] = erot_ke_ratio
+f['/'].attrs['seed'] = seed
+f.close()
+
diff --git a/src/StarCluster/star_cluster.cpp b/src/StarCluster/star_cluster.cpp
new file mode 100644
index 000000000..d2c7af4df
--- /dev/null
+++ b/src/StarCluster/star_cluster.cpp
@@ -0,0 +1,248 @@
+//==============================================================================
+// TwoMomentRad - a radiation transport library for patch-based AMR codes
+// Copyright 2020 Benjamin Wibking.
+// Released under the MIT license. See LICENSE file included in the GitHub repo.
+//==============================================================================
+/// \file star_cluster.cpp
+/// \brief Defines a test problem for pressureless spherical collapse of a star cluster.
+///
+#include
+#include
+#include
+
+#include "AMReX.H"
+#include "AMReX_Arena.H"
+#include "AMReX_BC_TYPES.H"
+#include "AMReX_BLassert.H"
+#include "AMReX_Config.H"
+#include "AMReX_FabArrayUtility.H"
+#include "AMReX_MultiFab.H"
+#include "AMReX_ParallelDescriptor.H"
+#include "AMReX_ParmParse.H"
+#include "AMReX_Print.H"
+#include "AMReX_SPACE.H"
+#include "AMReX_TableData.H"
+
+#include "EOS.hpp"
+#include "RadhydroSimulation.hpp"
+#include "TurbDataReader.hpp"
+#include "hydro_system.hpp"
+#include "star_cluster.hpp"
+
+using amrex::Real;
+
+struct StarCluster {
+};
+
+template <> struct quokka::EOS_Traits<StarCluster> {
+	static constexpr double gamma = 1.0;
+	static constexpr double cs_isothermal = 1.0; // dimensionless
+	static constexpr double mean_molecular_weight = 1.0;
+	static constexpr double boltzmann_constant = C::k_B;
+};
+
+template <> struct HydroSystem_Traits<StarCluster> {
+	static constexpr bool reconstruct_eint = false;
+};
+
+template <> struct Physics_Traits<StarCluster> {
+	// cell-centred
+	static constexpr bool is_hydro_enabled = true;
+	static constexpr bool is_chemistry_enabled = false;
+	static constexpr int numMassScalars = 0;		     // number of mass scalars
+	static constexpr int numPassiveScalars = numMassScalars + 0; // number of passive scalars
+	static constexpr bool is_radiation_enabled = false;
+	// face-centred
+	static constexpr bool is_mhd_enabled = false;
+};
+
+template <> struct SimulationData<StarCluster> {
+	// real-space perturbation fields
+	amrex::TableData<Real, 3> dvx;
+	amrex::TableData<Real, 3> dvy;
+	amrex::TableData<Real, 3> dvz;
+	Real dv_rms_generated{};
+	Real dv_rms_target{};
+	Real rescale_factor{};
+
+	// cloud parameters
+	Real R_sphere{};
+	Real rho_sphere{};
+	Real alpha_vir{};
+};
+
+template <> void RadhydroSimulation<StarCluster>::preCalculateInitialConditions()
+{
+	static bool isSamplingDone = false;
+	if (!isSamplingDone) {
+		// read perturbations from file
+		turb_data turbData;
+		amrex::ParmParse pp("perturb");
+		std::string turbdata_filename;
+		pp.query("filename", turbdata_filename);
+		initialize_turbdata(turbData, turbdata_filename);
+
+		// copy to pinned memory
+		auto pinned_dvx = get_tabledata(turbData.dvx);
+		auto pinned_dvy = get_tabledata(turbData.dvy);
+		auto pinned_dvz = get_tabledata(turbData.dvz);
+
+		// compute normalisation
+		userData_.dv_rms_generated = computeRms(pinned_dvx, pinned_dvy, pinned_dvz);
+		amrex::Print() << "rms dv = " << userData_.dv_rms_generated << "\n";
+
+		const Real R_sphere = userData_.R_sphere;
+		const Real rho_sph = userData_.rho_sphere;
+		const Real alpha_vir = userData_.alpha_vir;
+		const Real M_sphere = (4. / 3.) * M_PI * std::pow(R_sphere, 3) * rho_sph;
+		const Real rms_dv_target = std::sqrt(alpha_vir * (3. / 5.) * Gconst_ * M_sphere / R_sphere);
+		const Real rms_Mach_target = rms_dv_target / quokka::EOS_Traits<StarCluster>::cs_isothermal;
+		const Real rms_dv_actual = userData_.dv_rms_generated;
+		userData_.rescale_factor = rms_dv_target / rms_dv_actual;
+		amrex::Print() << "rms Mach target = " << rms_Mach_target << "\n";
+
+		// copy to GPU
+		userData_.dvx.resize(pinned_dvx.lo(), pinned_dvx.hi());
+		userData_.dvx.copy(pinned_dvx);
+
+		userData_.dvy.resize(pinned_dvy.lo(), pinned_dvy.hi());
+		userData_.dvy.copy(pinned_dvy);
+
+		userData_.dvz.resize(pinned_dvz.lo(), pinned_dvz.hi());
+		userData_.dvz.copy(pinned_dvz);
+
+		isSamplingDone = true;
+	}
+}
+
+template <> void RadhydroSimulation<StarCluster>::setInitialConditionsOnGrid(quokka::grid grid_elem)
+{
+	// set initial conditions
+	amrex::GpuArray<Real, AMREX_SPACEDIM> const dx = grid_elem.dx_;
+	amrex::GpuArray<Real, AMREX_SPACEDIM> prob_lo = grid_elem.prob_lo_;
+	amrex::GpuArray<Real, AMREX_SPACEDIM> prob_hi = grid_elem.prob_hi_;
+	const amrex::Box &indexRange = grid_elem.indexRange_;
+	const amrex::Array4<double> &state_cc = grid_elem.array_;
+
+	amrex::Real const x0 = prob_lo[0] + 0.5 * (prob_hi[0] - prob_lo[0]);
+	amrex::Real const y0 = prob_lo[1] + 0.5 * (prob_hi[1] - prob_lo[1]);
+	amrex::Real const z0 = prob_lo[2] + 0.5 * (prob_hi[2] - prob_lo[2]);
+
+	// cloud parameters
+	const double rho_min = 0.01 * userData_.rho_sphere;
+	const double rho_max = userData_.rho_sphere;
+	const double R_sphere = userData_.R_sphere;
+	const double R_smooth = 0.05 * R_sphere;
+	const double renorm_amp = userData_.rescale_factor;
+
+	auto const &dvx = userData_.dvx.const_table();
+	auto const &dvy = userData_.dvy.const_table();
+	auto const &dvz = userData_.dvz.const_table();
+
+	amrex::ParallelFor(indexRange, [=] AMREX_GPU_DEVICE(int i, int j, int k) {
+		amrex::Real const x = prob_lo[0] + (i + static_cast<Real>(0.5)) * dx[0];
+		amrex::Real const y = prob_lo[1] + (j + static_cast<Real>(0.5)) * dx[1];
+		amrex::Real const z = prob_lo[2] + (k + static_cast<Real>(0.5)) * dx[2];
+		amrex::Real const r = std::sqrt(std::pow(x - x0, 2) + std::pow(y - y0, 2) + std::pow(z - z0, 2));
+
+		double const rho = std::max(rho_min, rho_max * ((std::tanh((R_sphere - r) / R_smooth) + 1.0) / 2.0));
+		AMREX_ASSERT(!std::isnan(rho));
+
+		double vx = dvx(i, j, k);
+		double vy = dvy(i, j, k);
+		double vz = dvz(i, j, k);
+
+		vx *= renorm_amp * (rho / rho_max);
+		vy *= renorm_amp * (rho / rho_max);
+		vz *= renorm_amp * (rho / rho_max);
+
+		state_cc(i, j, k, HydroSystem<StarCluster>::density_index) = rho;
+		state_cc(i, j, k, HydroSystem<StarCluster>::x1Momentum_index) = rho * vx;
+		state_cc(i, j, k, HydroSystem<StarCluster>::x2Momentum_index) = rho * vy;
+		state_cc(i, j, k, HydroSystem<StarCluster>::x3Momentum_index) = rho * vz;
+		state_cc(i, j, k, HydroSystem<StarCluster>::energy_index) = 0;
+		state_cc(i, j, k, HydroSystem<StarCluster>::internalEnergy_index) = 0;
+	});
+}
+
+template <> void RadhydroSimulation<StarCluster>::ErrorEst(int lev, amrex::TagBoxArray &tags, amrex::Real /*time*/, int /*ngrow*/)
+{
+	// refine on Jeans length
+	const int N_cells = 4; // inverse of the 'Jeans number' [Truelove et al. (1997)]
+	const amrex::Real cs = quokka::EOS_Traits<StarCluster>::cs_isothermal;
+	const amrex::Real dx = geom[lev].CellSizeArray()[0];
+	const amrex::Real G = Gconst_;
+
+	auto const &state = state_new_cc_[lev].const_arrays();
+	auto tag = tags.arrays();
+
+	amrex::ParallelFor(tags, [=] AMREX_GPU_DEVICE(int bx, int i, int j, int k) noexcept {
+		Real const rho = state[bx](i, j, k, HydroSystem<StarCluster>::density_index);
+		const amrex::Real l_Jeans = cs * std::sqrt(M_PI / (G * rho));
+
+		if (l_Jeans < (N_cells * dx)) {
+			tag[bx](i, j, k) = amrex::TagBox::SET;
+		}
+	});
+}
+
+template <> void RadhydroSimulation<StarCluster>::ComputeDerivedVar(int lev, std::string const &dname, amrex::MultiFab &mf, const int ncomp_cc_in) const
+{
+	// compute derived variables and save in 'mf'
+	if (dname == "log_density") {
+		const int ncomp = ncomp_cc_in;
+		auto const &state = state_new_cc_[lev].const_arrays();
+		auto output = mf.arrays();
+
+		amrex::ParallelFor(mf, [=] AMREX_GPU_DEVICE(int bx, int i, int j, int k) noexcept {
+			Real const rho = state[bx](i, j, k, HydroSystem<StarCluster>::density_index);
+			output[bx](i, j, k, ncomp) = std::log10(rho);
+		});
+	}
+}
+
+auto problem_main() -> int
+{
+	// read problem parameters
+	amrex::ParmParse const pp("perturb");
+
+	// cloud radius
+	Real R_sphere{};
+	pp.query("cloud_radius", R_sphere);
+
+	// cloud density
+	Real rho_sphere{};
+	pp.query("cloud_density", rho_sphere);
+
+	// cloud virial parameter
+	Real alpha_vir{};
+	pp.query("virial_parameter", alpha_vir);
+
+	// boundary conditions
+	const int ncomp_cc = Physics_Indices<StarCluster>::nvarTotal_cc;
+	amrex::Vector<amrex::BCRec> BCs_cc(ncomp_cc);
+	for (int n = 0; n < ncomp_cc; ++n) {
+		for (int i = 0; i < AMREX_SPACEDIM; ++i) {
+			BCs_cc[n].setLo(i, amrex::BCType::foextrap);
+			BCs_cc[n].setHi(i, amrex::BCType::foextrap);
+		}
+	}
+
+	// Problem initialization
+	RadhydroSimulation<StarCluster> sim(BCs_cc);
+	sim.doPoissonSolve_ = 1; // enable self-gravity
+	sim.densityFloor_ = 0.01;
+
+	sim.userData_.R_sphere = R_sphere;
+	sim.userData_.rho_sphere = rho_sphere;
+	sim.userData_.alpha_vir = alpha_vir;
+
+	// initialize
+	sim.setInitialConditions();
+
+	// evolve
+	sim.evolve();
+
+	int const status = 0;
+	return status;
+}
diff --git a/src/StarCluster/star_cluster.hpp b/src/StarCluster/star_cluster.hpp
new file mode 100644
index 000000000..292a801ee
--- /dev/null
+++ b/src/StarCluster/star_cluster.hpp
@@ -0,0 +1,22 @@
+#ifndef STAR_CLUSTER_HPP_ // NOLINT
+#define STAR_CLUSTER_HPP_
+//==============================================================================
+// TwoMomentRad - a radiation transport library for patch-based AMR codes
+// Copyright 2020 Benjamin Wibking.
+// Released under the MIT license. See LICENSE file included in the GitHub repo.
+//==============================================================================
+/// \file star_cluster.hpp
+/// \brief Defines a test problem for pressureless spherical collapse of a star cluster.
+///
+
+// external headers
+#include
+
+// internal headers
+#include "hydro_system.hpp"
+#include "interpolate.hpp"
+
+// function definitions
+auto problem_main() -> int;
+
+#endif // STAR_CLUSTER_HPP_
diff --git a/src/simulation.hpp b/src/simulation.hpp
index 1a49096a6..130f3d773 100644
--- a/src/simulation.hpp
+++ b/src/simulation.hpp
@@ -798,8 +798,10 @@ template <typename problem_t> void AMRSimulation<problem_t>::ellipticSolveAllLev
 		amrex::Abort("Poisson solve is not supported when AMR subcycling is enabled! You must set do_subcycle = 0.");
 	}
 
+	BL_PROFILE_REGION("GravitySolver");
+
 	// set up elliptic solve object
-	amrex::OpenBCSolver poissonSolver(geom, grids, dmap);
+	amrex::OpenBCSolver poissonSolver(Geom(0, finest_level), boxArray(0, finest_level), DistributionMap(0, finest_level));
 	if (verbose) {
 		poissonSolver.setVerbose(true);
 		poissonSolver.setBottomVerbose(false);
diff --git a/tests/SphericalCollapse.in b/tests/SphericalCollapse.in
index a55e6eeb5..db6d71804 100644
--- a/tests/SphericalCollapse.in
+++ b/tests/SphericalCollapse.in
@@ -25,6 +25,8 @@ cfl = 0.25
 max_timesteps = 50000
 stop_time = 0.15
 
+gravity.Gconst = 1.0 #gravitational constant
+
 do_reflux = 1
 do_subcycle = 0
diff --git a/tests/StarCluster.in b/tests/StarCluster.in
new file mode 100644
index 000000000..dbd1e3121
--- /dev/null
+++ b/tests/StarCluster.in
@@ -0,0 +1,48 @@
+# *****************************************************************
+# Problem size and geometry
+# *****************************************************************
+geometry.prob_lo = -10. -10. -10.
+geometry.prob_hi = 10. 10. 10.
+geometry.is_periodic = 0 0 0
+
+# *****************************************************************
+# VERBOSITY
+# *****************************************************************
+amr.v = 1 # verbosity in Amr
+
+# *****************************************************************
+# Resolution and refinement
+# *****************************************************************
+amr.n_cell = 128 128 128
+amr.max_level = 0 # number of levels = max_level + 1
+amr.blocking_factor = 16 # grid size must be divisible by this
+amr.max_grid_size = 64 # at least 128 for GPUs
+amr.n_error_buf = 3 # minimum 3 cell buffer around tagged cells
+amr.grid_eff = 0.7 # default
+
+hydro.reconstruction_order = 3 # PPM
+cfl = 0.2
+max_timesteps = 300
+stop_time = 0.5 # t_ff = 0.55
+
+gravity.Gconst = 1.0 #gravitational constant
+
+do_reflux = 1
+do_subcycle = 0
+
+ascent_interval = 10
+plotfile_interval = 20
+checkpoint_interval = 100
+
+perturb.cloud_radius = 5.0
+perturb.cloud_density = 1.0
+perturb.virial_parameter = 2.0
+
+# in quokka/src/StarCluster, generate with 'python3 perturbation.py --kmin=2 --kmax=64 --size=128 --alpha=2 --f_solenoidal=1.0'
+# and put it in quokka/tests/
+perturb.filename = "zdrv.hdf5"
+
+derived_vars = log_density
+
+amrex.throw_exception = 0
+amrex.signal_handling = 1
diff --git a/tests/StarCluster_AMR.in b/tests/StarCluster_AMR.in
new file mode 100644
index 000000000..42cb9fb39
--- /dev/null
+++ b/tests/StarCluster_AMR.in
@@ -0,0 +1,48 @@
+# *****************************************************************
+# Problem size and geometry
+# *****************************************************************
+geometry.prob_lo = -10. -10. -10.
+geometry.prob_hi = 10. 10. 10.
+geometry.is_periodic = 0 0 0
+
+# *****************************************************************
+# VERBOSITY
+# *****************************************************************
+amr.v = 1 # verbosity in Amr
+
+# *****************************************************************
+# Resolution and refinement
+# *****************************************************************
+amr.n_cell = 512 512 512
+amr.max_level = 4 # number of levels = max_level + 1
+amr.blocking_factor = 32 # grid size must be divisible by this
+amr.max_grid_size = 128 # at least 128 for GPUs
+amr.n_error_buf = 3 # minimum 3 cell buffer around tagged cells
+amr.grid_eff = 0.7 # default
+
+hydro.reconstruction_order = 3 # PPM
+cfl = 0.2
+max_timesteps = 10000
+stop_time = 0.5 # t_ff = 0.55
+
+gravity.Gconst = 1.0 #gravitational constant
+
+do_reflux = 1
+do_subcycle = 0
+
+ascent_interval = 100
+plotfile_interval = 500
+checkpoint_interval = 1000
+
+perturb.filename = "zdrv.hdf5"
+perturb.kmin = 2
+perturb.kmax = 128
+perturb.power_law = 2
+perturb.cloud_radius = 5.0
+perturb.cloud_density = 1.0
+perturb.virial_parameter = 2.0
+
+derived_vars = log_density
+
+amrex.throw_exception = 0
+amrex.signal_handling = 1
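The star-cluster inputs above rely on two pieces of the new problem setup: preCalculateInitialConditions rescales the velocity perturbation cube to hit the requested virial parameter, and ErrorEst tags cells whose Jeans length is resolved by fewer than N_cells = 4 zones. A standalone Python sketch, not part of the patch, that evaluates both expressions for the parameter values in tests/StarCluster.in:

import math

# Parameters from tests/StarCluster.in (code units with G = 1 and cs = 1).
G = 1.0
cs = 1.0
R_sphere = 5.0    # perturb.cloud_radius
rho_sphere = 1.0  # perturb.cloud_density
alpha_vir = 2.0   # perturb.virial_parameter

# Target turbulent velocity dispersion used for the rescale factor in
# preCalculateInitialConditions (star_cluster.cpp).
M_sphere = (4.0 / 3.0) * math.pi * R_sphere**3 * rho_sphere
rms_dv_target = math.sqrt(alpha_vir * (3.0 / 5.0) * G * M_sphere / R_sphere)
print(f"target rms dv = {rms_dv_target:.2f} (rms Mach = {rms_dv_target / cs:.2f})")

# Jeans-length refinement criterion from ErrorEst: tag cells where
# l_J = cs * sqrt(pi / (G * rho)) < N_cells * dx.
N_cells = 4
dx_base = 20.0 / 128.0  # (prob_hi - prob_lo) / amr.n_cell for StarCluster.in
rho_threshold = math.pi / (G * (N_cells * dx_base / cs) ** 2)
print(f"base grid refines where rho > {rho_threshold:.1f} (initial peak density is 1.0)")

The perturbation cube named by perturb.filename is produced by perturbation.py; the command quoted in tests/StarCluster.in ('python3 perturbation.py --kmin=2 --kmax=64 --size=128 --alpha=2 --f_solenoidal=1.0') writes zdrv.hdf5 into the working directory.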