Commit 01dd97aa authored by William Roper
Merge branch 'master' into explict_bkg_cdim

parents c2ceaf8f a1cc00a0
Showing with 872 additions and 18 deletions
@@ -27,8 +27,10 @@ endif
 SUBDIRS += src argparse examples doc tests tools
 if HAVEEAGLECOOLING
 SUBDIRS += examples/Cooling/CoolingRates
-endif
+DIST_SUBDIRS = $(SUBDIRS)
+else
 DIST_SUBDIRS = $(SUBDIRS) examples/Cooling/CoolingRates
+endif

 # Common flags
 MYFLAGS =
...
@@ -2316,6 +2316,11 @@ case "$with_cooling" in
       with_cooling_name=$with_cooling
    ;;
    grackle_*)
+      if test "$have_grackle" != "yes"; then
+         AC_MSG_ERROR([Grackle cooling: You need the grackle library for Grackle cooling. (--with-grackle=PATH)])
+      fi
       AC_DEFINE([COOLING_GRACKLE], [1], [Cooling via the grackle library])
       primordial_chemistry=${with_cooling#*_}
       AC_DEFINE_UNQUOTED([COOLING_GRACKLE_MODE], [$primordial_chemistry], [Grackle chemistry network])
...
@@ -81,3 +81,13 @@ EAGLEChemistry:  # Solar abundances
 GEARPressureFloor:
   jeans_factor: 0.       # Number of particles required to suppose a resolved clump and avoid the pressure floor.
+
+COLIBRECooling:
+  dir_name:                ./UV_dust1_CR1_G1_shield1.hdf5  # Location of the cooling tables
+  H_reion_z:               7.5       # Redshift of Hydrogen re-ionization (Planck 2018)
+  H_reion_eV_p_H:          2.0       # Energy injected by Hydrogen re-ionization, in electron-volts per Hydrogen atom
+  He_reion_z_centre:       3.5       # Redshift of the centre of the Helium re-ionization Gaussian
+  He_reion_z_sigma:        0.5       # Spread in redshift of the Helium re-ionization Gaussian
+  He_reion_eV_p_H:         2.0       # Energy injected by Helium re-ionization, in electron-volts per Hydrogen atom
+  delta_logTEOS_subgrid_properties: 0.3   # delta log T above the EOS below which the subgrid properties use the Teq assumption
+  rapid_cooling_threshold: 0.333333  # Switch to the rapid cooling regime for dt / t_cool above this threshold.

@@ -36,6 +36,9 @@ def get_data_dump(metadata):
         + "$\\bf{Hydrodynamics}$\n"
         + metadata.hydro_info
         + "\n\n"
+        + "$\\bf{Cooling}$\n"
+        + metadata.subgrid_scheme["Cooling Model"].decode("utf-8")
+        + "\n\n"
         + "$\\bf{Viscosity}$\n"
         + viscosity
         + "\n\n"

@@ -68,8 +71,6 @@ def setup_axes():
     for a in ax[:-1]:
         a.set_xlim(0, 100)

-    fig.tight_layout(pad=0.5)
-
     return fig, ax

@@ -103,12 +104,15 @@ def get_data(handle: float, n_snaps: int):
         try:
             energies.append(
                 np.mean(
-                    (data.gas.internal_energies * data.gas.masses).to(erg).value
+                    (data.gas.internal_energies * data.gas.masses)
+                    .astype(np.float64)
+                    .to(erg)
+                    .value
                 )
                 * data.metadata.scale_factor ** (2)
             )
             radiated_energies.append(
-                np.mean(data.gas.radiated_energy.to(erg).value)
+                np.mean(data.gas.radiated_energy.astype(np.float64).to(erg).value)
             )
         except AttributeError:
             continue

@@ -152,7 +156,7 @@ def plot_single_data(
         label_radiated = ""
         label_sum = ""

-    ax[2].plot(data[0], data[3], label=label_energy, ls="dotted", C=f"C{run}")
+    ax[2].plot(data[0], data[3], label=label_energy, ls="dotted", color=f"C{run}")
     # ax[2].plot(data[0], data[4], label=label_radiated, ls="dashed", C=f"C{run}")

@@ -222,4 +226,5 @@ if __name__ == "__main__":
     fig, ax = make_plot(handles, names, timestep_names)

+    fig.tight_layout(pad=0.5)
     fig.savefig("redshift_dependence.png", dpi=300)

Realistic SNe Sedov-Taylor explosion (realistic in the sense that it uses the
right order of magnitude for all variables). Meant to be run with const Lambda
cooling, but should also work with other cooling (the workflow also obtains
the EAGLE cooling tables).
The included Makefile (run.make) contains the full workflow that runs the
simulation and creates a plot of the energy evolution, the evolution of the
shock radius and velocity, and a movie of the density, velocity and temperature
profile.
To run the test, use
make -f run.make
This will run the simulation in a subdirectory called "run".
To run in a different subdirectory (useful for comparisons), use
make -f run.make folder=my_subdirectory_name
The energy and shock evolution scripts (plot_energy.py and plot_time_profile.py)
can also be run in comparison mode:
python3 plot_energy.py \
subdirectory1/statistics.txt subdirectory2/statistics.txt \
energy_1_vs_2.png \
--names "Run 1" "Run 2"
and
python3 plot_time_profile.py \
subdirectory1/profile.txt subdirectory2/profile.txt \
time_evolution_1_vs_2.png \
--names "Run 1" "Run 2"
(there is no limit on the number of runs that can be included).
#!/bin/bash
wget http://virgodb.cosma.dur.ac.uk/swift-webstorage/ICs/glassCube_64.hdf5
import numpy as np
import h5py
import argparse
import scipy.stats as stats
import unyt
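# Measure the shock position and velocity as a function of time from a series of
# SWIFT snapshots: for each snapshot, the shock radius is taken to be the radius
# of the peak of the binned radial density profile, and the shock velocity is the
# mean radial velocity in that bin. The results are written to a text file.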
argparser = argparse.ArgumentParser()
argparser.add_argument("input", nargs="+")
argparser.add_argument("output")
args = argparser.parse_args()
nfile = len(args.input)
time = np.zeros(nfile)
radius = np.zeros(nfile)
velocity = np.zeros(nfile)
for ifile, file in enumerate(args.input):
with h5py.File(file, "r") as handle:
coords = handle["PartType0/Coordinates"][:]
rho = handle["PartType0/Densities"][:]
vs = handle["PartType0/Velocities"][:]
t = handle["Header"].attrs["Time"][0]
box = handle["Header"].attrs["BoxSize"][:]
units = dict(handle["Units"].attrs)
uM = (units["Unit mass in cgs (U_M)"][0] * unyt.g).in_base("galactic")
uL = (units["Unit length in cgs (U_L)"][0] * unyt.cm).in_base("galactic")
ut = (units["Unit time in cgs (U_t)"][0] * unyt.s).in_base("galactic")
coords = (coords * uL).in_base("galactic")
rho = (rho * uM / uL ** 3).in_base("galactic")
vs = (vs * uL / ut).in_base("galactic")
t = (t * ut).in_base("galactic")
box = (box * uL).in_base("galactic")
coords -= 0.5 * box[None, :]
x = np.sqrt((coords ** 2).sum(axis=1))
v = (coords * vs).sum(axis=1) / x
x.convert_to_units("kpc")
v.convert_to_units("km/s")
t.convert_to_units("Myr")
rhohist, _, _ = stats.binned_statistic(x, rho, statistic="median", bins=100)
vhist, edges, _ = stats.binned_statistic(x, v, statistic="mean", bins=100)
mids = 0.5 * (edges[1:] + edges[:-1])
rhohist[np.isnan(rhohist)] = 0.0
imax = np.argmax(rhohist)
time[ifile] = t
radius[ifile] = mids[imax]
velocity[ifile] = vhist[imax]
isort = np.argsort(time)
time = time[isort]
radius = radius[isort]
velocity = velocity[isort]
with open(args.output, "w") as handle:
handle.write("# Radial profile of shock wave\n")
handle.write("# Column 0: time (Myr)\n")
handle.write("# Column 1: radius (kpc)\n")
handle.write("# Column 2: velocity (km/s)\n")
for t, r, v in zip(time, radius, velocity):
handle.write(f"{t:.5e}\t{r:.5e}\t{v:.5e}\n")
###############################################################################
# This file is part of SWIFT.
# Copyright (c) 2016 Matthieu Schaller (matthieu.schaller@durham.ac.uk)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import h5py
from numpy import *
# Generates a swift IC file for the Sedov blast test in a periodic cubic box
# Parameters
gamma = 5.0 / 3.0 # Gas adiabatic index
n0 = 0.1 # cm^-3
mp = 1.67e-24 # g
kB = 1.381e-16 # erg K^-1
pc = 3.086e18 # cm
rho0 = n0 * mp # Background density
T0 = 1.0e4  # Background temperature (K)
E0 = 1.0e51 # Energy of the explosion
N_inject = 32 # Number of particles in which to inject energy
L = 256.0 * pc
fileName = "sedov.hdf5"
print(rho0, "g cm^-3")
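# Internal units of the IC file (conversion factors to cgs)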
uL = 1.0e3 * pc
uM = 1.99e30
uv = 1.0e10
ut = uL / uv
uU = uv ** 2
print("ut:", ut / 3.154e7, "yr")
# ---------------------------------------------------
glass = h5py.File("glassCube_64.hdf5", "r")
# Read particle positions and h from the glass
pos = glass["/PartType0/Coordinates"][:, :]
h = glass["/PartType0/SmoothingLength"][:]
pos *= L
h *= L
for i in range(3):
pos[pos[:, i] < 0.0, i] += L
pos[pos[:, i] >= L, i] -= L
numPart = size(h)
# newpos = zeros((numPart*8,3))
# newh = zeros(numPart*8)
# for ix in range(2):
# for iy in range(2):
# for iz in range(2):
# offset = ix*4+iy*2+iz
# newpos[offset*numPart:(offset+1)*numPart,:] = 0.5*pos[:,:]
# newpos[offset*numPart:(offset+1)*numPart,0] += 0.5*ix*L
# newpos[offset*numPart:(offset+1)*numPart,1] += 0.5*iy*L
# newpos[offset*numPart:(offset+1)*numPart,2] += 0.5*iz*L
# newh[offset*numPart:(offset+1)*numPart] = 0.5*h[:]
# pos = newpos
# h = newh
print(h, h.min(), h.max())
numPart = size(h)
vol = L ** 3
# Generate extra arrays
v = zeros((numPart, 3))
ids = linspace(1, numPart, numPart)
m = zeros(numPart)
u = zeros(numPart)
r = zeros(numPart)
r = sqrt(
(pos[:, 0] - 0.5 * L) ** 2 + (pos[:, 1] - 0.5 * L) ** 2 + (pos[:, 2] - 0.5 * L) ** 2
)
m[:] = rho0 * vol / numPart
# u[:] = P0 / (rho0 * (gamma - 1))
u[:] = kB * T0 / ((gamma - 1.0) * mp)
print(u.mean(), E0 / (N_inject * m[0]))
print(E0 / (N_inject * m[0]) / u.mean())
# Make the central particle detonate
index = argsort(r)
u[index[0:N_inject]] = E0 / (N_inject * m[0])
pos /= uL
h /= uL
L /= uL
m /= uM
u /= uU
print("L:", L)
print("m:", m.mean())
print("h:", h.mean())
print("u:", u.mean(), u.min(), u.max())
print("pos:", pos.min(), pos.max())
# --------------------------------------------------
# File
file = h5py.File(fileName, "w")
# Header
grp = file.create_group("/Header")
grp.attrs["BoxSize"] = [L, L, L]
grp.attrs["NumPart_Total"] = [numPart, 0, 0, 0, 0, 0]
grp.attrs["NumPart_Total_HighWord"] = [0, 0, 0, 0, 0, 0]
grp.attrs["NumPart_ThisFile"] = [numPart, 0, 0, 0, 0, 0]
grp.attrs["Time"] = 0.0
grp.attrs["NumFilesPerSnapshot"] = 1
grp.attrs["MassTable"] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
grp.attrs["Flag_Entropy_ICs"] = 0
grp.attrs["Dimension"] = 3
# Units
grp = file.create_group("/Units")
grp.attrs["Unit length in cgs (U_L)"] = uL
grp.attrs["Unit mass in cgs (U_M)"] = uM
grp.attrs["Unit time in cgs (U_t)"] = ut
grp.attrs["Unit current in cgs (U_I)"] = 1.0
grp.attrs["Unit temperature in cgs (U_T)"] = 1.0
# Particle group
grp = file.create_group("/PartType0")
grp.create_dataset("Coordinates", data=pos, dtype="d")
grp.create_dataset("Velocities", data=v, dtype="f")
grp.create_dataset("Masses", data=m, dtype="f")
grp.create_dataset("SmoothingLength", data=h, dtype="f")
grp.create_dataset("InternalEnergy", data=u, dtype="f")
grp.create_dataset("ParticleIDs", data=ids, dtype="L")
file.close()
folder=.
snaps=$(shell ls ${folder}/sedov_*.hdf5)
imgs=$(patsubst ${folder}/sedov_%.hdf5,${folder}/SedovCooling_%.png,$(snaps))
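# Build the energy evolution plot, the shock profile plot and a movie of the
# density, velocity and temperature profiles from the snapshots in ${folder}.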
all: ${folder}/profile.png ${folder}/SedovCooling.mp4 ${folder}/energy.png
${folder}/energy.png: ${folder}/statistics.txt
python3 plot_energy.py ${folder}/statistics.txt ${folder}/energy.png
${folder}/profile.png: ${folder}/profile.txt
python3 plot_time_profile.py ${folder}/profile.txt ${folder}/profile.png
${folder}/profile.txt: $(snaps)
python3 get_time_profile.py $(snaps) ${folder}/profile.txt
${folder}/SedovCooling.mp4: $(imgs)
ffmpeg -y -framerate 10 -pattern_type glob -i "${folder}/SedovCooling_*.png" -c:v libx264 ${folder}/SedovCooling.mp4
${folder}/SedovCooling_%.png: ${folder}/sedov_%.hdf5
python3 plot_profile.py $< $@
import numpy as np
import argparse
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as pl
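# Plot the energy evolution (kinetic, thermal, radiated and total) from one or
# more SWIFT statistics.txt files; multiple runs can be overplotted and
# labelled via --names.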
argparser = argparse.ArgumentParser()
argparser.add_argument("input", nargs="+")
argparser.add_argument("output")
argparser.add_argument("--names", "-n", nargs="+")
args = argparser.parse_args()
for ifile, file in enumerate(args.input):
data = np.loadtxt(
file,
usecols=(1, 13, 14, 16),
dtype=[
("Time", np.float32),
("Ekin", np.float32),
("Etherm", np.float32),
("Erad", np.float32),
],
)
color = f"C{ifile}"
if args.names:
pl.plot([], [], "-", color=color, label=args.names[ifile])
pl.plot(data["Time"], data["Ekin"], "-.", color=color)
pl.plot(data["Time"], data["Etherm"], "--", color=color)
pl.plot(data["Time"], data["Erad"], ":", color=color)
pl.plot(
data["Time"], data["Ekin"] + data["Etherm"] + data["Erad"], "-", color=color
)
pl.plot([], [], "k-", label="Total energy")
pl.plot([], [], "k-.", label="Kinetic energy")
pl.plot([], [], "k--", label="Thermal energy")
pl.plot([], [], "k:", label="Radiated energy")
pl.ylabel("Energy")
pl.xlabel("Time")
pl.legend(loc="best", ncol=3)
pl.tight_layout()
pl.savefig(args.output, dpi=300)
import numpy as np
import h5py
import argparse
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as pl
import scipy.stats as stats
import unyt
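# Plot the radial density, radial velocity and temperature profiles of a single
# snapshot together with their binned averages, and mark the shock position
# (the peak of the binned density profile) with a vertical line.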
def bin_vals(x, y, log=True):
stat = "median" if log else "mean"
hist, edges, _ = stats.binned_statistic(x, y, statistic=stat, bins=100)
mids = 0.5 * (edges[1:] + edges[:-1])
return mids, hist
argparser = argparse.ArgumentParser()
argparser.add_argument("input")
argparser.add_argument("output")
args = argparser.parse_args()
x = None
rho = None
T = None
v = None
time = None
with h5py.File(args.input, "r") as handle:
coords = handle["PartType0/Coordinates"][:]
rho = handle["PartType0/Densities"][:]
T = handle["PartType0/Temperatures"][:]
vs = handle["PartType0/Velocities"][:]
time = handle["Header"].attrs["Time"][0]
box = handle["Header"].attrs["BoxSize"][:]
units = dict(handle["Units"].attrs)
uM = (units["Unit mass in cgs (U_M)"][0] * unyt.g).in_base("galactic")
uL = (units["Unit length in cgs (U_L)"][0] * unyt.cm).in_base("galactic")
ut = (units["Unit time in cgs (U_t)"][0] * unyt.s).in_base("galactic")
coords = (coords * uL).in_base("galactic")
rho = (rho * uM / uL ** 3).in_base("galactic")
T = T * unyt.K
vs = (vs * uL / ut).in_base("galactic")
time = (time * ut).in_base("galactic")
box = (box * uL).in_base("galactic")
coords -= 0.5 * box[None, :]
x = np.sqrt((coords ** 2).sum(axis=1))
v = (coords * vs).sum(axis=1) / x
rhohist, edges, _ = stats.binned_statistic(x, rho, statistic="median", bins=100)
mids = 0.5 * (edges[1:] + edges[:-1])
rhohist[np.isnan(rhohist)] = 0.0
imax = np.argmax(rhohist)
xmax = mids[imax]
x.name = "radius"
x.convert_to_units("kpc")
v.name = "radial velocity"
v.convert_to_units("km/s")
rho.name = "density"
rho.convert_to_units("g/cm**3")
T.name = "temperature"
fig, ax = pl.subplots(1, 3, figsize=(8, 4), sharex=True)
with unyt.matplotlib_support:
ax[0].semilogy(x, rho, ".")
b, h = bin_vals(x, rho)
ax[0].semilogy(b, h, "--")
ax[1].plot(x, v, ".")
b, h = bin_vals(x, v, log=False)
ax[1].plot(b, h, "--")
ax[2].semilogy(x, T, ".")
b, h = bin_vals(x, T)
ax[2].semilogy(b, h, "--")
for a in ax:
a.axvline(x=xmax, color="k", linestyle="--")
ax[0].set_ylim(1.0e-28, 1.0e-24)
ax[1].set_ylim(-10.0, 200.0)
ax[2].set_ylim(10.0, 1.0e8)
ax[1].set_title(f"t = {time:.2e}")
pl.tight_layout()
pl.savefig(args.output, dpi=300)
import numpy as np
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as pl
import argparse
import unyt
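# Plot the time evolution of the shock radius and velocity from one or more
# profile.txt files produced by get_time_profile.py; runs can be labelled
# via --names.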
argparser = argparse.ArgumentParser()
argparser.add_argument("input", nargs="+")
argparser.add_argument("output")
argparser.add_argument("--names", "-n", nargs="+")
args = argparser.parse_args()
fig, ax = pl.subplots(1, 2, sharex=True)
for ifile, file in enumerate(args.input):
data = np.loadtxt(
file,
dtype=[("time", np.float32), ("radius", np.float32), ("velocity", np.float32)],
)
t = data["time"] * unyt.Myr
t.name = "time"
r = data["radius"] * unyt.kpc
r.name = "radius"
v = data["velocity"] * unyt.km / unyt.s
v.name = "velocity"
label = None
if args.names:
label = args.names[ifile]
with unyt.matplotlib_support:
ax[0].plot(t, r, "-", label=label)
ax[1].plot(t, v, "-", label=label)
if args.names:
ax[1].legend(loc="best")
pl.tight_layout()
pl.savefig(args.output, dpi=300)
folder=run
pwd=$(shell pwd)
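# Full workflow: link the swift binary, initial conditions, parameter file and
# cooling tables into ${folder}, run the simulation there, and then build all
# plots and the movie via make_visualisations.make.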
${folder}/profile.png: ${folder}/sedov_0100.hdf5
make -f make_visualisations.make -j 16 folder=${folder}
${folder}/sedov_0100.hdf5: ${folder}/swift ${folder}/sedov.hdf5 ${folder}/coolingtables/redshifts.dat ${folder}/sedov.yml
cd ${folder}; ./swift --threads 8 --hydro --cooling --limiter sedov.yml
${folder}/sedov.hdf5: sedov.hdf5
ln -s ${pwd}/sedov.hdf5 ${folder}/sedov.hdf5
${folder}/coolingtables/redshifts.dat: coolingtables/redshifts.dat
ln -s ${pwd}/coolingtables ${folder}/coolingtables
${folder}/swift: ../../../swift
mkdir -p ${folder}; ln -s ${pwd}/../../../swift ${folder}/swift
${folder}/sedov.yml: sedov.yml
ln -s ${pwd}/sedov.yml ${folder}/sedov.yml
sedov.hdf5: glassCube_64.hdf5
python3 makeIC.py
glassCube_64.hdf5:
bash getGlass.sh
coolingtables/redshifts.dat:
bash ../getEagleCoolingTable.sh
# Define the system of units to use internally.
InternalUnitSystem:
UnitMass_in_cgs: 1.99e33 # g
UnitLength_in_cgs: 3.086e21 # cm
UnitVelocity_in_cgs: 1.e5 # Centimeters per second
UnitCurrent_in_cgs: 1 # Amperes
UnitTemp_in_cgs: 1 # Kelvin
# Parameters governing the time integration
TimeIntegration:
time_begin: 0. # The starting time of the simulation (in internal units).
time_end: 1.e-3 # The end time of the simulation (in internal units).
dt_min: 1.e-13 # The minimal time-step size of the simulation (in internal units).
dt_max: 1.e-3 # The maximal time-step size of the simulation (in internal units).
# Parameters governing the snapshots
Snapshots:
basename: sedov # Common part of the name of output files
time_first: 0. # Time of the first output (in internal units)
delta_time: 1.e-5 # Time difference between consecutive outputs (in internal units)
# Parameters governing the conserved quantities statistics
Statistics:
delta_time: 1e-5 # Time between statistics output
# Parameters for the hydrodynamics scheme
SPH:
resolution_eta: 1.2348 # Target smoothing length in units of the mean inter-particle separation (1.2348 == 48Ngbs with the cubic spline kernel).
CFL_condition: 0.1 # Courant-Friedrich-Levy condition for time integration.
minimal_temperature: 1.e2
max_ghost_iterations: 1000
h_max: 0.04 # Maximum smoothing length. Since the density in the post-shock region becomes very low, we need to make sure that this does not exceed 1/3 of the periodic box (including kernel gamma ~ 2)
# Parameters related to the initial conditions
InitialConditions:
file_name: ./sedov.hdf5 # The file to read
periodic: 1
EAGLECooling:
H_reion_z: 11.5                 # Redshift of Hydrogen re-ionization
H_reion_eV_p_H: 2.0             # Energy injected by Hydrogen re-ionization, in electron-volts per Hydrogen atom
He_reion_z_centre: 3.5          # Redshift of the centre of the Helium re-ionization Gaussian
He_reion_z_sigma: 0.5           # Spread in redshift of the Helium re-ionization Gaussian
He_reion_eV_p_H: 2.0            # Energy injected by Helium re-ionization, in electron-volts per Hydrogen atom
dir_name: ./coolingtables/      # Location of the EAGLE cooling tables
EAGLEChemistry:
init_abundance_metal: 0.014 # Mass fraction in *all* metals
init_abundance_Hydrogen: 0.70649785 # Mass fraction in Hydrogen
init_abundance_Helium: 0.28055534 # Mass fraction in Helium
init_abundance_Carbon: 2.0665436e-3 # Mass fraction in Carbon
init_abundance_Nitrogen: 8.3562563e-4 # Mass fraction in Nitrogen
init_abundance_Oxygen: 5.4926244e-3 # Mass fraction in Oxygen
init_abundance_Neon: 1.4144605e-3 # Mass fraction in Neon
init_abundance_Magnesium: 5.907064e-4 # Mass fraction in Magnesium
init_abundance_Silicon: 6.825874e-4 # Mass fraction in Silicon
init_abundance_Iron: 1.1032152e-3 # Mass fraction in Iron
EoS:
isothermal_internal_energy: 1240419161676.6465
LambdaTTableCooling:
file_name: cooling.dat
ConstCooling:
cooling_rate: 1.e6
min_energy: 0.01
cooling_tstep_mult: 2
LambdaCooling:
lambda_nH2_cgs: 2.e-23
cooling_tstep_mult: 0.1
rapid_cooling: 1
Scheduler:
max_top_level_cells: 12
cell_max_size: 8000000
cell_sub_size_pair_hydro: 256000000
cell_sub_size_self_hydro: 32000
cell_sub_size_pair_stars: 256000000
cell_sub_size_self_stars: 32000
cell_sub_size_pair_grav: 256000000
cell_sub_size_self_grav: 32000
cell_split_size: 400
cell_subdepth_diff_grav: 4
cell_extra_parts: 0
cell_extra_sparts: 100
cell_extra_gparts: 0
cell_extra_bparts: 0
cell_extra_sinks: 0
engine_max_parts_per_ghost: 1000
engine_max_sparts_per_ghost: 1000
engine_max_parts_per_cooling: 10000
nr_queues: 4
dependency_graph_frequency: 0
task_level_output_frequency: 0
tasks_per_cell: 100
links_per_tasks: 25
mpi_message_limit: 4
@@ -42,10 +42,10 @@ Statistics:

 # Parameters for the self-gravity scheme
 Gravity:
   eta:                       0.025     # Constant dimensionless multiplier for time integration.
-  MAC:                       geometric
+  MAC:                       adaptive
+  epsilon_fmm:               0.001
   theta_cr:                  0.7       # Opening angle (Multipole acceptance criterion)
-  use_tree_below_softening:  1
-  mesh_side_length:          256
+  mesh_side_length:          1024
   comoving_DM_softening:     0.0026994 # Comoving DM softening length (in internal units).
   max_physical_DM_softening: 0.0007    # Max physical DM softening length (in internal units).
...
@@ -42,10 +42,10 @@ Statistics:

 # Parameters for the self-gravity scheme
 Gravity:
   eta:                       0.025     # Constant dimensionless multiplier for time integration.
-  MAC:                       geometric
+  MAC:                       adaptive
+  epsilon_fmm:               0.001
   theta_cr:                  0.7       # Opening angle (Multipole acceptance criterion)
-  use_tree_below_softening:  1
-  mesh_side_length:          32
+  mesh_side_length:          128
   comoving_DM_softening:     0.0026994 # Comoving DM softening length (in internal units).
   max_physical_DM_softening: 0.0007    # Max physical DM softening length (in internal units).
...
@@ -42,10 +42,10 @@ Statistics:

 # Parameters for the self-gravity scheme
 Gravity:
   eta:                       0.025     # Constant dimensionless multiplier for time integration.
-  MAC:                       geometric
+  MAC:                       adaptive
+  epsilon_fmm:               0.001
   theta_cr:                  0.7       # Opening angle (Multipole acceptance criterion)
-  use_tree_below_softening:  1
-  mesh_side_length:          64
+  mesh_side_length:          256
   comoving_DM_softening:     0.0026994 # Comoving DM softening length (in internal units).
   max_physical_DM_softening: 0.0007    # Max physical DM softening length (in internal units).
...
@@ -41,10 +41,10 @@ Statistics:

 # Parameters for the self-gravity scheme
 Gravity:
   eta:                       0.025     # Constant dimensionless multiplier for time integration.
-  MAC:                       geometric
+  MAC:                       adaptive
+  epsilon_fmm:               0.001
   theta_cr:                  0.7       # Opening angle (Multipole acceptance criterion)
-  use_tree_below_softening:  1
-  mesh_side_length:          128
+  mesh_side_length:          512
   comoving_DM_softening:     0.0026994 # Comoving DM softening length (in internal units).
   max_physical_DM_softening: 0.0007    # Max physical DM softening length (in internal units).
...
1D advection test for radiative transfer.

This test is identical to the examples/RadiativeTransferTests/Advection_1D test,
with the difference that the particles aren't equally spaced on a line, but are
split into three regions with a decreasing interval between particles in each
region.
More concretely, for a box of size 1, the separation is given as
dx if x < 0.33
dx/2 if 0.33 < x < 0.66
dx/4 if x >= 0.66
This leads to the three regions having different time step sizes for RT.
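As a rough sketch (assuming dx_init is the base spacing and boxsize the box
size, mirroring what makeIC.py in this directory does), the positions and
per-particle volumes can be generated like this:

    x, positions, volumes = 0.5 * dx_init, [], []
    while x < boxsize:
        if x < 0.33 * boxsize:
            dx = dx_init
        elif x <= 0.66 * boxsize:
            dx = dx_init / 2
        else:
            dx = dx_init / 4
        x += dx
        positions.append(x)
        volumes.append(dx)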
Test that your method is TVD and that the propagation speed of the photons is
correct. The ICs set up three photon groups:
- The first is a top hat function initial distribution where outside values
  are zero.
- The second is a top hat function initial distribution where outside values
  are nonzero. This distinction is important to test because photon energies
  can't be negative, so these cases need to be tested individually.
- The third is a smooth Gaussian.
This way, you can test multiple initial condition scenarios simultaneously.
There are no stars to act as sources. Also make sure that you choose your
photon frequencies such that they don't interact with the gas!
The ICs are created to be compatible with GEAR_RT and SPHM1RT. Recommended configuration:
GEAR_RT:
--with-rt=GEAR_3 --with-rt-riemann-solver=GLF --with-hydro-dimension=1 --with-hydro=gizmo-mfv \
--with-riemann-solver=hllc --with-stars=GEAR --with-feedback=none --with-grackle=$GRACKLE_ROOT
SPHM1RT:
--with-rt=SPHM1RT_3 --with-hydro-dimension=1 --with-stars=basic --with-sundials=$SUNDIALS_ROOT
IMPORTANT: SUNDIALS version 5 is required.
SUNDIALS_ROOT is the root directory that contains the lib and include directories, e.g. on cosma:
SUNDIALS_ROOT=/cosma/local/sundials/5.1.0/
Note that SPHM1RT is compatible with any (SPH) hydro scheme.
Note that if you want to use a reduced speed of light for this test, you also
need to adapt the fluxes in the initial conditions! They are generated assuming
that the speed of light is not reduced.
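For example (a sketch only, not part of the provided scripts), if the speed of
light is reduced by a hypothetical factor f_reduce in your run, one way to adapt
the ICs is to rescale the stored fluxes by that same factor, so that
|F| = c_reduced * E still holds in the free-streaming limit:

    import h5py

    f_reduce = 0.01  # hypothetical value; must match your run configuration
    with h5py.File("advection_1D.hdf5", "r+") as ics:
        for g in range(1, 4):
            dset = ics["/PartType0/PhotonFluxesGroup{0:d}".format(g)]
            dset[:] = dset[:] * f_reduce  # keep |F| = c_reduced * E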
#!/usr/bin/env python3
###############################################################################
# This file is part of SWIFT.
# Copyright (c) 2021 Mladen Ivkovic (mladen.ivkovic@hotmail.com)
# 2022 Tsang Keung Chan (chantsangkeung@gmail.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# -----------------------------------------------------------
# Add initial conditions for photon energies and fluxes
# for 1D advection of photons.
# First photon group: Top hat function with zero as the
# baseline.
# Second photon group: Top hat function with nonzero value
# as the baseline.
# Third photon group: Gaussian.
# -----------------------------------------------------------
import h5py
import numpy as np
import unyt
from swiftsimio import Writer
# define unit system to use
unitsystem = unyt.unit_systems.cgs_unit_system
# Box size (in the cgs unit system defined above)
boxsize = 1e10 * unitsystem["length"]
# number of photon groups
nPhotonGroups = 3
# initial (coarsest) particle spacing
dx_init = boxsize / 1000
# filename of ICs to be generated
outputfilename = "advection_1D.hdf5"
def initial_condition(x):
"""
The initial conditions that will be advected
x: particle position. 3D unyt array
returns:
E: photon energy density for each photon group. List of scalars with size of nPhotonGroups
F: photon flux for each photon group. List with size of nPhotonGroups of numpy arrays of shape (3,)
"""
# you can make the photon quantities unitless, the units will
# already have been written down in the writer.
E_list = []
F_list = []
# Group 1 Photons:
# -------------------
if x[0] < 0.33 * boxsize:
E = 0.0
elif x[0] < 0.66 * boxsize:
E = 1.0
else:
E = 0.0
# Assuming all photons flow in only one direction
# (optically thin regime, "free streaming limit"),
# we have that |F| = c * E
F = np.zeros(3, dtype=np.float64)
F[0] = unyt.c.to(unitsystem["length"] / unitsystem["time"]) * E
E_list.append(E)
F_list.append(F)
# Group 2 Photons:
# -------------------
if x[0] < 0.33 * boxsize:
E = 1.0
elif x[0] < 0.66 * boxsize:
E = 3.0
else:
E = 1.0
F = np.zeros(3, dtype=np.float64)
F[0] = unyt.c.to(unitsystem["length"] / unitsystem["time"]) * E
E_list.append(E)
F_list.append(F)
# Group 3 Photons:
# -------------------
sigma = 0.1 * boxsize
mean = 0.5 * boxsize
amplitude = 2.0
E = amplitude * np.exp(-(x[0] - mean) ** 2 / (2 * sigma ** 2))
F = np.zeros(3, dtype=np.float64)
F[0] = unyt.c.to(unitsystem["length"] / unitsystem["time"]) * E
E_list.append(E)
F_list.append(F)
return E_list, F_list
if __name__ == "__main__":
xp_list = []
volumes_list = []
x = 0.5 * dx_init
while x < boxsize:
if x < 0.33 * boxsize:
dx = dx_init
elif 0.33 * boxsize <= x <= 0.66 * boxsize:
dx = dx_init / 2
else:
dx = dx_init / 4
x = x + dx
xp_list.append(x)
volumes_list.append(dx)
n_p = len(xp_list)
xp = unyt.unyt_array(np.zeros((n_p, 3), dtype=np.float64), boxsize.units)
volumes = unyt.unyt_array(np.zeros((n_p), dtype=np.float64), boxsize.units ** 3)
for i in range(n_p):
xp[i, 0] = xp_list[i]
volumes[i] = volumes_list[i]
w = Writer(unyt.unit_systems.cgs_unit_system, boxsize, dimension=1)
w.gas.coordinates = xp
w.gas.velocities = np.zeros(xp.shape) * (unyt.cm / unyt.s)
w.gas.masses = np.ones(xp.shape[0], dtype=np.float64) * 1000 * unyt.g
w.gas.internal_energy = (
np.ones(xp.shape[0], dtype=np.float64) * (300.0 * unyt.kb * unyt.K) / unyt.g
)
# Generate initial guess for smoothing lengths based on MIPS
w.gas.generate_smoothing_lengths(boxsize=boxsize, dimension=1)
# If IDs are not present, this automatically generates them
w.write(outputfilename)
# Now open file back up again and add photon groups
# you can make them unitless, the units have already been
# written down in the writer. In this case, it's in cgs.
F = h5py.File(outputfilename, "r+")
header = F["Header"]
nparts = header.attrs["NumPart_ThisFile"][0]
parts = F["/PartType0"]
for grp in range(nPhotonGroups):
dsetname = "PhotonEnergiesGroup{0:d}".format(grp + 1)
energydata = np.zeros((nparts), dtype=np.float32)
parts.create_dataset(dsetname, data=energydata)
dsetname = "PhotonFluxesGroup{0:d}".format(grp + 1)
# if dsetname not in parts.keys():
fluxdata = np.zeros((nparts, 3), dtype=np.float32)
parts.create_dataset(dsetname, data=fluxdata)
for p in range(nparts):
E, Flux = initial_condition(xp[p])
for g in range(nPhotonGroups):
# Esetname = "PhotonEnergiesGroup{0:d}".format(g + 1)
# parts[Esetname][p] = E[g] / volumes[p]
# Fsetname = "PhotonFluxesGroup{0:d}".format(g + 1)
# parts[Fsetname][p] = Flux[g] / volumes[p]
# Esetname = "PhotonEnergiesGroup{0:d}".format(g + 1)
# parts[Esetname][p] = E[g]
# Fsetname = "PhotonFluxesGroup{0:d}".format(g + 1)
# parts[Fsetname][p] = Flux[g]
Esetname = "PhotonEnergiesGroup{0:d}".format(g + 1)
parts[Esetname][p] = E[g] * volumes[p]
Fsetname = "PhotonFluxesGroup{0:d}".format(g + 1)
parts[Fsetname][p] = Flux[g] * volumes[p]
# from matplotlib import pyplot as plt
# plt.figure()
# for g in range(nPhotonGroups):
# # Esetname = "PhotonEnergiesGroup{0:d}".format(g+1)
# # plt.plot(xp[:,0], parts[Esetname], label="E "+str(g+1))
# Fsetname = "PhotonFluxesGroup{0:d}".format(g+1)
# plt.plot(xp[:,0], parts[Fsetname][:,0], label="F "+str(g+1))
# plt.legend()
# plt.show()
F.close()