Commit 13bae9eb authored by Matthieu Schaller

Merge branch 'master' into dopair-vectorisation-merge

parents a8734760 176aebc3
......@@ -73,6 +73,9 @@ tests/testRiemannExact
tests/testRiemannTRRS
tests/testRiemannHLLC
tests/testMatrixInversion
tests/testVoronoi1D
tests/testVoronoi2D
tests/testVoronoi3D
tests/testDump
tests/testLogger
tests/benchmarkInteractions
......
......@@ -460,7 +460,6 @@ fi
# Check for HDF5. This is required.
AX_LIB_HDF5
if test "$with_hdf5" != "yes"; then
AC_MSG_ERROR([Could not find a working HDF5 library])
fi
......@@ -468,8 +467,6 @@ fi
# We want to know if this HDF5 supports MPI and whether we should use it.
# The default is to use MPI support if it is available, i.e. this is
# a parallel HDF5.
# To do this need to ask the HDF5 compiler about its configuration,
# -showconfig should have yes/no.
have_parallel_hdf5="no"
if test "$with_hdf5" = "yes"; then
AC_ARG_ENABLE([parallel-hdf5],
......@@ -482,7 +479,14 @@ if test "$with_hdf5" = "yes"; then
if test "$enable_parallel_hdf5" = "yes"; then
AC_MSG_CHECKING([for HDF5 parallel support])
parallel=`$H5CC -showconfig | grep "Parallel HDF5:" | awk '{print $3}'`
# Check if the library is capable, the header should define H5_HAVE_PARALLEL.
AC_COMPILE_IFELSE([AC_LANG_SOURCE([[
#include "hdf5.h"
#ifndef H5_HAVE_PARALLEL
# error macro not defined
#endif
]])], [parallel="yes"], [parallel="no"])
if test "$parallel" = "yes"; then
have_parallel_hdf5="yes"
AC_DEFINE([HAVE_PARALLEL_HDF5],1,[HDF5 library supports parallel access])
......@@ -583,7 +587,7 @@ fi
# Hydro scheme.
AC_ARG_WITH([hydro],
[AS_HELP_STRING([--with-hydro=<scheme>],
[Hydro dynamics to use @<:@gadget2, minimal, hopkins, default, gizmo default: gadget2@:>@]
[Hydro dynamics to use @<:@gadget2, minimal, hopkins, default, gizmo, shadowfax default: gadget2@:>@]
)],
[with_hydro="$withval"],
[with_hydro="gadget2"]
......@@ -604,6 +608,9 @@ case "$with_hydro" in
gizmo)
AC_DEFINE([GIZMO_SPH], [1], [GIZMO SPH])
;;
shadowfax)
AC_DEFINE([SHADOWFAX_SPH], [1], [Shadowfax SPH])
;;
*)
AC_MSG_ERROR([Unknown hydrodynamics scheme: $with_hydro])
......@@ -792,6 +799,15 @@ case "$with_potential" in
;;
esac
# Gravity multipole order
AC_ARG_WITH([multipole-order],
[AS_HELP_STRING([--with-multipole-order=<order>],
[order of the multipole and gravitational field expansion @<:@ default: 3@:>@]
)],
[with_multipole_order="$withval"],
[with_multipole_order="3"]
)
AC_DEFINE_UNQUOTED([SELF_GRAVITY_MULTIPOLE_ORDER], [$with_multipole_order], [Multipole order])
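The multipole order selected at configure time becomes a compile-time constant, so it fixes how many expansion coefficients every gravity multipole carries. A minimal C sketch of that counting, assuming the usual (n+1)(n+2)/2 independent components of a symmetric rank-n tensor in 3D; the function name and layout are illustrative only, not SWIFT's actual data structures:

#include <stdio.h>

#ifndef SELF_GRAVITY_MULTIPOLE_ORDER
#define SELF_GRAVITY_MULTIPOLE_ORDER 3 /* the configure default above */
#endif

/* A symmetric rank-n tensor in 3D has (n+1)(n+2)/2 independent components;
 * an expansion up to a given order carries the sum of these over all ranks. */
static int multipole_num_coefficients(int order) {
  int count = 0;
  for (int n = 0; n <= order; ++n) count += (n + 1) * (n + 2) / 2;
  return count;
}

int main(void) {
  printf("Order %d multipoles carry %d coefficients each.\n",
         SELF_GRAVITY_MULTIPOLE_ORDER,
         multipole_num_coefficients(SELF_GRAVITY_MULTIPOLE_ORDER));
  return 0;
}

For the default order of 3 this gives 1 + 3 + 6 + 10 = 20 coefficients per multipole, which is why raising the order noticeably increases the memory and compute cost of the gravity tasks.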
# Check for git, needed for revision stamps.
......@@ -839,6 +855,7 @@ AC_MSG_RESULT([
Riemann solver : $with_riemann
Cooling function : $with_cooling
External potential : $with_potential
Multipole order : $with_multipole_order
Task debugging : $enable_task_debugging
Debugging checks : $enable_debugging_checks
......
......@@ -13,6 +13,9 @@ TimeIntegration:
dt_min: 1e-10 # The minimal time-step size of the simulation (in internal units).
dt_max: 1e-4 # The maximal time-step size of the simulation (in internal units).
Scheduler:
cell_split_size: 50
# Parameters governing the snapshots
Snapshots:
basename: eagle # Common part of the name of output files
......@@ -23,6 +26,13 @@ Snapshots:
Statistics:
delta_time: 1e-2 # Time between statistics output
# Parameters for the self-gravity scheme
Gravity:
eta: 0.025 # Constant dimensionless multiplier for time integration.
epsilon: 0.0001 # Softening length (in internal units).
a_smooth: 1000.
r_cut: 4.
# Parameters for the hydrodynamics scheme
SPH:
resolution_eta: 1.2348 # Target smoothing length in units of the mean inter-particle separation (1.2348 == 48Ngbs with the cubic spline kernel).
......
......@@ -23,6 +23,13 @@ Snapshots:
Statistics:
delta_time: 1e-2 # Time between statistics output
# Parameters for the self-gravity scheme
Gravity:
eta: 0.025 # Constant dimensionless multiplier for time integration.
epsilon: 0.0001 # Softening length (in internal units).
a_smooth: 1000.
r_cut: 4.
# Parameters for the hydrodynamics scheme
SPH:
resolution_eta: 1.2348 # Target smoothing length in units of the mean inter-particle separation (1.2348 == 48Ngbs with the cubic spline kernel).
......
#!/usr/bin/env python
import sys
import glob
import re
import numpy as np
import matplotlib.pyplot as plt
params = {'axes.labelsize': 14,
'axes.titlesize': 18,
'font.size': 12,
'legend.fontsize': 12,
'xtick.labelsize': 14,
'ytick.labelsize': 14,
'text.usetex': True,
'figure.figsize': (10, 10),
'figure.subplot.left' : 0.06,
'figure.subplot.right' : 0.99 ,
'figure.subplot.bottom' : 0.06 ,
'figure.subplot.top' : 0.985 ,
'figure.subplot.wspace' : 0.14 ,
'figure.subplot.hspace' : 0.14 ,
'lines.markersize' : 6,
'lines.linewidth' : 3.,
'text.latex.unicode': True
}
plt.rcParams.update(params)
plt.rc('font',**{'family':'sans-serif','sans-serif':['Times']})
min_error = 1e-6
max_error = 1e-1
num_bins = 51
# Construct the bins
bin_edges = np.linspace(np.log10(min_error), np.log10(max_error), num_bins + 1)
bin_size = (np.log10(max_error) - np.log10(min_error)) / num_bins
bins = 0.5*(bin_edges[1:] + bin_edges[:-1])
bin_edges = 10**bin_edges
bins = 10**bins
# Colours
cols = ['b', 'g', 'r', 'm']
# Time-step to plot
step = int(sys.argv[1])
# Find the files for the different expansion orders
order_list = glob.glob("gravity_checks_step%d_order*.dat"%step)
num_order = len(order_list)
# Get the multipole orders
order = np.zeros(num_order)
for i in range(num_order):
    order[i] = int(order_list[i][26])
# Start the plot
plt.figure()
# Get the Gadget-2 data if existing
gadget2_file_list = glob.glob("forcetest_gadget2.txt")
if len(gadget2_file_list) != 0:
    gadget2_data = np.loadtxt(gadget2_file_list[0])
    gadget2_ids = gadget2_data[:,0]
    gadget2_pos = gadget2_data[:,1:4]
    gadget2_a_exact = gadget2_data[:,4:7]
    gadget2_a_grav = gadget2_data[:, 7:10]
    # Sort stuff
    sort_index = np.argsort(gadget2_ids)
    gadget2_ids = gadget2_ids[sort_index]
    gadget2_pos = gadget2_pos[sort_index, :]
    gadget2_a_exact = gadget2_a_exact[sort_index, :]
    gadget2_a_grav = gadget2_a_grav[sort_index, :]
    # Compute the error norm
    diff = gadget2_a_exact - gadget2_a_grav
    norm_diff = np.sqrt(diff[:,0]**2 + diff[:,1]**2 + diff[:,2]**2)
    norm_a = np.sqrt(gadget2_a_exact[:,0]**2 + gadget2_a_exact[:,1]**2 + gadget2_a_exact[:,2]**2)
    norm_error = norm_diff / norm_a
    error_x = abs(diff[:,0]) / norm_a
    error_y = abs(diff[:,1]) / norm_a
    error_z = abs(diff[:,2]) / norm_a
    # Bin the error (np.histogram returns a tuple, so take the counts before normalising)
    norm_error_hist = np.histogram(norm_error, bins=bin_edges, density=False)[0] / (np.size(norm_error) * bin_size)
    error_x_hist = np.histogram(error_x, bins=bin_edges, density=False)[0] / (np.size(norm_error) * bin_size)
    error_y_hist = np.histogram(error_y, bins=bin_edges, density=False)[0] / (np.size(norm_error) * bin_size)
    error_z_hist = np.histogram(error_z, bins=bin_edges, density=False)[0] / (np.size(norm_error) * bin_size)
    norm_median = np.median(norm_error)
    median_x = np.median(error_x)
    median_y = np.median(error_y)
    median_z = np.median(error_z)
    norm_per95 = np.percentile(norm_error,95)
    per95_x = np.percentile(error_x,95)
    per95_y = np.percentile(error_y,95)
    per95_z = np.percentile(error_z,95)
    plt.subplot(221)
    plt.semilogx(bins, norm_error_hist, 'k--', label="Gadget-2")
    plt.plot([norm_median, norm_median], [2.7, 3], 'k-', lw=1)
    plt.plot([norm_per95, norm_per95], [2.7, 3], 'k:', lw=1)
    plt.subplot(222)
    plt.semilogx(bins, error_x_hist, 'k--', label="Gadget-2")
    plt.plot([median_x, median_x], [1.8, 2], 'k-', lw=1)
    plt.plot([per95_x, per95_x], [1.8, 2], 'k:', lw=1)
    plt.subplot(223)
    plt.semilogx(bins, error_y_hist, 'k--', label="Gadget-2")
    plt.plot([median_y, median_y], [1.8, 2], 'k-', lw=1)
    plt.plot([per95_y, per95_y], [1.8, 2], 'k:', lw=1)
    plt.subplot(224)
    plt.semilogx(bins, error_z_hist, 'k--', label="Gadget-2")
    plt.plot([median_z, median_z], [1.8, 2], 'k-', lw=1)
    plt.plot([per95_z, per95_z], [1.8, 2], 'k:', lw=1)
# Plot the different histograms
for i in range(num_order-1, -1, -1):
    data = np.loadtxt(order_list[i])
    ids = data[:,0]
    pos = data[:,1:4]
    a_exact = data[:,4:7]
    a_grav = data[:, 7:10]
    # Sort stuff
    sort_index = np.argsort(ids)
    ids = ids[sort_index]
    pos = pos[sort_index, :]
    a_exact = a_exact[sort_index, :]
    a_grav = a_grav[sort_index, :]
    # Cross-checks
    if not np.array_equal(ids, gadget2_ids):
        print "Comparing different IDs !"
    if not np.array_equal(pos, gadget2_pos):
        print "Comparing different positions ! max difference:", np.max(pos - gadget2_pos)
    if not np.array_equal(a_exact, gadget2_a_exact):
        print "Comparing different exact accelerations ! max difference:", np.max(a_exact - gadget2_a_exact)
    # Compute the error norm
    diff = a_exact - a_grav
    norm_diff = np.sqrt(diff[:,0]**2 + diff[:,1]**2 + diff[:,2]**2)
    norm_a = np.sqrt(a_exact[:,0]**2 + a_exact[:,1]**2 + a_exact[:,2]**2)
    norm_error = norm_diff / norm_a
    error_x = abs(diff[:,0]) / norm_a
    error_y = abs(diff[:,1]) / norm_a
    error_z = abs(diff[:,2]) / norm_a
    # Bin the error (again taking the counts out of the np.histogram tuple)
    norm_error_hist = np.histogram(norm_error, bins=bin_edges, density=False)[0] / (np.size(norm_error) * bin_size)
    error_x_hist = np.histogram(error_x, bins=bin_edges, density=False)[0] / (np.size(norm_error) * bin_size)
    error_y_hist = np.histogram(error_y, bins=bin_edges, density=False)[0] / (np.size(norm_error) * bin_size)
    error_z_hist = np.histogram(error_z, bins=bin_edges, density=False)[0] / (np.size(norm_error) * bin_size)
    norm_median = np.median(norm_error)
    median_x = np.median(error_x)
    median_y = np.median(error_y)
    median_z = np.median(error_z)
    norm_per95 = np.percentile(norm_error,95)
    per95_x = np.percentile(error_x,95)
    per95_y = np.percentile(error_y,95)
    per95_z = np.percentile(error_z,95)
    plt.subplot(221)
    plt.semilogx(bins, norm_error_hist, color=cols[i],label="SWIFT m-poles order %d"%order[i])
    plt.plot([norm_median, norm_median], [2.7, 3],'-', color=cols[i], lw=1)
    plt.plot([norm_per95, norm_per95], [2.7, 3],':', color=cols[i], lw=1)
    plt.subplot(222)
    plt.semilogx(bins, error_x_hist, color=cols[i],label="SWIFT m-poles order %d"%order[i])
    plt.plot([median_x, median_x], [1.8, 2],'-', color=cols[i], lw=1)
    plt.plot([per95_x, per95_x], [1.8, 2],':', color=cols[i], lw=1)
    plt.subplot(223)
    plt.semilogx(bins, error_y_hist, color=cols[i],label="SWIFT m-poles order %d"%order[i])
    plt.plot([median_y, median_y], [1.8, 2],'-', color=cols[i], lw=1)
    plt.plot([per95_y, per95_y], [1.8, 2],':', color=cols[i], lw=1)
    plt.subplot(224)
    plt.semilogx(bins, error_z_hist, color=cols[i],label="SWIFT m-poles order %d"%order[i])
    plt.plot([median_z, median_z], [1.8, 2],'-', color=cols[i], lw=1)
    plt.plot([per95_z, per95_z], [1.8, 2],':', color=cols[i], lw=1)
plt.subplot(221)
plt.xlabel("$|\delta \overrightarrow{a}|/|\overrightarrow{a}_{exact}|$")
plt.ylabel("Density")
plt.xlim(min_error, 2*max_error)
plt.ylim(0,3)
plt.legend(loc="upper left")
plt.subplot(222)
plt.xlabel("$\delta a_x/|\overrightarrow{a}_{exact}|$")
plt.ylabel("Density")
plt.xlim(min_error, 2*max_error)
plt.ylim(0,2)
#plt.legend(loc="center left")
plt.subplot(223)
plt.xlabel("$\delta a_y/|\overrightarrow{a}_{exact}|$")
plt.ylabel("Density")
plt.xlim(min_error, 2*max_error)
plt.ylim(0,2)
#plt.legend(loc="center left")
plt.subplot(224)
plt.xlabel("$\delta a_z/|\overrightarrow{a}_{exact}|$")
plt.ylabel("Density")
plt.xlim(min_error, 2*max_error)
plt.ylim(0,2)
#plt.legend(loc="center left")
plt.savefig("gravity_checks_step%d.png"%step)
......@@ -323,14 +323,14 @@ int main(int argc, char *argv[]) {
/* How large are the parts? */
if (myrank == 0) {
message("sizeof(part) is %4zi bytes.", sizeof(struct part));
message("sizeof(xpart) is %4zi bytes.", sizeof(struct xpart));
message("sizeof(spart) is %4zi bytes.", sizeof(struct spart));
message("sizeof(gpart) is %4zi bytes.", sizeof(struct gpart));
message("sizeof(multipole) is %4zi bytes.", sizeof(struct multipole));
message("sizeof(acc_tensor) is %4zi bytes.", sizeof(struct acc_tensor));
message("sizeof(task) is %4zi bytes.", sizeof(struct task));
message("sizeof(cell) is %4zi bytes.", sizeof(struct cell));
message("sizeof(part) is %4zi bytes.", sizeof(struct part));
message("sizeof(xpart) is %4zi bytes.", sizeof(struct xpart));
message("sizeof(spart) is %4zi bytes.", sizeof(struct spart));
message("sizeof(gpart) is %4zi bytes.", sizeof(struct gpart));
message("sizeof(multipole) is %4zi bytes.", sizeof(struct multipole));
message("sizeof(grav_tensor) is %4zi bytes.", sizeof(struct grav_tensor));
message("sizeof(task) is %4zi bytes.", sizeof(struct task));
message("sizeof(cell) is %4zi bytes.", sizeof(struct cell));
}
/* Read the parameter file */
......
......@@ -57,14 +57,15 @@ pl.rcParams.update(PLOT_PARAMS)
# Tasks and subtypes. Indexed as in tasks.h.
TASKTYPES = ["none", "sort", "self", "pair", "sub_self", "sub_pair",
"init", "ghost", "extra_ghost", "drift", "kick1", "kick2",
"timestep", "send", "recv", "grav_gather_m", "grav_fft",
"grav_mm", "grav_up", "cooling", "sourceterms", "count"]
"timestep", "send", "recv", "grav_top_level", "grav_long_range",
"grav_mm", "grav_down", "cooling", "sourceterms", "count"]
SUBTYPES = ["none", "density", "gradient", "force", "grav", "external_grav",
"tend", "xv", "rho", "gpart", "count"]
"tend", "xv", "rho", "gpart", "multipole", "spart", "count"]
# Task/subtypes of interest.
FULLTYPES = ["self/force", "self/density", "sub_self/force",
"sub_self/density", "pair/force", "pair/density", "sub_pair/force",
FULLTYPES = ["self/force", "self/density", "self/grav", "sub_self/force",
"sub_self/density", "pair/force", "pair/density", "pair/grav",
"sub_pair/force",
"sub_pair/density", "recv/xv", "send/xv", "recv/rho", "send/rho",
"recv/tend", "send/tend"]
......
......@@ -22,7 +22,10 @@
# yes - do check for HDF5 library in standard locations.
# path - complete path to the HDF5 helper script h5cc or h5pcc.
#
# SWIFT modification: HDF5 is required, so only path is described.
# SWIFT modifications: HDF5 is required, so only the path option is described.
# When the h5cc or h5pcc commands are not available, we check whether HDF5
# can be used anyway; in that case no macros are defined except HAVE_HDF5
# and with_hdf5.
#
# If HDF5 is successfully found, this macro calls
#
......@@ -155,11 +158,37 @@ if test "$with_hdf5" = "yes"; then
AC_MSG_CHECKING([Using provided HDF5 C wrapper])
AC_MSG_RESULT([$H5CC])
fi
AC_MSG_CHECKING([for HDF5 libraries])
if test ! -f "$H5CC" || test ! -x "$H5CC"; then
AC_MSG_RESULT([no])
AC_MSG_WARN(m4_case(m4_normalize([$1]),
[serial], [
dnl Check if we already have HDF5 for C.
AC_CHECK_HEADER([hdf5.h], [ac_cv_hhdf5_h=yes], [ac_cv_hhdf5_h=no], [AC_INCLUDES_DEFAULT])
AC_CHECK_LIB([hdf5], [H5Fcreate], [ac_cv_libhdf5=yes],
[ac_cv_libhdf5=no])
if test "$ac_cv_hhdf5_h" = "yes" && test "$ac_cv_libhdf5" = "yes" ; then
dnl Can compile and link, so we have a HDF5, just don't know which version.
AC_MSG_CHECKING([for HDF5 libraries])
AC_MSG_RESULT([yes])
with_hdf5="yes"
HDF5_VERSION="unknown"
HDF5_LIBS="-lhdf5"
AC_SUBST([HDF5_VERSION])
AC_SUBST([HDF5_CC])
AC_SUBST([HDF5_CFLAGS])
AC_SUBST([HDF5_CPPFLAGS])
AC_SUBST([HDF5_LDFLAGS])
AC_SUBST([HDF5_LIBS])
AC_SUBST([HDF5_FC])
AC_SUBST([HDF5_FFLAGS])
AC_SUBST([HDF5_FLIBS])
AC_DEFINE([HAVE_HDF5], [1], [Defined if you have HDF5 support])
else
dnl Time to give up.
AC_MSG_CHECKING([for HDF5 libraries])
AC_MSG_RESULT([no])
AC_MSG_WARN(m4_case(m4_normalize([$1]),
[serial], [
Unable to locate serial HDF5 compilation helper script 'h5cc'.
Please specify --with-hdf5=<LOCATION> as the full path to h5cc.
HDF5 support is being disabled.
......@@ -172,9 +201,12 @@ Unable to locate HDF5 compilation helper scripts 'h5cc' or 'h5pcc'.
Please specify --with-hdf5=<LOCATION> as the full path to h5cc or h5pcc.
HDF5 support is being disabled.
]))
with_hdf5="no"
with_hdf5_fortran="no"
with_hdf5="no"
with_hdf5_fortran="no"
fi
else
AC_MSG_CHECKING([for HDF5 libraries])
dnl Get the h5cc output
HDF5_SHOW=$(eval $H5CC -show)
......
......@@ -45,7 +45,8 @@ include_HEADERS = space.h runner.h queue.h task.h lock.h cell.h part.h const.h \
physical_constants.h physical_constants_cgs.h potential.h version.h \
hydro_properties.h riemann.h threadpool.h cooling.h cooling_struct.h sourceterms.h \
sourceterms_struct.h statistics.h memswap.h cache.h runner_doiact_vec.h profiler.h \
dump.h logger.h active.h timeline.h sort_part.h xmf.h gravity_properties.h gravity_derivatives.h
dump.h logger.h active.h timeline.h xmf.h gravity_properties.h gravity_derivatives.h \
vector_power.h hydro_space.h sort_part.h
# Common source files
AM_SOURCES = space.c runner.c queue.c task.c cell.c engine.c \
......@@ -55,7 +56,8 @@ AM_SOURCES = space.c runner.c queue.c task.c cell.c engine.c \
physical_constants.c potential.c hydro_properties.c \
runner_doiact_fft.c threadpool.c cooling.c sourceterms.c \
statistics.c runner_doiact_vec.c profiler.c dump.c logger.c \
part_type.c xmf.c gravity_properties.c gravity.c
part_type.c xmf.c gravity_properties.c gravity.c \
hydro_space.c
# Include files for distribution, not installation.
nobase_noinst_HEADERS = align.h approx_math.h atomic.h cycle.h error.h inline.h kernel_hydro.h kernel_gravity.h \
......
......@@ -1114,7 +1114,7 @@ void cell_check_multipole(struct cell *c, void *data) {
#ifdef SWIFT_DEBUG_CHECKS
struct gravity_tensors ma;
const double tolerance = 1e-5; /* Relative */
const double tolerance = 1e-3; /* Relative */
/* First recurse */
if (c->split)
......@@ -1127,8 +1127,7 @@ void cell_check_multipole(struct cell *c, void *data) {
gravity_P2M(&ma, c->gparts, c->gcount);
/* Now compare the multipole expansion */
if (!gravity_multipole_equal(&ma.m_pole, &c->multipole->m_pole,
tolerance)) {
if (!gravity_multipole_equal(&ma, c->multipole, tolerance)) {
message("Multipoles are not equal at depth=%d!", c->depth);
message("Correct answer:");
gravity_multipole_print(&ma.m_pole);
......@@ -1487,7 +1486,7 @@ void cell_drift_all_multipoles(struct cell *c, const struct engine *e) {
} else if (ti_current > ti_old_multipole) {
/* Drift the multipole */
gravity_multipole_drift(c->multipole, dt);
gravity_drift(c->multipole, dt);
}
/* Update the time of the last drift */
......@@ -1504,7 +1503,21 @@ void cell_drift_all_multipoles(struct cell *c, const struct engine *e) {
* @param e The #engine (to get ti_current).
*/
void cell_drift_multipole(struct cell *c, const struct engine *e) {
error("To be implemented");
const double timeBase = e->timeBase;
const integertime_t ti_old_multipole = c->ti_old_multipole;
const integertime_t ti_current = e->ti_current;
/* Drift from the last time the cell was drifted to the current time */
const double dt = (ti_current - ti_old_multipole) * timeBase;
/* Check that we are actually going to move forward. */
if (ti_current < ti_old_multipole) error("Attempt to drift to the past");
if (ti_current > ti_old_multipole) gravity_drift(c->multipole, dt);
/* Update the time of the last drift */
c->ti_old_multipole = ti_current;
}
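For context on the new cell_drift_multipole above: drifting a multipole amounts to advancing its centre of mass with the bulk velocity of its particles over the elapsed dt. A conceptual C sketch, using a toy structure rather than SWIFT's actual struct gravity_tensors and gravity_drift():

/* Conceptual sketch only, not SWIFT's gravity_drift(): drifting a multipole
 * advances its centre of mass using the mass-weighted mean velocity of the
 * gparts it represents. */
struct toy_multipole {
  double CoM[3]; /* centre of mass */
  double vel[3]; /* mass-weighted mean velocity of the contributing gparts */
};

static void toy_gravity_drift(struct toy_multipole *m, double dt) {
  m->CoM[0] += m->vel[0] * dt;
  m->CoM[1] += m->vel[1] * dt;
  m->CoM[2] += m->vel[2] * dt;
}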
/**
......
......@@ -39,9 +39,6 @@
/* Thermal energy per unit mass used as a constant for the isothermal EoS */
#define const_isothermal_internal_energy 20.2615290634f
/* Self gravity stuff. */
#define const_gravity_multipole_order 1
/* Type of gradients to use (GIZMO_SPH only) */
/* If no option is chosen, no gradients are used (first order scheme) */
//#define GRADIENTS_SPH
......@@ -57,6 +54,23 @@
//#define GIZMO_FIX_PARTICLES
//#define GIZMO_TOTAL_ENERGY
/* Types of gradients to use for SHADOWFAX_SPH */
/* If no option is chosen, no gradients are used (first order scheme) */
#define SHADOWFAX_GRADIENTS
/* SHADOWFAX_SPH slope limiters */
#define SHADOWFAX_SLOPE_LIMITER_PER_FACE
#define SHADOWFAX_SLOPE_LIMITER_CELL_WIDE
/* Options to control SHADOWFAX_SPH */
/* This option disables cell movement */
//#define SHADOWFAX_FIX_CELLS
/* This option enables cell steering, i.e. trying to keep the cells regular by
adding a correction to the cell velocities.*/
#define SHADOWFAX_STEER_CELL_MOTION
/* This option evolves the total energy instead of the thermal energy */
//#define SHADOWFAX_TOTAL_ENERGY
/* Source terms */
#define SOURCETERMS_NONE
//#define SOURCETERMS_SN_FEEDBACK
......
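The SHADOWFAX_STEER_CELL_MOTION option above keeps the moving-mesh cells regular by nudging each mesh-generating point towards the centroid of its Voronoi cell. A hedged sketch of that idea follows; the names, the threshold, and the steering law are illustrative assumptions, not the Shadowswift implementation:

#include <math.h>

/* Illustrative cell-steering correction: if a generator has drifted too far
 * from its cell's centroid, add a velocity component (of order the local
 * sound speed) pointing back towards the centroid. */
void steer_cell_velocity(const double pos[3], const double centroid[3],
                         double velocity[3], double cell_radius,
                         double sound_speed) {
  /* Offset between the generator and the centre of mass of its cell. */
  const double d[3] = {centroid[0] - pos[0], centroid[1] - pos[1],
                       centroid[2] - pos[2]};
  const double dist = sqrt(d[0] * d[0] + d[1] * d[1] + d[2] * d[2]);

  /* Only steer once the offset exceeds a fraction of the cell radius,
   * ramping the correction up smoothly from zero. */
  const double threshold = 0.25 * cell_radius;
  if (dist > threshold) {
    const double chi = sound_speed * (dist - threshold) / (dist * dist);
    velocity[0] += chi * d[0];
    velocity[1] += chi * d[1];
    velocity[2] += chi * d[2];
  }
}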
......@@ -49,6 +49,8 @@
#include "./hydro/Default/hydro_debug.h"
#elif defined(GIZMO_SPH)
#include "./hydro/Gizmo/hydro_debug.h"
#elif defined(SHADOWFAX_SPH)
#include "./hydro/Shadowswift/hydro_debug.h"
#else
#error "Invalid choice of SPH variant"
#endif
......
......@@ -3029,6 +3029,13 @@ void engine_init_particles(struct engine *e, int flag_entropy_ICs) {
if (e->nodeID == 0) message("Computing initial gas densities.");
/* Initialise the softening lengths */
if (e->policy & engine_policy_self_gravity) {
for (size_t i = 0; i < s->nr_gparts; ++i)
gravity_init_softening(&s->gparts[i], e->gravity_properties);
}
/* Construct all cells and tasks to start everything */
engine_rebuild(e);
......@@ -3061,16 +3068,17 @@ void engine_init_particles(struct engine *e, int flag_entropy_ICs) {
}
#ifdef SWIFT_DEBUG_CHECKS
/* Let's store the total mass in the system for future checks */
e->s->total_mass = 0.;
for (size_t i = 0; i < s->nr_gparts; ++i)
e->s->total_mass += s->gparts[i].mass;
#ifdef WITH_MPI
if (MPI_Allreduce(MPI_IN_PLACE, &e->s->total_mass, 1, MPI_DOUBLE, MPI_SUM,
MPI_COMM_WORLD) != MPI_SUCCESS)
error("Failed to all-reduce total mass in the system.");
#endif
message("Total mass in the system: %e", e->s->total_mass);
/* Check that the top-level multipoles account for all the gparts (and hence all the mass) */
size_t num_gpart_mpole = 0;
if (e->policy & engine_policy_self_gravity) {
for (int i = 0; i < e->s->nr_cells; ++i)
num_gpart_mpole += e->s->cells_top[i].multipole->m_pole.num_gpart;
if (num_gpart_mpole != e->s->nr_gparts)
error(
"Multipoles don't contain the total number of gpart s->nr_gpart=%zd, "
"m_poles=%zd",
e->s->nr_gparts, num_gpart_mpole);