Commit ab551762 authored by Peter W. Draper

Merge branch 'master' into initial_partitions

parents 548403d9 dbe0ac66
......@@ -3,3 +3,8 @@ Matthieu Schaller matthieu.schaller@durham.ac.uk
Aidan Chalk aidan.chalk@durham.ac.uk
Peter W. Draper p.w.draper@durham.ac.uk
Bert Vandenbroucke bert.vandenbroucke@gmail.com
James S. Willis james.s.willis@durham.ac.uk
John A. Regan john.a.regan@durham.ac.uk
Angus Lepper angus.lepper@ed.ac.uk
Tom Theuns tom.theuns@durham.ac.uk
Richard G. Bower r.g.bower@durham.ac.uk
......@@ -75,26 +75,33 @@ SWIFT depends on a number of third party libraries that should be available
before you can build it.
HDF5: an HDF5 library (v. 1.8.x or higher) is required to read and write
particle data. One of the commands "h5cc" or "h5pcc" should be
available. If "h5pcc" is located, then a parallel HDF5 build, compiled for
the version of MPI in use, should be provided. If the command is not
available then it can be located using the "--with-hdf5" configure option.
The value should be the full path to the "h5cc" or "h5pcc" commands (see
the example invocation after this list).
MPI: an optional MPI library that fully supports MPI_THREAD_MULTIPLE.
Before running configure the "mpirun" command should be available in the
shell. If your command isn't called "mpirun" then define the "MPIRUN"
environment variable, either in the shell or when running configure.
METIS: a build of the METIS library can be optionally used to optimize the
load between MPI nodes (requires an MPI library). This should be found in
the standard installation directories, or pointed at using the
"--with-metis" configuration option, in which case the top-level
installation directory of the METIS build should be given. Note that to use
METIS you must at least supply "--with-metis".
libNUMA: a build of the NUMA library can be used to pin the threads to
the physical cores of the machine SWIFT is running on. This is not always
necessary as the OS scheduler may do a good job of distributing the threads
among the different cores on each computing node.
DOXYGEN: the doxygen tool is required to create the SWIFT API
documentation.
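For example, a configure invocation pointing at a parallel HDF5 compiler
wrapper and a METIS installation might look like the following (the paths
and the alternative MPI launcher name are illustrative, not defaults):

  ./configure --with-hdf5=/opt/hdf5-parallel/bin/h5pcc --with-metis=/opt/metis-5.1.0

  MPIRUN=srun ./configure --with-hdf5=/opt/hdf5-parallel/bin/h5pcc

The first form relies on "mpirun" being available in the shell; the second
names a different MPI launcher through the MPIRUN environment variable, as
described above.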
......@@ -15,7 +15,7 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Init the project.
AC_INIT([SWIFT],[0.1.0])
AC_INIT([SWIFT],[0.2.0])
AC_CONFIG_SRCDIR([src/space.c])
AC_CONFIG_AUX_DIR([.])
AM_INIT_AUTOMAKE
......@@ -270,11 +270,13 @@ AC_CHECK_FUNC(pthread_setaffinity_np, AC_DEFINE([HAVE_SETAFFINITY],[true],
AM_CONDITIONAL(HAVESETAFFINITY,
[test "$ac_cv_func_pthread_setaffinity_np" = "yes"])
have_numa="no"
if test "$ac_cv_func_pthread_setaffinity_np" = "yes"; then
# Check for libnuma.
AC_CHECK_HEADER([numa.h])
if test "$ac_cv_header_numa_h" = "yes"; then
AC_CHECK_LIB([numa], [numa_available])
have_numa="yes"
fi
fi
......@@ -335,14 +337,15 @@ AC_CONFIG_FILES([Makefile src/Makefile examples/Makefile doc/Makefile doc/Doxyfi
# Report general configuration.
AC_MSG_RESULT([
Compiler: $CC
vendor: $ax_cv_c_compiler_vendor
version: $ax_cv_c_compiler_version
flags: $CFLAGS
MPI enabled: $enable_mpi
HDF5 enabled: $with_hdf5
parallel: $have_parallel_hdf5
Metis enabled: $with_metis
Compiler : $CC
- vendor : $ax_cv_c_compiler_vendor
- version : $ax_cv_c_compiler_version
- flags : $CFLAGS
MPI enabled : $enable_mpi
HDF5 enabled : $with_hdf5
- parallel : $have_parallel_hdf5
Metis enabled : $with_metis
libNUMA enabled : $have_numa
])
# Generate output.
......
......@@ -61,3 +61,20 @@ swift_fixdt_mpi_SOURCES = main.c
swift_fixdt_mpi_CFLAGS = $(MYFLAGS) $(AM_CFLAGS) $(MPI_FLAGS) -DENGINE_POLICY="engine_policy_fixdt | engine_policy_keep $(ENGINE_POLICY_SETAFFINITY)"
swift_fixdt_mpi_LDADD = ../src/.libs/libswiftsim_mpi.a $(HDF5_LDFLAGS) $(HDF5_LIBS) $(MPI_LIBS)
# Scripts to generate ICs
EXTRA_DIST = UniformBox/makeIC.py \
PerturbedBox/makeIC.py \
SedovBlast/makeIC.py SedovBlast/makeIC_fcc.py SedovBlast/solution.py \
SodShock/makeIC.py SodShock/solution.py SodShock/glass_001.hdf5 SodShock/glass_002.hdf5 SodShock/rhox.py \
CosmoVolume/getIC.sh \
BigCosmoVolume/makeIC.py \
BigPerturbedBox/makeIC_fcc.py \
GreshoVortex/makeIC.py GreshoVortex/solution.py
# Scripts to plot task graphs
EXTRA_DIST += plot_tasks_MPI.py plot_tasks.py \
process_plot_tasks_MPI process_plot_tasks
# Simple run scripts
EXTRA_DIST += runs.sh
###############################################################################
# This file is part of SWIFT.
# Copyright (c) 2016 Matthieu Schaller (matthieu.schaller@durham.ac.uk)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import h5py
import sys
from numpy import *
# Generates a SWIFT IC file containing a Cartesian distribution of particles
# at a constant density and pressure in a cubic box
# Parameters
periodic = 1 # 1 For periodic box
boxSize = 1.
L = int(sys.argv[1]) # Number of particles along one axis
N = int(sys.argv[2]) # Write N particles at a time to avoid requiring a lot of RAM
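# Example invocation (illustrative): "python makeIC.py 128 1000000" lays down
# 128^3 particles, written in chunks of 10^6, into uniformBox_128.hdf5.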
rho = 2. # Density
P = 1. # Pressure
gamma = 5./3. # Gas adiabatic index
fileName = "uniformBox_%d.hdf5"%L
#---------------------------------------------------
numPart = L**3
mass = boxSize**3 * rho / numPart
internalEnergy = P / ((gamma - 1.)*rho)
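# Each particle carries an equal share of the total mass rho * boxSize^3; the
# specific internal energy follows from the ideal-gas relation u = P / ((gamma - 1) * rho).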
#---------------------------------------------------
n_iterations = numPart / N
remainder = numPart % N
print "Writing", numPart, "in", n_iterations, "iterations of size", N, "and a remainder of", remainder
#---------------------------------------------------
#File
file = h5py.File(fileName, 'w')
# Header
grp = file.create_group("/Header")
grp.attrs["BoxSize"] = boxSize
grp.attrs["NumPart_Total"] = [numPart % (long(1)<<32), 0, 0, 0, 0, 0]
grp.attrs["NumPart_Total_HighWord"] = [numPart / (long(1)<<32), 0, 0, 0, 0, 0]
grp.attrs["NumPart_ThisFile"] = [numPart, 0, 0, 0, 0, 0]
grp.attrs["Time"] = 0.0
grp.attrs["NumFilesPerSnapshot"] = 1
grp.attrs["MassTable"] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
grp.attrs["Flag_Entropy_ICs"] = 0
#Runtime parameters
grp = file.create_group("/RuntimePars")
grp.attrs["PeriodicBoundariesOn"] = periodic
#Particle group
grp = file.create_group("/PartType0")
# First create the arrays in the file
ds_v = grp.create_dataset('Velocities', (numPart, 3), 'f', chunks=True, compression="gzip")
ds_m = grp.create_dataset('Masses', (numPart,1), 'f', chunks=True, compression="gzip")
ds_h = grp.create_dataset('SmoothingLength', (numPart,1), 'f', chunks=True, compression="gzip")
ds_u = grp.create_dataset('InternalEnergy', (numPart,1), 'f', chunks=True, compression="gzip")
ds_id = grp.create_dataset('ParticleIDs', (numPart, 1), 'L', chunks=True, compression="gzip")
ds_x = grp.create_dataset('Coordinates', (numPart, 3), 'd', chunks=True, compression="gzip")
# Now loop and create parts of the dataset
offset = 0
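# Each pass below fills N particles of the pre-created datasets and then
# rebinds the temporary arrays to tiny placeholders so that only one chunk is
# held in memory at a time. Particle ids are mapped to lattice coordinates
# through id = x + L*y + L^2*z, i.e. x = id % L, y = (id // L) % L, z = id // L^2.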
for n in range(n_iterations):
v = zeros((N, 3))
ds_v[offset:offset+N,:] = v
v = zeros(1)
m = full((N, 1), mass)
ds_m[offset:offset+N] = m
m = zeros(1)
h = full((N, 1), 1.1255 * boxSize / L)
ds_h[offset:offset+N] = h
h = zeros(1)
u = full((N, 1), internalEnergy)
ds_u[offset:offset+N] = u
u = zeros(1)
ids = linspace(offset, offset+N, N, endpoint=False).reshape((N,1))
ds_id[offset:offset+N] = ids
x = ids % L;
y = ((ids - x) / L) % L;
z = (ids - x - L * y) / L**2;
ids = zeros(1)
coords = zeros((N, 3))
coords[:,0] = z[:,0] * boxSize / L + boxSize / (2*L)
coords[:,1] = y[:,0] * boxSize / L + boxSize / (2*L)
coords[:,2] = x[:,0] * boxSize / L + boxSize / (2*L)
ds_x[offset:offset+N,:] = coords
coords = zeros((1,3))
offset += N
print "Done", offset,"/", numPart, "(%.1f %%)"%(100*(float)(offset)/numPart)
# And now, the remainder
v = zeros((remainder, 3))
ds_v[offset:offset+remainder,:] = v
v = zeros(1)
m = full((remainder, 1), mass)
ds_m[offset:offset+remainder] = m
m = zeros(1)
h = full((remainder, 1), 1.1255 * boxSize / L)
ds_h[offset:offset+remainder] = h
h = zeros(1)
u = full((remainder, 1), internalEnergy)
ds_u[offset:offset+remainder] = u
u = zeros(1)
print "Done", offset+remainder,"/", numPart
ids = linspace(offset, offset+remainder, remainder, endpoint=False).reshape((remainder,1))
ds_id[offset:offset+remainder] = ids
x = ids % L;
y = ((ids - x) / L) % L;
z = (ids - x - L * y) / L**2;
coords = zeros((remainder, 3))
coords[:,0] = z[:,0] * boxSize / L + boxSize / (2*L)
coords[:,1] = y[:,0] * boxSize / L + boxSize / (2*L)
coords[:,2] = x[:,0] * boxSize / L + boxSize / (2*L)
ds_x[offset:offset+remainder,:] = coords
file.close()
......@@ -44,17 +44,22 @@ AM_SOURCES = space.c runner.c queue.c task.c cell.c engine.c \
kernel.c tools.c part.c partition.c
# Include files for distribution, not installation.
noinst_HEADERS = approx_math.h atomic.h cycle.h error.h inline.h kernel.h vector.h \
runner_doiact.h runner_doiact_grav.h units.h intrinsics.h \
hydro.h hydro_io.h gravity.h \
nobase_noinst_HEADERS = approx_math.h atomic.h cycle.h error.h inline.h kernel.h vector.h \
runner_doiact.h runner_doiact_grav.h units.h intrinsics.h minmax.h \
gravity.h \
gravity/Default/gravity.h gravity/Default/runner_iact_grav.h \
gravity/Default/gravity_part.h
gravity/Default/gravity_part.h \
hydro.h hydro_io.h \
hydro/Minimal/hydro.h hydro/Minimal/hydro_iact.h hydro/Minimal/hydro_io.h \
hydro/Minimal/hydro_debug.h hydro/Minimal/hydro_part.h
hydro/Minimal/hydro_debug.h hydro/Minimal/hydro_part.h \
hydro/Default/hydro.h hydro/Default/hydro_iact.h hydro/Default/hydro_io.h \
hydro/Default/hydro_debug.h hydro/Default/hydro_part.h \
hydro/Gadget2/hydro.h hydro/Gadget2/hydro_iact.h hydro/Gadget2/hydro_io.h \
hydro/Gadget2/hydro_debug.h hydro/Gadget2/hydro_part.h
hydro/Gadget2/hydro_debug.h hydro/Gadget2/hydro_part.h \
hydro/Gizmo/hydro.h hydro/Gizmo/hydro_iact.h hydro/Gizmo/hydro_io.h \
hydro/Gizmo/hydro_debug.h hydro/Gizmo/hydro_part.h \
riemann.h \
riemann/riemann_hllc.h riemann/riemann_trrs.h riemann/riemann_exact.h
# Sources and flags for regular library
libswiftsim_la_SOURCES = $(AM_SOURCES)
......
......@@ -88,8 +88,8 @@ int cell_unpack(struct pcell *pc, struct cell *c, struct space *s) {
/* Unpack the current pcell. */
c->h_max = pc->h_max;
c->t_end_min = pc->t_end_min;
c->t_end_max = pc->t_end_max;
c->ti_end_min = pc->ti_end_min;
c->ti_end_max = pc->ti_end_max;
c->count = pc->count;
c->tag = pc->tag;
......@@ -162,8 +162,8 @@ int cell_pack(struct cell *c, struct pcell *pc) {
/* Start by packing the data of the current cell. */
pc->h_max = c->h_max;
pc->t_end_min = c->t_end_min;
pc->t_end_max = c->t_end_max;
pc->ti_end_min = c->ti_end_min;
pc->ti_end_max = c->ti_end_max;
pc->count = c->count;
c->tag = pc->tag = atomic_inc(&cell_next_tag) % cell_max_tag;
......@@ -557,10 +557,11 @@ void cell_init_parts(struct cell *c, void *data) {
struct part *p = c->parts;
struct xpart *xp = c->xparts;
const int count = c->count;
for (int i = 0; i < c->count; ++i) {
p[i].t_begin = 0.;
p[i].t_end = 0.;
for (int i = 0; i < count; ++i) {
p[i].ti_begin = 0;
p[i].ti_end = 0;
xp[i].v_full[0] = p[i].v[0];
xp[i].v_full[1] = p[i].v[1];
xp[i].v_full[2] = p[i].v[2];
......@@ -568,7 +569,8 @@ void cell_init_parts(struct cell *c, void *data) {
hydro_init_part(&p[i]);
hydro_reset_acceleration(&p[i]);
}
c->t_end_min = 0.;
c->ti_end_min = 0;
c->ti_end_max = 0;
}
/**
......
......@@ -40,7 +40,8 @@ extern int cell_next_tag;
struct pcell {
/* Stats on this cell's particles. */
double h_max, t_end_min, t_end_max;
double h_max;
int ti_end_min, ti_end_max;
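/* End-of-step times are now positions on the discrete integer timeline
   (see engine.c), so they are stored as ints rather than doubles. */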
/* Number of particles in this cell. */
int count;
......@@ -65,7 +66,7 @@ struct cell {
double h_max;
/* Minimum and maximum end of time step in this cell. */
double t_end_min, t_end_max;
int ti_end_min, ti_end_max;
/* Minimum dimension, i.e. smallest edge of this cell. */
float dmin;
......
......@@ -75,12 +75,12 @@ void printgParticle(struct gpart *parts, long long int id, int N) {
if (parts[i].id == -id || (parts[i].id > 0 && parts[i].part->id == id)) {
printf(
"## gParticle[%d]: id=%lld, x=[%.16e,%.16e,%.16e], "
"v=[%.3e,%.3e,%.3e], a=[%.3e,%.3e,%.3e], m=%.3e, t_begin=%.3e, "
"t_end=%.3e\n",
"v=[%.3e,%.3e,%.3e], a=[%.3e,%.3e,%.3e], m=%.3e, t_begin=%d, "
"t_end=%d\n",
i, parts[i].part->id, parts[i].x[0], parts[i].x[1], parts[i].x[2],
parts[i].v[0], parts[i].v[1], parts[i].v[2], parts[i].a[0],
parts[i].a[1], parts[i].a[2], parts[i].mass, parts[i].t_begin,
parts[i].t_end);
parts[i].a[1], parts[i].a[2], parts[i].mass, parts[i].ti_begin,
parts[i].ti_end);
found = 1;
}
......
......@@ -49,6 +49,8 @@
#include "cycle.h"
#include "debug.h"
#include "error.h"
#include "hydro.h"
#include "minmax.h"
#include "part.h"
#include "timers.h"
#include "partition.h"
......@@ -995,7 +997,7 @@ int engine_marktasks(struct engine *e) {
struct scheduler *s = &e->sched;
int k, nr_tasks = s->nr_tasks, *ind = s->tasks_ind;
struct task *t, *tasks = s->tasks;
float t_end = e->time;
float ti_end = e->ti_current;
struct cell *ci, *cj;
// ticks tic = getticks();
......@@ -1057,7 +1059,7 @@ int engine_marktasks(struct engine *e) {
(t->type == task_type_sub && t->cj == NULL)) {
/* Set this task's skip. */
t->skip = (t->ci->t_end_min > t_end);
t->skip = (t->ci->ti_end_min > ti_end);
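/* i.e. the task is inactive if none of the particles in its cell reach the
   end of their time-step at the current integer time. */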
}
/* Pair? */
......@@ -1069,7 +1071,7 @@ int engine_marktasks(struct engine *e) {
cj = t->cj;
/* Set this task's skip. */
t->skip = (ci->t_end_min > t_end && cj->t_end_min > t_end);
t->skip = (ci->ti_end_min > ti_end && cj->ti_end_min > ti_end);
/* Too much particle movement? */
if (t->tight &&
......@@ -1094,7 +1096,8 @@ int engine_marktasks(struct engine *e) {
/* Kick? */
else if (t->type == task_type_kick) {
t->skip = (t->ci->t_end_min > t_end);
t->skip = (t->ci->ti_end_min > ti_end);
t->ci->updated = 0;
}
/* Drift? */
......@@ -1104,7 +1107,7 @@ int engine_marktasks(struct engine *e) {
/* Init? */
else if (t->type == task_type_init) {
/* Set this task's skip. */
t->skip = (t->ci->t_end_min > t_end);
t->skip = (t->ci->ti_end_min > ti_end);
}
/* None? */
......@@ -1290,7 +1293,7 @@ void engine_barrier(struct engine *e, int tid) {
void engine_collect_kick(struct cell *c) {
int updated = 0;
float t_end_min = FLT_MAX, t_end_max = 0.0f;
int ti_end_min = max_nr_timesteps, ti_end_max = 0;
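/* max_nr_timesteps is the largest value on the integer timeline, playing the
   role FLT_MAX played for the old floating-point end times. */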
double e_kin = 0.0, e_int = 0.0, e_pot = 0.0;
float mom[3] = {0.0f, 0.0f, 0.0f}, ang[3] = {0.0f, 0.0f, 0.0f};
struct cell *cp;
......@@ -1312,8 +1315,8 @@ void engine_collect_kick(struct cell *c) {
engine_collect_kick(cp);
/* And update */
t_end_min = fminf(t_end_min, cp->t_end_min);
t_end_max = fmaxf(t_end_max, cp->t_end_max);
ti_end_min = min(ti_end_min, cp->ti_end_min);
ti_end_max = max(ti_end_max, cp->ti_end_max);
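/* min()/max() are the macros from the new minmax.h header and work on the
   integer times, replacing the float-only fminf()/fmaxf(). */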
updated += cp->updated;
e_kin += cp->e_kin;
e_int += cp->e_int;
......@@ -1328,8 +1331,8 @@ void engine_collect_kick(struct cell *c) {
}
/* Store the collected values in the cell. */
c->t_end_min = t_end_min;
c->t_end_max = t_end_max;
c->ti_end_min = ti_end_min;
c->ti_end_max = ti_end_max;
c->updated = updated;
c->e_kin = e_kin;
c->e_int = e_int;
......@@ -1390,14 +1393,14 @@ void engine_init_particles(struct engine *e) {
struct space *s = e->s;
message("Initialising particles");
engine_prepare(e);
if(e->nodeID == 0) message("Initialising particles");
/* Make sure all particles are ready to go */
/* i.e. clean-up any stupid state in the ICs */
space_map_cells_pre(s, 1, cell_init_parts, NULL);
engine_prepare(e);
engine_marktasks(e);
// printParticle(e->s->parts, 1000, e->s->nr_parts);
......@@ -1449,7 +1452,7 @@ void engine_init_particles(struct engine *e) {
engine_launch(e, e->nr_threads, mask, submask);
TIMER_TOC(timer_runners);
// message("\n0th ENTROPY CONVERSION\n")
// message("\n0th ENTROPY CONVERSION\n")
/* Apply some conversions (e.g. internal energy -> entropy) */
space_map_cells_pre(s, 1, cell_convert_hydro, NULL);
......@@ -1470,7 +1473,7 @@ void engine_step(struct engine *e) {
int k;
int updates = 0;
float t_end_min = FLT_MAX, t_end_max = 0.f;
int ti_end_min = max_nr_timesteps, ti_end_max = 0;
double e_pot = 0.0, e_int = 0.0, e_kin = 0.0;
float mom[3] = {0.0, 0.0, 0.0};
float ang[3] = {0.0, 0.0, 0.0};
......@@ -1488,8 +1491,8 @@ void engine_step(struct engine *e) {
engine_collect_kick(c);
/* And aggregate */
t_end_min = fminf(t_end_min, c->t_end_min);
t_end_max = fmaxf(t_end_max, c->t_end_max);
ti_end_min = min(ti_end_min, c->ti_end_min);
ti_end_max = max(ti_end_max, c->ti_end_max);
e_kin += c->e_kin;
e_int += c->e_int;
e_pot += c->e_pot;
......@@ -1504,37 +1507,40 @@ void engine_step(struct engine *e) {
/* Aggregate the data from the different nodes. */
#ifdef WITH_MPI
double in[4], out[4];
out[0] = t_end_min;
if (MPI_Allreduce(out, in, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD) !=
int in_i[4], out_i[4];
out_i[0] = ti_end_min;
if (MPI_Allreduce(out_i, in_i, 1, MPI_INT, MPI_MIN, MPI_COMM_WORLD) !=
MPI_SUCCESS)
error("Failed to aggregate t_end_min.");
t_end_min = in[0];
out[0] = t_end_max;
if (MPI_Allreduce(out, in, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD) !=
ti_end_min = in_i[0];
out_i[0] = ti_end_max;
if (MPI_Allreduce(out_i, in_i, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD) !=
MPI_SUCCESS)
error("Failed to aggregate t_end_max.");
t_end_max = in[0];
out[0] = updates;
out[1] = e_kin;
out[2] = e_int;
out[3] = e_pot;
if (MPI_Allreduce(out, in, 3, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD) !=
ti_end_max = in_i[0];
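/* The integer time-step bounds are reduced separately with MPI_INT; the
   update count and the three energy terms are then summed as doubles in a
   single MPI_Allreduce of count 4 (the old call reduced only 3 of the 4
   values, so e_pot was left out). */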
double in_d[4], out_d[4];
out_d[0] = updates;
out_d[1] = e_kin;
out_d[2] = e_int;
out_d[3] = e_pot;
if (MPI_Allreduce(out_d, in_d, 4, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD) !=
MPI_SUCCESS)
error("Failed to aggregate energies.");
updates = in[0];
e_kin = in[1];
e_int = in[2];
e_pot = in[3];
updates = in_d[0];
e_kin = in_d[1];
e_int = in_d[2];
e_pot = in_d[3];
#endif
// message("\nDRIFT\n");
/* Move forward in time */
e->timeOld = e->time;
e->time = t_end_min;
e->ti_old = e->ti_current;
e->ti_current = ti_end_min;
e->step += 1;
e->timeStep = e->time - e->timeOld;
e->time = e->ti_current * e->timeBase + e->timeBegin;
e->timeOld = e->ti_old * e->timeBase + e->timeBegin;
e->timeStep = (e->ti_current - e->ti_old) * e->timeBase;
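/* Worked example (illustrative values, not taken from this change): with
   timeBegin = 0, timeEnd = 1 and a timeline of max_nr_timesteps = 2^20 ticks,
   timeBase = (timeEnd - timeBegin) / max_nr_timesteps ~ 9.5e-7, so
   ti_current = 4096 maps to time ~ 3.9e-3 and a step from ti_old = 0 gives
   timeStep = 4096 * timeBase. */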
/* Drift everybody */
engine_launch(e, e->nr_threads, 1 << task_type_drift, 0);
......@@ -1544,6 +1550,20 @@ void engine_step(struct engine *e) {
// if(e->step == 2) exit(0);
if (e->nodeID == 0) {
/* Print some information to the screen */
printf("%d %e %e %d %.3f\n", e->step, e->time, e->timeStep, updates,
e->wallclock_time);
fflush(stdout);
/* Write some energy statistics */
fprintf(e->file_stats, "%d %f %f %f %f %f %f %f %f %f %f %f\n", e->step,
e->time, e_kin, e_int, e_pot, e_kin + e_int + e_pot, mom[0], mom[1],
mom[2], ang[0], ang[1], ang[2]);
fflush(e->file_stats);
}
// message("\nACCELERATION AND KICK\n");
/* Re-distribute the particles amongst the nodes? */
......@@ -1600,20 +1620,7 @@ void engine_step(struct engine *e) {
TIMER_TOC2(timer_step);
if (e->nodeID == 0) {